import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt


class NetConfig():
    def __init__(self):
        self.rnn_unit = 10       # number of hidden units in the RNN cell
        self.input_size = 7      # number of input features per time step
        self.output_size = 1     # dimension of the prediction
        self.lr = 0.0006         # learning rate
        self.time_step = 20      # length of each input sequence
        self.batch_size = 80
        self.weights = {
            "in": tf.Variable(tf.random_normal([self.input_size, self.rnn_unit])),
            "out": tf.Variable(tf.random_normal([self.rnn_unit, self.output_size]))
        }
        self.biases = {
            "in": tf.Variable(tf.constant(0.1, shape=[self.rnn_unit, ])),
            "out": tf.Variable(tf.constant(0.1, shape=[self.output_size, ]))
        }
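
# NOTE: the train/predict methods below call get_train_data and get_test_data,
# which are not part of this listing. The sketch below is an assumed, minimal
# implementation: it standardizes the data, treats columns 0-6 as features and
# column 7 as the label, and slides a window of length time_step over the rows.
# The train/test split point (row 5800) and the windowing details are assumptions.
def get_train_data(data, time_step=20, batch_size=80, train_end=5800):
    train_data = data[:train_end]
    normalized = (train_data - np.mean(train_data, axis=0)) / np.std(train_data, axis=0)
    batch_index, train_x, train_y = [], [], []
    for i in range(len(normalized) - time_step):
        if i % batch_size == 0:
            batch_index.append(i)
        x = normalized[i:i + time_step, :7]               # features for one window
        y = normalized[i:i + time_step, 7, np.newaxis]    # label column, kept 3-D
        train_x.append(x.tolist())
        train_y.append(y.tolist())
    batch_index.append(len(normalized) - time_step)
    return batch_index, train_x, train_y

def get_test_data(data, time_step=20, test_begin=5800):
    test_data = data[test_begin:]
    mean_ = np.mean(test_data, axis=0)
    std_ = np.std(test_data, axis=0)
    normalized = (test_data - mean_) / std_
    size = len(normalized) // time_step
    test_x, test_y = [], []
    for i in range(size):
        x = normalized[i * time_step:(i + 1) * time_step, :7]
        y = normalized[i * time_step:(i + 1) * time_step, 7]
        test_x.append(x.tolist())
        test_y.extend(y)
    return mean_, std_, test_x, test_y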

class MyModel():
    def __init__(self):
        self.sess = tf.Session()
        self.nc = NetConfig()

    def train_rnn(self, data, save_path, iter_num):
        weights = self.nc.weights
        biases = self.nc.biases
        input_size = self.nc.input_size
        rnn_unit = self.nc.rnn_unit
        output_size = self.nc.output_size
        time_step = self.nc.time_step
        batch_size = self.nc.batch_size
        lr = self.nc.lr
        sess = self.sess
        X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
        Y = tf.placeholder(tf.float32, shape=[None, time_step, output_size])
        batch_index, train_x, train_y = get_train_data(data)
        pred, _ = self.rnn(X)
        # mean squared error between the flattened prediction and target sequences
        loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(Y, [-1])))
        train_op = tf.train.AdamOptimizer(lr).minimize(loss)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=15)
        sess.run(tf.global_variables_initializer())
        total_loss = []
        for i in range(iter_num):
            for step in range(len(batch_index) - 1):
                _, loss_ = sess.run([train_op, loss],
                                    feed_dict={X: train_x[batch_index[step]:batch_index[step + 1]],
                                               Y: train_y[batch_index[step]:batch_index[step + 1]]})
                total_loss.append(loss_)
            if i % 10 == 0:
                print("Iteration " + str(i) + ", loss: " + str(loss_))
            if i % 20 == 0:
                print("Model saved:", saver.save(sess, save_path, global_step=i))
        return total_loss

    def predict_rnn(self, data, model_path):
        weights = self.nc.weights
        biases = self.nc.biases
        input_size = self.nc.input_size
        rnn_unit = self.nc.rnn_unit
        output_size = self.nc.output_size
        time_step = self.nc.time_step
        batch_size = self.nc.batch_size
        lr = self.nc.lr
        sess = self.sess
        X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
        mean_, std_, test_x, test_y = get_test_data(data)
        pred, _ = self.rnn(X)
        saver = tf.train.Saver(tf.global_variables())
        # restore the most recent checkpoint saved during training
        module_file = tf.train.latest_checkpoint(model_path)
        saver.restore(sess, module_file)
        test_predict = []
        for step in range(len(test_x) - 1):
            prob_ = sess.run(pred, feed_dict={X: [test_x[step]]})
            predict = prob_.reshape((-1))
            test_predict.extend(predict)
        # undo the normalization on column 7 (the label column)
        test_y = np.array(test_y) * std_[7] + mean_[7]
        test_predict = np.array(test_predict) * std_[7] + mean_[7]
        # mean relative error between predictions and ground truth
        acc = np.average(np.abs(test_predict - test_y[:len(test_predict)]) / test_y[:len(test_predict)])
        return test_y, test_predict, acc

    def rnn(self, X):
        weights = self.nc.weights
        biases = self.nc.biases
        input_size = self.nc.input_size
        rnn_unit = self.nc.rnn_unit
        output_size = self.nc.output_size
        time_step = self.nc.time_step
        batch_size = self.nc.batch_size
        lr = self.nc.lr
        sess = self.sess
        batch_size = tf.shape(X)[0]
        time_step = tf.shape(X)[1]
        w_in = weights["in"]
        b_in = biases["in"]
        input_ = tf.reshape(X, [-1, input_size])
        input_rnn = tf.matmul(input_, w_in) + b_in
        input_rnn = tf.reshape(input_rnn, [-1, time_step, rnn_unit])
        cell = tf.nn.rnn_cell.BasicRNNCell(rnn_unit)
        init_state = cell.zero_state(batch_size, dtype=tf.float32)
        output_rnn, final_states = tf.nn.dynamic_rnn(cell, input_rnn, initial_state=init_state, dtype=tf.float32)
        output = tf.reshape(output_rnn, [-1, rnn_unit])
        w_out = weights['out']
        b_out = biases['out']
        pred = tf.matmul(output, w_out) + b_out
        return pred, final_states

def plot_train_loss(total_loss):
    plt.figure()
    plt.plot(list(range(len(total_loss))), total_loss, color='b')
    plt.show()

def plot_predict(test_y, test_predict):
    plt.figure()
    plt.plot(list(range(len(test_predict))), test_predict, color='b')
    plt.plot(list(range(len(test_y))), test_y, color='r')
    plt.show()
f = open('data/dataset.csv')
df = pd.read_csv(f)
data = df.iloc[:, 2:10].values
my_model = MyModel()
total_loss_rnn = my_model.train_rnn(data, save_path="./stock_model_rnn/model", iter_num=200)

# rebuild the graph from scratch before restoring the trained model for prediction
tf.reset_default_graph()
my_model = MyModel()
test_y, test_predict, acc = my_model.predict_rnn(data, "./stock_model_rnn/")
plot_predict(test_y, test_predict)
error_rnn = np.sum((test_y[1:300] - test_predict[1:300])**2)
print(error_rnn)
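
# plot_train_loss is defined above but never called in the original script;
# the loss history returned by train_rnn can be visualized with it if desired:
plot_train_loss(total_loss_rnn)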