Tensorflow:使用高级接口TFLearn用于数据预测

import numpy as np

import tensorflow as tf

import matplotlib.pyplot as plt
# Alias for TensorFlow's high-level learn API (TF 1.x contrib module;
# removed in TF 2.x).
learn=tf.contrib.learn
HIDDEN_SIZE = 30  # number of units in each LSTM cell
NUM_LAYERS = 2  # number of stacked LSTM layers
TIMESTEPS = 10  # length of the input window fed to the RNN
TRAINING_STEPS = 10000  # training iterations passed to Estimator.fit
BATCH_SIZE = 32  # minibatch size for training
TRAINING_EXAMPLES = 10000  # number of sin samples used for training
TESTING_EXAMPLES = 10000  # number of sin samples used for testing
SAMPLE_GAP = 0.01  # spacing between consecutive sample points on the x axis

def generate_data(seq, timesteps=None):
    """Slice a 1-D sequence into (window, next-value) supervised pairs.

    Args:
        seq: 1-D sequence of floats (e.g. sampled sin values).
        timesteps: window length; defaults to the module-level TIMESTEPS
            (new optional parameter, backward compatible).

    Returns:
        Tuple (X, Y) of float32 arrays: X[i] has shape (1, timesteps) and
        holds seq[i : i+timesteps]; Y[i] holds the next value
        seq[i+timesteps].
    """
    if timesteps is None:
        timesteps = TIMESTEPS
    X = []
    Y = []
    # Fix: the original looped over range(len(seq) - timesteps - 1), which
    # dropped the last valid window (off-by-one).
    for i in range(len(seq) - timesteps):
        X.append([seq[i:i + timesteps]])
        Y.append([seq[i + timesteps]])
    return np.array(X, dtype=np.float32), np.array(Y, dtype=np.float32)
def lstm_model(X, Y):
    """Model function for learn.Estimator: stacked LSTM -> linear regression.

    Args:
        X: input tensor of shape (batch, 1, TIMESTEPS) as produced by
           generate_data — TODO confirm against the feed shape.
        Y: target tensor of shape (batch, 1).

    Returns:
        (prediction, loss, train_op) as required by the TF 1.x
        contrib.learn model_fn contract.
    """
    # Fix: build one fresh cell per layer. The original used
    # [lstm_cell] * NUM_LAYERS, which reuses a single cell object so every
    # layer shares the same weights.
    cells = [tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE)
             for _ in range(NUM_LAYERS)]
    cell = tf.nn.rnn_cell.MultiRNNCell(cells)

    # Convert (batch, steps, ...) into a per-timestep list for static_rnn.
    # NOTE: tf.unstack is TF >= 1.0; older releases used tf.unpack.
    x_ = tf.unstack(X, axis=1)

    # Fix: tf.nn.rnn does not exist in TF 1.x; it was renamed
    # tf.nn.static_rnn.
    output, _ = tf.nn.static_rnn(cell, x_, dtype=tf.float32)
    output = output[-1]  # regress on the last timestep's output only

    # Fix: the original called learn.modeks (typo) instead of learn.models.
    prediction, loss = learn.models.linear_regression(output, Y)

    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(),
        optimizer="Adagrad", learning_rate=0.1)
    return prediction, loss, train_op
# Wrap the model function in a TF 1.x high-level Estimator.
regressor = learn.Estimator(model_fn=lstm_model)

# Sample the sin curve: [0, test_start) for training and
# [test_start, test_end) for testing, with SAMPLE_GAP spacing.
test_start = TRAINING_EXAMPLES * SAMPLE_GAP
test_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP
train_X, train_Y = generate_data(np.sin(np.linspace(
        0, test_start, TRAINING_EXAMPLES, dtype=np.float32)))
test_X, test_Y = generate_data(np.sin(np.linspace(
        test_start, test_end, TESTING_EXAMPLES, dtype=np.float32)))

regressor.fit(train_X, train_Y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)
# Collect predictions into an (N, 1)-shaped nested list so it broadcasts
# against test_Y below.
predicted = [[pred] for pred in regressor.predict(test_X)]

# Root mean squared error over the test set.
rmse = np.sqrt(((predicted - test_Y) ** 2).mean(axis=0))
# Fix: the original label said "Mean Square Error", but the value printed
# is the ROOT mean squared error.
print("Root Mean Square Error is :%f" % rmse[0])

# Plot predictions against the true sin curve.
# NOTE(review): no plt.show()/savefig here — presumably run in an
# interactive/notebook environment; confirm before reusing as a script.
fig = plt.figure()
plot_predicted = plt.plot(predicted, label='predicted')
plot_test = plt.plot(test_Y, label='real_sin')
plt.legend([plot_predicted, plot_test], ['predicted', 'real_sin'])

注意:`x_ = tf.unstack(X, axis=1)` 这一行的写法随 TensorFlow 版本不同而变化:TensorFlow 1.10 使用 `tf.unstack`,而更低的旧版本应改用 `tf.unpack`。

你可能感兴趣的:(tensorflow)