Part 1: We will learn by working through an example of using an RNN (recurrent neural network) to predict the sin function.
Before predicting the sin function, let's first get to know TFLearn, TensorFlow's high-level wrapper, which makes TensorFlow code much more concise. TFLearn is packaged inside tf.contrib.learn; it both encapsulates common neural network structures and takes care of the model-training boilerplate, so TensorFlow programs become much shorter. The iris classification example below shows the typical workflow: define a model_fn that returns predictions, a loss, and a training op; wrap it in learn.Estimator; then call fit and predict.
# Iris classification with TFLearn (tf.contrib.learn).
from sklearn import datasets
from sklearn import metrics
from sklearn.model_selection import train_test_split
import tensorflow as tf

learn = tf.contrib.learn

def my_model(features, target):
    # Convert each class index into a one-hot vector of depth 3.
    target = tf.one_hot(target, 3, 1, 0)
    # TFLearn's built-in logistic regression returns the logits and the loss.
    logits, loss = learn.models.logistic_regression(features, target)
    train_op = tf.contrib.layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adagrad',
        learning_rate=0.1)
    # Return the predicted class, the loss, and the training op.
    return tf.argmax(logits, 1), loss, train_op

iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=0)

# Wrap the model function in an Estimator, then train and evaluate.
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=100)

y_predicted = classifier.predict(x_test)
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: %.2f%%' % (score * 100))
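A side note on the my_model function above: tf.one_hot(target, 3, 1, 0) turns each class index into a length-3 one-hot vector. Here is an equivalent NumPy sketch (the three labels are made up for illustration):

import numpy as np

# Three example iris labels (class indices 0..2), illustrative only.
target = np.array([0, 2, 1])

# Same effect as tf.one_hot(target, depth=3, on_value=1, off_value=0):
one_hot = np.eye(3, dtype=np.int32)[target]
print(one_hot)
# [[1 0 0]
#  [0 0 1]
#  [0 1 0]]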
Part 2: Predicting the sin function as a time series. Because a standard recurrent neural network predicts values at discrete time steps, the program first has to discretize the continuous sin curve. Discretization here means taking a given interval [0, MAX] and approximating the continuous curve with a finite number of sample points.
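Before looking at the full program, here is a minimal sketch of this discretize-then-window idea, with TIMESTEPS shrunk to 3 and only 8 sample points so the windowing is easy to see (the real program below uses TIMESTEPS = 10 and 10000 training samples):

import numpy as np

TIMESTEPS = 3  # shrunk from 10 for illustration

# Discretize sin on [0, 2*pi] with 8 sample points.
seq = np.sin(np.linspace(0, 2 * np.pi, 8, dtype=np.float32))

# Slide a window of TIMESTEPS values over the sequence; each window
# is an input X, and the value right after it is the label Y.
X, Y = [], []
for i in range(len(seq) - TIMESTEPS - 1):
    X.append([seq[i:i + TIMESTEPS]])
    Y.append([seq[i + TIMESTEPS]])
print(np.array(X).shape)  # (4, 1, 3): 4 windows of length 3
print(np.array(Y).shape)  # (4, 1):    one target value per window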
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
import matplotlib as mpl
mpl.use('Agg')  # select a non-interactive backend before importing pyplot
from matplotlib import pyplot as plt

learn = tf.contrib.learn

HIDDEN_SIZE = 30         # number of hidden units in each LSTM cell
NUM_LAYERS = 2           # number of stacked LSTM layers
TIMESTEPS = 10           # length of the input window
TRAINING_STEPS = 10000
BATCH_SIZE = 32
TRAINING_EXAMPLES = 10000
TESTING_EXAMPLES = 1000
SAMPLE_GAP = 0.01        # sampling interval used to discretize sin

def generate_data(seq):
    # Turn a 1-D sequence into (window, next value) training pairs.
    X = []
    Y = []
    for i in range(len(seq) - TIMESTEPS - 1):
        X.append([seq[i:i + TIMESTEPS]])
        Y.append([seq[i + TIMESTEPS]])
    return np.array(X, dtype=np.float32), np.array(Y, dtype=np.float32)

def LstmCell():
    lstm_cell = rnn.BasicLSTMCell(HIDDEN_SIZE, state_is_tuple=True)
    return lstm_cell

def lstm_model(X, y):
    # Stack NUM_LAYERS LSTM cells and run them over the input.
    cell = rnn.MultiRNNCell([LstmCell() for _ in range(NUM_LAYERS)])
    output, _ = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
    output = tf.reshape(output, [-1, HIDDEN_SIZE])

    # A linear (no activation) fully connected layer produces the prediction.
    predictions = tf.contrib.layers.fully_connected(output, 1, None)

    labels = tf.reshape(y, [-1])
    predictions = tf.reshape(predictions, [-1])
    loss = tf.losses.mean_squared_error(labels, predictions)
    train_op = tf.contrib.layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer="Adagrad",
        learning_rate=0.1)
    return predictions, loss, train_op

regressor = SKCompat(learn.Estimator(model_fn=lstm_model,
                                     model_dir="Models/model_2"))

# Sample the training data from [0, test_start) and the test data
# from [test_start, test_end) so the two sets do not overlap.
test_start = TRAINING_EXAMPLES * SAMPLE_GAP
test_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP
train_X, train_y = generate_data(
    np.sin(np.linspace(0, test_start, TRAINING_EXAMPLES, dtype=np.float32)))
test_X, test_y = generate_data(
    np.sin(np.linspace(test_start, test_end, TESTING_EXAMPLES, dtype=np.float32)))

regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)

predicted = np.array([[pred] for pred in regressor.predict(test_X)])
rmse = np.sqrt(((predicted - test_y) ** 2).mean(axis=0))
print("RMSE is: %f" % rmse[0])

# Create the figure before plotting, and save it with savefig
# (plt.show() does nothing under the Agg backend).
fig = plt.figure()
plot_predicted, = plt.plot(predicted, label='predicted')
plot_test, = plt.plot(test_y, label='real_sin')
plt.legend([plot_predicted, plot_test], ['predicted', 'real_sin'])
fig.savefig('sin.png')

On one run this prints an RMSE of 0.001563, which shows the prediction of the sin function is fairly good: the predicted curve and the real curve in sin.png almost coincide.
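As a sanity check on the error metric: the rmse line above is just the root of the column-wise mean squared error, as this tiny NumPy example shows (the numbers are toy values, not model output):

import numpy as np

predicted = np.array([[0.10], [0.52], [0.98]])  # toy predictions
test_y = np.array([[0.11], [0.50], [1.00]])     # toy ground truth

# Square the errors, average down each column, then take the root.
rmse = np.sqrt(((predicted - test_y) ** 2).mean(axis=0))
print("RMSE is: %f" % rmse[0])  # about 0.017 for these toy values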