14. Building an LSTM Neural Network for Time Series Forecasting with Keras in TensorFlow 2

    • Step 1: Import packages
    • Step 2: Enable on-demand GPU memory growth
    • Step 3: Read the data
    • Step 4: Set parameters
    • Step 5: Build the datasets
      • Split the data
      • Create the datasets
    • Step 6: Build and train the model
      • Build the LSTM network
      • Train and save the model
      • Plot the training curves
    • Step 7: Validate the model
      • Run predictions
      • Plot the predictions
    • Step 8: Forecasting
      • Set up the prediction seed
      • Run the forecast
      • Plot the forecast
    • Complete code

Step 1: Import packages

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import os
import tensorflow as tf
from tensorflow import keras

Step 2: Enable on-demand GPU memory growth

# set GPU
tf.debugging.set_log_device_placement(True)
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:  # guard so the script still runs on a CPU-only machine
    tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
    for gpu in gpus:
        # let GPU memory grow on demand instead of reserving it all up front
        tf.config.experimental.set_memory_growth(gpu, True)
print(len(gpus))
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(logical_gpus))
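
Note: if you are on TensorFlow 2.1 or later, the same configuration is also exposed outside the experimental namespace (memory growth excepted); a minimal equivalent sketch, assuming TF ≥ 2.1:

import tensorflow as tf

gpus = tf.config.list_physical_devices('GPU')  # stable counterpart of the experimental call
if gpus:
    tf.config.set_visible_devices(gpus[0], 'GPU')
    tf.config.experimental.set_memory_growth(gpus[0], True)  # memory growth is still under experimental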

Step 3: Read the data

# read data-sensor.csv
dataframe = pd.read_csv('data-sensor.csv')
pd_value = dataframe.values
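
data-sensor.csv does not ship with this post; it is assumed to hold 28 numeric sensor columns. To run the code end to end without the original file, a purely hypothetical stand-in can be generated first (noisy sine waves, for illustration only):

import numpy as np
import pandas as pd

# hypothetical substitute for data-sensor.csv: 2000 time steps x 28 sensor channels
rng = np.random.default_rng(42)
t = np.arange(2000)
data = np.stack([np.sin(0.02 * (k + 1) * t) + 0.1 * rng.standard_normal(t.size)
                 for k in range(28)], axis=1)
pd.DataFrame(data, columns=[f'sensor_{k}' for k in range(28)]).to_csv('data-sensor.csv', index=False)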

Step 4: Set parameters

look_back = 4
features = 28

look_back: the number of time steps contained in one input sample (the sliding-window length)
features: the number of features each time step carries

For example, with look_back = 4, rows 0-3 of the series form one input sample and row 4 is its prediction target.

Step 5: Build the datasets

Split the data

# ========= split dataset ===================
train_size = int(len(pd_value) * 0.8)
trainlist = pd_value[:train_size]
testlist = pd_value[train_size:]
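
Note that MinMaxScaler is imported in Step 1 but never applied. Sensor channels often sit on very different scales, and normalization usually helps LSTM training; a sketch of where it could slot in, fitting on the training split only so no test-set statistics leak:

scaler = MinMaxScaler()
trainlist = scaler.fit_transform(trainlist)  # learn min/max from training data only
testlist = scaler.transform(testlist)        # reuse the training statistics
# scaler.inverse_transform() later maps predictions back to the original units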

Create the datasets

# ========= numpy train ===========
def create_dataset(dataset, look_back):
    # look_back plays the same role as the LSTM time step
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back)]
        dataX.append(a)
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)

# with this little training data, look_back cannot be too large
trainX, trainY = create_dataset(trainlist, look_back)
testX, testY = create_dataset(testlist, look_back)

# ========== set dataset ======================
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], features))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], features))
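
A quick sanity check on the resulting shapes (the sample counts depend on the CSV length; the last two dimensions should equal look_back and features):

print(trainX.shape, trainY.shape)  # e.g. (n_train, 4, 28) and (n_train, 28)
print(testX.shape, testY.shape)    # e.g. (n_test, 4, 28) and (n_test, 28)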

Step 6: Build and train the model

Build the LSTM network

# create and fit the LSTM network
model = tf.keras.Sequential()
model.add(tf.keras.layers.LSTM(64, activation='relu', return_sequences=True, input_shape=(look_back, features)))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.LSTM(32, activation='relu'))
model.add(tf.keras.layers.Dense(features))
model.compile(loss='mean_squared_error', optimizer='adam')  # accuracy is not meaningful for regression, so only MSE is tracked

model.summary()
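
For reference, with look_back = 4 and features = 28 the summary should report roughly 23,808 parameters for the first LSTM layer (4 × 64 × (64 + 28 + 1)), 12,416 for the second (4 × 32 × (32 + 64 + 1)), and 924 for the Dense layer, about 37,148 in total.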

Train and save the model

history = model.fit(trainX, trainY, validation_data=(testX, testY), epochs=15, verbose=1).history
model.save("lstm-model.h5")

Plot the training curves

plt.plot(history['loss'], linewidth=2, label='Train')
plt.plot(history['val_loss'], linewidth=2, label='Test')
plt.legend(loc='upper right')
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
#plt.ylim(ymin=0.70,ymax=1)
plt.show()

Step 7: Validate the model

Run predictions

trainPredict = model.predict(trainX)
testPredict = model.predict(testX)

Plot the predictions

Training set:

plt.plot(trainY[:100,1])
plt.plot(trainPredict[:100,1])
plt.show()

Test set:

plt.plot(testY[:100,1])
plt.plot(testPredict[:100,1])
plt.show()

Step 8: Forecasting

Set up the prediction seed

# set predict_data
predict_begin = 1   # batch size: one window per prediction
predict_num = 100   # number of future steps to forecast
predict_result = np.zeros((predict_num + look_back, features), dtype=float)
# seed the first look_back rows with the last window of the test set
for i in range(look_back):
    predict_result[i] = testX[-predict_begin:][0, i]

Forecast the next 100 points, rolling forward from the last available data. The array predict_result holds the seed window followed by the forecasts.

Run the forecast

# predict
for i in range(predict_num):
    # take the most recent look_back rows as the model input window
    begin_data = np.reshape(predict_result[i:i + look_back], (predict_begin, look_back, features))
    predict_data = model.predict(begin_data)
    # write the prediction back so it joins the input window for the next step
    predict_result[look_back + i] = predict_data

This implements a rolling forecast: each step feeds the latest four rows (look_back = 4) into the model.
Because every prediction is written back into predict_result, the next slice automatically combines the previous three rows with the new result to form the four-row window for the next round; a toy check of this logic follows.
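
To verify the window mechanics in isolation, here is a toy check that replaces the network with a stub returning its last input row plus one (pure NumPy, no TensorFlow required):

import numpy as np

look_back, features, predict_num = 4, 2, 3
seed = np.arange(look_back * features, dtype=float).reshape(look_back, features)

result = np.zeros((predict_num + look_back, features))
result[:look_back] = seed
for i in range(predict_num):
    window = result[i:i + look_back]    # rows i .. i+look_back-1
    prediction = window[-1] + 1         # stub model: last row plus one
    result[look_back + i] = prediction  # slides the window forward by one step
print(result)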

Plot the forecast

# show plot
plt.plot(predict_result[-predict_num:,5])
plt.show()

Complete code

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import os
import tensorflow as tf
from tensorflow import keras

# set GPU
tf.debugging.set_log_device_placement(True)
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:  # guard so the script still runs on a CPU-only machine
    tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
    for gpu in gpus:
        # let GPU memory grow on demand instead of reserving it all up front
        tf.config.experimental.set_memory_growth(gpu, True)
print(len(gpus))
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(logical_gpus))

# read data-sensor.csv
dataframe = pd.read_csv('data-sensor.csv')
pd_value = dataframe.values

# ========= split dataset ===================
train_size = int(len(pd_value) * 0.8)
trainlist = pd_value[:train_size]
testlist = pd_value[train_size:]

look_back = 4
features = 28
step_out = 1  # not used below

# ========= numpy train ===========
def create_dataset(dataset, look_back):
    # look_back plays the same role as the LSTM time step
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back)]
        dataX.append(a)
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)

# with this little training data, look_back cannot be too large
trainX, trainY = create_dataset(trainlist, look_back)
testX, testY = create_dataset(testlist, look_back)

# ========== set dataset ======================
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], features))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], features))

# create and fit the LSTM network
model = tf.keras.Sequential()
model.add(tf.keras.layers.LSTM(64, activation='relu', return_sequences=True, input_shape=(look_back, features)))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.LSTM(32, activation='relu'))
model.add(tf.keras.layers.Dense(features))
model.compile(loss='mean_squared_error', optimizer='adam')  # accuracy is not meaningful for regression, so only MSE is tracked

model.summary()

history = model.fit(trainX, trainY, validation_data=(testX, testY), epochs=15, verbose=1).history
model.save("lstm-model.h5")

plt.plot(history['loss'], linewidth=2, label='Train')
plt.plot(history['val_loss'], linewidth=2, label='Test')
plt.legend(loc='upper right')
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
#plt.ylim(ymin=0.70,ymax=1)
plt.show()

trainPredict = model.predict(trainX)
testPredict = model.predict(testX)

plt.plot(trainY[:100,1])
plt.plot(trainPredict[:100,1])
plt.show()

plt.plot(testY[:100,1])
plt.plot(testPredict[:100,1])
plt.show()

# set predict_data
predict_begin = 1   # batch size: one window per prediction
predict_num = 100   # number of future steps to forecast
predict_result = np.zeros((predict_num + look_back, features), dtype=float)
# seed the first look_back rows with the last window of the test set
for i in range(look_back):
    predict_result[i] = testX[-predict_begin:][0, i]

# predict
for i in range(predict_num):
    # take the most recent look_back rows as the model input window
    begin_data = np.reshape(predict_result[i:i + look_back], (predict_begin, look_back, features))
    predict_data = model.predict(begin_data)
    # write the prediction back so it joins the input window for the next step
    predict_result[look_back + i] = predict_data

# show plot
plt.plot(predict_result[-predict_num:,5])
plt.show()

