I hadn't really dug into this area before; most of my previous work was on images, so I wanted to try something different. Drawing on a few existing code projects, I implemented single-step prediction of the Lorenz series with an LSTM.
import pandas as pd
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from sklearn.preprocessing import MinMaxScaler
from scipy import integrate
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def lorenz(xyz, t, s, r, b):
    # Lorenz system: dx/dt = s*(y - x), dy/dt = r*x - y - x*z, dz/dt = x*y - b*z
    x, y, z = xyz.tolist()  # unpack the state array into scalars
    dxdt = s * (y - x)
    dydt = r * x - y - x * z
    dzdt = x * y - b * z
    return dxdt, dydt, dzdt
t = np.arange(0,50,0.01)
x0,y0,z0 = 0,1,0
xyz_ini1 = [x0,y0,z0]
xyz_ini2 = [x0,y0+0.01,z0]
delta = 10
rho = 28
beta = 2.6  # note: the classic Lorenz value is 8/3 ≈ 2.667
para = (delta, rho, beta)  # a tuple rather than a list: odeint requires args to be a tuple
track1 = integrate.odeint(lorenz,xyz_ini1,t,args=para)
track2 = integrate.odeint(lorenz,xyz_ini2,t,args=para)
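# (Added sketch, not in the original) quantify the sensitivity to the 0.01
# perturbation in y0: Euclidean separation between the two trajectories.
sep = np.linalg.norm(track1 - track2, axis=1)
print('separation: start =', sep[0], '| end =', sep[-1])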
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(projection='3d')  # 3-D axes (fig.gca(projection=...) was removed in matplotlib 3.6)
ax.plot(track1[:,0],track1[:,1],track1[:,2],lw=1.0,color='r') # trajectory 1
ax.plot(track2[:,0],track2[:,1],track2[:,2],lw=1.0,color='g') # trajectory 2
...
plt.show()
# Author: 钱高高 https://www.bilibili.com/read/cv15279742/ Source: bilibili
yy = track1[:,2]
mmax, mmin = max(yy), min(yy)
yy = (yy - mmin) / (mmax - mmin)  # min-max normalisation to [0, 1]
xx = list(range(1, 5001))  # sample indices (5000 points: t = 0..50, step 0.01)
# plot the normalised z component
fig = plt.figure(figsize=(12,6))
plt.plot(xx,yy,'r',lw=1)
plt.show()
The following is adapted from jejun5's blog.
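The original jumps straight to reshaping X and Y without showing how they are built. A minimal sliding-window reconstruction, assuming time_step = 8 (the only value consistent with the [4991, 8, 1] shape printed below):

time_step = 8
X, Y = [], []
for i in range(len(yy) - time_step - 1):  # 5000 - 8 - 1 = 4991 windows
    X.append(yy[i:i + time_step])   # input: a window of 8 past values
    Y.append(yy[i + time_step])     # target: the next value (single step)
X, Y = np.array(X), np.array(Y)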
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# convert to tensors of shape (batch_size, seq_len, feature_size)
X = torch.tensor(X.reshape(-1, time_step, 1), dtype=torch.float).to(device)
Y = torch.tensor(Y.reshape(-1, 1, 1), dtype=torch.float).to(device)
print('Total datasets: ', X.shape, '-->', Y.shape)
# train/test split
split_ratio = 0.8
len_train = int(X.shape[0] * split_ratio)
X_train, Y_train = X[:len_train, :, :], Y[:len_train, :, :]
print('Train datasets: ', X_train.shape, '-->', Y_train.shape)
Total datasets: torch.Size([4991, 8, 1]) --> torch.Size([4991, 1, 1])
Train datasets: torch.Size([3992, 8, 1]) --> torch.Size([3992, 1, 1])
batch_size = 10
ds = TensorDataset(X, Y)
dl = DataLoader(ds, batch_size=batch_size, num_workers=0)
ds_train = TensorDataset(X_train, Y_train)
dl_train = DataLoader(ds_train, batch_size=batch_size, num_workers=0)
# inspect the first batch
x, y = next(iter(dl_train))
print(x.shape)
print(y.shape)
torch.Size([10, 8, 1])
torch.Size([10, 1, 1])
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.lstm = nn.LSTM(input_size=1, hidden_size=6, num_layers=3, batch_first=True)
        self.fc = nn.Linear(in_features=6, out_features=1)

    def forward(self, x):
        # x: input of shape (batch_size, seq_len, input_size)
        x, _ = self.lstm(x)
        # x: LSTM output of shape (batch_size, seq_len, hidden_size)
        x = x[:, -1, :]       # keep only the last time step
        x = self.fc(x)
        x = x.view(-1, 1, 1)  # match the target shape (batch_size, 1, 1)
        return x
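As a quick sanity check (not part of the original), the network should map a (batch_size, seq_len, 1) window to a (batch_size, 1, 1) prediction:

dummy = torch.randn(10, 8, 1)  # a fake batch: 10 windows of length 8
print(Net()(dummy).shape)      # torch.Size([10, 1, 1])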
# custom training step and loop
model = Net().to(device)
loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
def train_step(model, features, labels):
    # forward pass: compute the loss
    predictions = model(features)
    loss = loss_function(predictions, labels)
    # backward pass: compute gradients
    loss.backward()
    # update parameters, then clear gradients for the next step
    optimizer.step()
    optimizer.zero_grad()
    return loss.item()
# sanity-check one batch
features, labels = next(iter(dl_train))
loss = train_step(model, features, labels)
loss
0.0006788573227822781
def train_model(model, epochs):
    for epoch in range(1, epochs + 1):
        list_loss = []
        for features, labels in dl_train:
            lossi = train_step(model, features, labels)
            list_loss.append(lossi)
        loss = np.mean(list_loss)
        if epoch % 10 == 0:
            print('epoch={} | loss={}'.format(epoch, loss))
train_model(model, 50)
epoch=10 | loss=0.0007172323520110524
epoch=20 | loss=0.0004083490441055826
epoch=30 | loss=0.00010935294388638716
epoch=40 | loss=0.00019470315970688824
epoch=50 | loss=0.0004210038754169432
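The 80/20 split created earlier is never actually evaluated in the original; a minimal sketch of that evaluation on the held-out 20%, reusing the tensors built above:

model.eval()
with torch.no_grad():
    X_test, Y_test = X[len_train:], Y[len_train:]
    test_mse = loss_function(model(X_test), Y_test).item()
print('Test MSE:', test_mse)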
model.eval()
with torch.no_grad():
    y_pred = model(X).cpu().numpy().squeeze()
n = list(range(len(y_pred)))
fig = plt.figure(figsize=(12, 6))
# shift the ground truth by time_step so it lines up with the predictions
# (X[i] covers yy[i:i+time_step] and predicts yy[i+time_step])
plt.plot(n, yy[time_step:time_step + len(n)], 'r', lw=1)
plt.plot(n, y_pred, 'blue', lw=1)
plt.show()