from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
# ---- Hyperparameters and global configuration ----
N_ITER = 5000  # NOTE(review): defined but never used below — possibly dead config
BATCH_SIZE = 64  # mini-batch size for both the train and test DataLoaders
DATA_PATH = './data/data.xls'  # Excel file with the raw time series
NUM_CLASSES = 1  # model output width (single regression target)
HIDDEN_SIZE = 64  # LSTM hidden-state width
NUM_LAYERS = 2  # number of stacked LSTM layers
LEARNING_RATE = 0.0001  # Adam step size
NUM_EPOCHS = 500  # full passes over the training loader
SEQ_LENGTH = 3  # window length (time steps) fed to the LSTM per sample
INPUT_SIZE = 3  # features per time step (columns of the data table)
# Scalers live at module level so eval() can invert the label scaling later.
mm_x = MinMaxScaler()  # fit on the full feature matrix in normalization()
mm_y = MinMaxScaler()  # fit on the label column only in normalization()
def read_data(data_path):
    """Load the Excel sheet and return (features, labels).

    The whole table serves as the feature matrix; column index 2 is the
    prediction target (returned as a single-column DataFrame).
    """
    table = pd.read_excel(data_path)
    return table, table.iloc[:, [2]]
def normalization(x, y):
    """Min-max scale features and labels to [0, 1] and return them as arrays.

    Fits the module-level scalers (mm_x, mm_y) in place so that eval()
    can later call mm_y.inverse_transform.  Accepts DataFrames or numpy
    arrays for both arguments (the original required a DataFrame for
    ``x`` because it called ``.values``).

    NOTE(review): the scalers are fit on the FULL dataset before the
    train/test split, which leaks test statistics into training —
    consider fitting on the training split only.
    """
    x = mm_x.fit_transform(np.asarray(x))
    y = mm_y.fit_transform(np.asarray(y))
    return x, y
def sliding_windows(data, seq_length=None):
    """Build supervised (window, next-value) pairs from a 2-D series.

    Parameters
    ----------
    data : np.ndarray of shape (T, F)
        Normalized time series; the LAST column is the prediction target.
    seq_length : int, optional
        Window length; defaults to the module-level SEQ_LENGTH.

    Returns
    -------
    x : np.ndarray of shape (T - seq_length, seq_length, F)
        Input windows.
    y : np.ndarray of shape (T - seq_length,)
        Target = last column of the step immediately after each window.
    """
    if seq_length is None:
        seq_length = SEQ_LENGTH
    windows = []
    targets = []
    # range(T - seq_length) visits every valid window: for the last index
    # i = T - seq_length - 1, the target data[i + seq_length] is in bounds.
    # The original range(T - seq_length - 1) silently dropped that sample.
    for start in range(len(data) - seq_length):
        windows.append(data[start:start + seq_length, :])
        targets.append(data[start + seq_length, -1])
    return np.array(windows), np.array(targets)
def data_generator(x_train, y_train, x_test, y_test, batch_size=None):
    """Wrap numpy train/test splits in float32 DataLoaders.

    Parameters
    ----------
    x_train, y_train, x_test, y_test : np.ndarray
        Window/target arrays as produced by sliding_windows + split.
    batch_size : int, optional
        Mini-batch size; defaults to the module-level BATCH_SIZE.

    Returns
    -------
    (train_loader, test_loader) : tuple of DataLoader
        Both iterate in original order (shuffle=False), preserving the
        temporal ordering of the windows.
    """
    if batch_size is None:
        batch_size = BATCH_SIZE
    train_dataset = TensorDataset(torch.from_numpy(x_train).to(torch.float32),
                                  torch.from_numpy(y_train).to(torch.float32))
    test_dataset = TensorDataset(torch.from_numpy(x_test).to(torch.float32),
                                 torch.from_numpy(y_test).to(torch.float32))
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=batch_size,
                              shuffle=False)
    # Renamed from test_Loader for consistent snake_case naming.
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=batch_size,
                             shuffle=False)
    return train_loader, test_loader
# ---- Build the dataset pipeline at import time ----
# Read the Excel table, scale to [0, 1], window it, split, and wrap in loaders.
feature, label = read_data(DATA_PATH)
feature, label = normalization(feature, label)
x, y = sliding_windows(feature)
# NOTE(review): a random split shuffles the windows, so train and test
# windows can overlap in time — consider a chronological split instead.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
train_loader, test_loader = data_generator(x_train, y_train, x_test, y_test)
class LSTM(nn.Module):
    """Many-to-one LSTM regressor: (batch, seq, features) -> (batch, num_classes)."""

    def __init__(self, num_classes, input_size, hidden_size, num_layers,
                 seq_length=None):
        """Build the stacked LSTM and the output projection.

        Parameters
        ----------
        num_classes : int
            Output width of the final linear layer.
        input_size : int
            Features per time step.
        hidden_size : int
            LSTM hidden-state width.
        num_layers : int
            Number of stacked LSTM layers.
        seq_length : int, optional
            Stored for reference only; defaults to the module-level
            SEQ_LENGTH.  forward() does not use it.
        """
        super().__init__()
        self.num_classes = num_classes
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # Kept for backward compatibility with code that reads this attribute.
        self.seq_length = SEQ_LENGTH if seq_length is None else seq_length
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True)
        # The original hard-coded out_features=1, silently ignoring
        # num_classes; use it instead (the call site passes NUM_CLASSES == 1,
        # so existing behavior is unchanged).
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Run the LSTM (zero initial state) and project the last time step."""
        output, _ = self.lstm(x, None)
        last_step = output[:, -1, :]  # (batch, hidden_size)
        return self.fc(last_step)
# ---- Model, loss, and optimizer (module-level, shared by train()/eval()) ----
model = LSTM(num_classes=NUM_CLASSES, input_size=INPUT_SIZE,
             hidden_size=HIDDEN_SIZE, num_layers=NUM_LAYERS)
criterion = nn.MSELoss()  # regression loss on the scaled target
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
def train():
    """Train the module-level model on train_loader for NUM_EPOCHS epochs.

    Mutates the global model/optimizer state in place and returns nothing.
    Prints the current batch loss every 100 optimizer steps.
    """
    model.train()  # ensure training mode (eval() below flips it)
    step = 0  # renamed from `iter`, which shadowed the builtin
    for epoch in range(NUM_EPOCHS):
        for batch_x, batch_y in train_loader:
            # Targets come out of the loader as (batch,); the model emits
            # (batch, 1).  Variable() was a deprecated no-op and is dropped.
            batch_y = torch.reshape(batch_y, (len(batch_y), 1))
            outputs = model(batch_x)
            loss = criterion(outputs, batch_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            step += 1
            if step % 100 == 0:
                print("iter: %d, loss: %1.5f" % (step, loss.item()))
def eval(test_x, test_y):
    """Evaluate the global model on held-out windows; plot and print metrics.

    Parameters
    ----------
    test_x : np.ndarray, shape (N, SEQ_LENGTH, INPUT_SIZE)
        Scaled input windows.
    test_y : np.ndarray, shape (N,)
        Scaled targets.

    Side effects: switches the model to eval mode, shows a matplotlib
    figure of real vs. predicted values, and prints MAE, RMSE, and the
    first 20 values — all in ORIGINAL units via mm_y.inverse_transform.

    NOTE(review): the name shadows the builtin ``eval``; kept because
    the existing call site depends on it.
    """
    model.eval()
    inputs = torch.from_numpy(test_x).to(torch.float32)  # Variable() was a deprecated no-op
    # Inference only: no_grad skips autograd bookkeeping.
    with torch.no_grad():
        data_predict = model(inputs).numpy()
    # The original round-tripped targets through a tensor; a plain numpy
    # reshape (float32 to match) gives the same column vector.
    y_data_plot = np.reshape(test_y, (-1, 1)).astype(np.float32)
    # Undo the min-max scaling so errors are reported in data units.
    data_predict = mm_y.inverse_transform(data_predict)
    y_data_plot = mm_y.inverse_transform(y_data_plot)
    plt.plot(y_data_plot)
    plt.plot(data_predict)
    plt.legend(('real', 'predict'), fontsize='15')
    plt.show()
    print('MAE/RMSE')
    print(mean_absolute_error(y_data_plot, data_predict))
    print(np.sqrt(mean_squared_error(y_data_plot, data_predict)))
    print(y_data_plot.flatten()[:20])
    print(data_predict.flatten()[:20])
# Run the full pipeline at import time: train on the shuffled windows,
# then plot/report metrics on the held-out split.
# NOTE(review): consider guarding these with `if __name__ == "__main__":`
# so importing this module does not kick off training.
train()
eval(x_test, y_test)