A while ago I switched from Keras to PyTorch for various reasons. Reproducing the code I commonly used in Keras took me about a week, working from the official documentation and a lot of scattered searching, and I ended up collecting a set of snippets I use all the time. This post records them. If you are also coming to PyTorch from another deep learning framework, it may serve as a reference for getting started; the code can be used directly for regression-style problems, and it also captures some of my own thinking while learning.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pandas as pd
from torch.autograd import Variable  # since PyTorch 0.4 this is a no-op wrapper; plain tensors behave the same
from sklearn.metrics import mean_squared_error, r2_score
import matplotlib.pyplot as plt
Figuring out the data handling also took a while. The input data comes as xlsx spreadsheets, which are read with pandas and then converted to tensors.
'''import data'''
dimension = 8
num_epochs = 1000
data_high = np.array(pd.read_excel('./data/high_8.xlsx',header=None))
data_low = np.array(pd.read_excel('./data/low_8.xlsx',header=None))
data_test = np.array(pd.read_excel('./data/test_8.xlsx',header=None))
i1 = len(data_low)
i2 = len(data_high)
i3 = len(data_test)
lowx = data_low[:,0:dimension]
lowy = data_low[:,dimension]
highx = data_high[:,0:dimension]
highy = data_high[:,dimension]
testx = data_test[:,0:dimension]
testy = data_test[:,dimension]
# torch.from_numpy keeps the float64 dtype of the spreadsheets, so the network is cast to double() below
datalow_tensor = torch.from_numpy(data_low)
datahigh_tensor = torch.from_numpy(data_high)
# Variable is kept here for compatibility with older tutorials; the wrappers could be dropped
lowx_tensor = Variable(torch.from_numpy(lowx))
lowy_tensor = Variable(torch.from_numpy(lowy))
highx_tensor = Variable(torch.from_numpy(highx))
highy_tensor = Variable(torch.from_numpy(highy))
testx_tensor = Variable(torch.from_numpy(testx))
testy_tensor = Variable(torch.from_numpy(testy))
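The code above keeps everything as full-batch tensors, which is fine for small tables. If the data grows, the same tensors can also be wrapped in a TensorDataset and a DataLoader for mini-batch training. This is only a sketch of that variant; the batch size of 64 is an arbitrary illustrative value, not part of the original setup.
from torch.utils.data import TensorDataset, DataLoader

# Pair up the training features and targets sample by sample.
train_dataset = TensorDataset(lowx_tensor, lowy_tensor)
# Shuffle and split into mini-batches; batch_size=64 is just an example.
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)

for batch_x, batch_y in train_loader:
    # batch_x has shape (batch_size, dimension), batch_y has shape (batch_size,)
    pass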
Here input_num is the dimension of the input, output_num is the dimension of the output, and num_1 is the number of neurons in each hidden layer.
The first version builds the network from nn.Sequential blocks. It is compact, but reaching an individual module inside a Sequential is less convenient (see the short example after the class).
''' net build'''
class Net_dnn_1(nn.Module):  # Net_dnn_1 and Net_dnn_2 below are two ways of writing the same network
    def __init__(self, input_num, output_num, num_1):
        super(Net_dnn_1, self).__init__()
        self.fc1 = nn.Sequential(nn.Linear(input_num, num_1), nn.ReLU())
        self.fc2 = nn.Sequential(nn.Linear(num_1, num_1), nn.Dropout(p=0.5), nn.ReLU())
        self.fc3 = nn.Sequential(nn.Linear(num_1, num_1), nn.Dropout(p=0.5), nn.ReLU())
        self.fc4 = nn.Sequential(nn.Linear(num_1, num_1), nn.Dropout(p=0.5), nn.ReLU())
        self.fc5 = nn.Sequential(nn.Linear(num_1, num_1), nn.Dropout(p=0.5), nn.ReLU())
        self.fc6 = nn.Sequential(nn.Linear(num_1, num_1), nn.Dropout(p=0.5), nn.ReLU())
        self.fc7 = nn.Sequential(nn.Linear(num_1, num_1), nn.Dropout(p=0.5), nn.ReLU())
        self.fc8 = nn.Linear(num_1, output_num)

    def forward(self, x):
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        x = self.fc4(x)
        x = self.fc5(x)
        x = self.fc6(x)
        x = self.fc7(x)
        x = self.fc8(x)
        return x
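The inconvenience mentioned above is mainly about reaching into a Sequential block. Each block is still indexable, so an individual module can be pulled out when needed; a small sketch (the variable names here are just for illustration):
net = Net_dnn_1(8, 1, 250)

# fc2 is a Sequential(Linear, Dropout, ReLU); index it to get at a single module.
linear_in_fc2 = net.fc2[0]          # the nn.Linear inside fc2
print(linear_in_fc2.weight.shape)   # torch.Size([250, 250])

# named_modules() walks every registered submodule, useful for inspecting the net.
for name, module in net.named_modules():
    if isinstance(module, nn.Linear):
        print(name, module)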
The second version declares every layer as a separate attribute. It is verbose, but each module is directly accessible by name. (A more compact, loop-built equivalent is sketched after the class.)
class Net_dnn_2(nn.Module):
    def __init__(self, input_num, output_num, num_1):
        super(Net_dnn_2, self).__init__()
        self.layer_1 = nn.Linear(input_num, num_1)
        self.layer_2 = nn.ReLU()
        self.layer_3 = nn.Linear(num_1, num_1)
        self.layer_4 = nn.Dropout(p=0.5)
        self.layer_5 = nn.ReLU()
        self.layer_6 = nn.Linear(num_1, num_1)
        self.layer_7 = nn.Dropout(p=0.5)
        self.layer_8 = nn.ReLU()
        self.layer_9 = nn.Linear(num_1, num_1)
        self.layer_10 = nn.Dropout(p=0.5)
        self.layer_11 = nn.ReLU()
        self.layer_12 = nn.Linear(num_1, num_1)
        self.layer_13 = nn.Dropout(p=0.5)
        self.layer_14 = nn.ReLU()
        self.layer_15 = nn.Linear(num_1, num_1)
        self.layer_16 = nn.Dropout(p=0.5)
        self.layer_17 = nn.ReLU()
        self.layer_18 = nn.Linear(num_1, num_1)
        self.layer_19 = nn.Dropout(p=0.5)
        self.layer_20 = nn.ReLU()
        self.layer_21 = nn.Linear(num_1, output_num)

    def forward(self, x):
        x = self.layer_1(x)
        x = self.layer_2(x)
        x = self.layer_3(x)
        x = self.layer_4(x)
        x = self.layer_5(x)
        x = self.layer_6(x)
        x = self.layer_7(x)
        x = self.layer_8(x)
        x = self.layer_9(x)
        x = self.layer_10(x)
        x = self.layer_11(x)
        x = self.layer_12(x)
        x = self.layer_13(x)
        x = self.layer_14(x)
        x = self.layer_15(x)
        x = self.layer_16(x)
        x = self.layer_17(x)
        x = self.layer_18(x)
        x = self.layer_19(x)
        x = self.layer_20(x)
        x = self.layer_21(x)
        return x
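If the two versions above feel repetitive, the same stack of layers can also be assembled in a loop and handed to a single nn.Sequential. This is only a sketch of that idea; Net_dnn_compact is a name I made up for illustration, not something from the original post.
class Net_dnn_compact(nn.Module):
    """Same architecture as above: input block, 6 dropout blocks, output layer."""
    def __init__(self, input_num, output_num, num_1, n_hidden=7, p_drop=0.5):
        super(Net_dnn_compact, self).__init__()
        layers = [nn.Linear(input_num, num_1), nn.ReLU()]           # first hidden block, no dropout
        for _ in range(n_hidden - 1):                                # remaining hidden blocks
            layers += [nn.Linear(num_1, num_1), nn.Dropout(p=p_drop), nn.ReLU()]
        layers.append(nn.Linear(num_1, output_num))                  # output layer
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)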
The network is trained with the Adam optimizer and an MSE loss.
net_1 = Net_dnn_1(8, 1, 250)   # 8 input features, 1 output, 250 neurons per hidden layer
net_1 = net_1.double()         # the xlsx data is float64, so cast the weights to double
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(net_1.parameters(), lr=1e-2)
Loss_list = []
Loss_test = []
'''DNN train'''
for epoch in range(num_epochs):
    net_1.train()
    optimizer.zero_grad()
    y_p_train = net_1(lowx_tensor)
    y_p_train_1 = y_p_train.squeeze(-1)
    loss = criterion(y_p_train_1, lowy_tensor)
    loss_test = criterion(net_1(highx_tensor).squeeze(-1), highy_tensor)
    r2 = r2_score(lowy, y_p_train_1.detach().numpy())       # R2 on the training data
    Loss_list.append(loss.detach().numpy() / i1)             # MSELoss already averages; the extra /i1 only rescales the curve
    Loss_test.append(loss_test.detach().numpy() / i2)
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 5 == 0:
        print('Epoch[%d/%d], Loss:%.4f, r2:%.4f'
              % (epoch + 1, num_epochs, loss.item() / i1, r2))
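The test set loaded at the top (testx_tensor / testy_tensor) is not used in the loop above. Here is a sketch of how it could be evaluated after training, switching dropout off with eval() and gradients off with no_grad(); the metric calls rely on the sklearn imports at the top.
'''evaluate on the held-out test set'''
net_1.eval()                      # disable dropout for inference
with torch.no_grad():
    y_p_test = net_1(testx_tensor).squeeze(-1)

y_p_test_np = y_p_test.numpy()
print('test MSE: %.4f' % mean_squared_error(testy, y_p_test_np))
print('test R2 : %.4f' % r2_score(testy, y_p_test_np))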
Finally, a simple visualization of the loss curves:
x1 = range(0, num_epochs)
y1 = Loss_list
y2 = Loss_test
plt.subplot(3, 1, 1)
plt.plot(x1, y1, '.-', label='Train loss',markevery=20)
plt.plot(x1, y2, '*-', label='Test loss',markevery=20)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()