PyTorch Notes V: Multiple Linear Regression

Table of Contents

    • Import the necessary packages
    • Data loading
      • Reading house prices from the raw file
      • Loading via sklearn
      • Comparing the two loading methods
    • Training
    • Model prediction

Import the necessary packages

import torch
from torch import nn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
from sklearn.preprocessing import scale
from torchsummary import summary
from matplotlib.font_manager import FontProperties

torch.__version__
'1.6.0'

Data loading

The data will be loaded in two ways: by parsing the raw housing.csv file directly, and by using the dataset loader built into sklearn.

Reading house prices from the raw file

data_frame = pd.read_csv('./boston/housing.csv', header=None)

The raw housing.csv stores each whole record as a single whitespace-separated string in one cell, so we read that cell, split it, and convert each field from string to float.

all_data = []

# Each DataFrame row holds one whitespace-separated string; split it and
# convert every non-empty field to float.
for row in range(len(data_frame)):
    row_data = []
    for data in list(data_frame.iloc[row])[0].split(' '):
        if data != '':
            row_data.append(float(data))
    all_data.append(row_data)

all_data = np.array(all_data)
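
Since the fields are just runs of whitespace, pandas can also do the splitting for us. A minimal alternative sketch, assuming the same file path (all_data_alt is an illustrative name):

# Equivalent parse: let pandas split on whitespace directly.
all_data_alt = pd.read_csv('./boston/housing.csv', header=None, delim_whitespace=True).values

Both approaches should produce the same (506, 14) float array.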

Split the data into training features and labels (ground truth)

x_data = all_data[:, :13]
y_data = all_data[:, 13]

Inspect part of the data

print('x_data:\n', x_data, '\n x_data shape:', x_data.shape,
      '\ny_data:\n', y_data, '\n y_data shape:', y_data.shape)
x_data:
 [[6.3200e-03 1.8000e+01 2.3100e+00 ... 1.5300e+01 3.9690e+02 4.9800e+00]
 [2.7310e-02 0.0000e+00 7.0700e+00 ... 1.7800e+01 3.9690e+02 9.1400e+00]
 [2.7290e-02 0.0000e+00 7.0700e+00 ... 1.7800e+01 3.9283e+02 4.0300e+00]
 ...
 [6.0760e-02 0.0000e+00 1.1930e+01 ... 2.1000e+01 3.9690e+02 5.6400e+00]
 [1.0959e-01 0.0000e+00 1.1930e+01 ... 2.1000e+01 3.9345e+02 6.4800e+00]
 [4.7410e-02 0.0000e+00 1.1930e+01 ... 2.1000e+01 3.9690e+02 7.8800e+00]] 
 x_data shape: (506, 13) 
y_data:
 [24.  21.6 34.7 33.4 36.2 28.7 22.9 27.1 16.5 18.9 15.  18.9 21.7 20.4
 18.2 19.9 23.1 17.5 20.2 18.2 13.6 19.6 15.2 14.5 15.6 13.9 16.6 14.8
 18.4 21.  12.7 14.5 13.2 13.1 13.5 18.9 20.  21.  24.7 30.8 34.9 26.6
 25.3 24.7 21.2 19.3 20.  16.6 14.4 19.4 19.7 20.5 25.  23.4 18.9 35.4
 24.7 31.6 23.3 19.6 18.7 16.  22.2 25.  33.  23.5 19.4 22.  17.4 20.9
 24.2 21.7 22.8 23.4 24.1 21.4 20.  20.8 21.2 20.3 28.  23.9 24.8 22.9
 23.9 26.6 22.5 22.2 23.6 28.7 22.6 22.  22.9 25.  20.6 28.4 21.4 38.7
 43.8 33.2 27.5 26.5 18.6 19.3 20.1 19.5 19.5 20.4 19.8 19.4 21.7 22.8
 18.8 18.7 18.5 18.3 21.2 19.2 20.4 19.3 22.  20.3 20.5 17.3 18.8 21.4
 15.7 16.2 18.  14.3 19.2 19.6 23.  18.4 15.6 18.1 17.4 17.1 13.3 17.8
 14.  14.4 13.4 15.6 11.8 13.8 15.6 14.6 17.8 15.4 21.5 19.6 15.3 19.4
 17.  15.6 13.1 41.3 24.3 23.3 27.  50.  50.  50.  22.7 25.  50.  23.8
 23.8 22.3 17.4 19.1 23.1 23.6 22.6 29.4 23.2 24.6 29.9 37.2 39.8 36.2
 37.9 32.5 26.4 29.6 50.  32.  29.8 34.9 37.  30.5 36.4 31.1 29.1 50.
 33.3 30.3 34.6 34.9 32.9 24.1 42.3 48.5 50.  22.6 24.4 22.5 24.4 20.
 21.7 19.3 22.4 28.1 23.7 25.  23.3 28.7 21.5 23.  26.7 21.7 27.5 30.1
 44.8 50.  37.6 31.6 46.7 31.5 24.3 31.7 41.7 48.3 29.  24.  25.1 31.5
 23.7 23.3 22.  20.1 22.2 23.7 17.6 18.5 24.3 20.5 24.5 26.2 24.4 24.8
 29.6 42.8 21.9 20.9 44.  50.  36.  30.1 33.8 43.1 48.8 31.  36.5 22.8
 30.7 50.  43.5 20.7 21.1 25.2 24.4 35.2 32.4 32.  33.2 33.1 29.1 35.1
 45.4 35.4 46.  50.  32.2 22.  20.1 23.2 22.3 24.8 28.5 37.3 27.9 23.9
 21.7 28.6 27.1 20.3 22.5 29.  24.8 22.  26.4 33.1 36.1 28.4 33.4 28.2
 22.8 20.3 16.1 22.1 19.4 21.6 23.8 16.2 17.8 19.8 23.1 21.  23.8 23.1
 20.4 18.5 25.  24.6 23.  22.2 19.3 22.6 19.8 17.1 19.4 22.2 20.7 21.1
 19.5 18.5 20.6 19.  18.7 32.7 16.5 23.9 31.2 17.5 17.2 23.1 24.5 26.6
 22.9 24.1 18.6 30.1 18.2 20.6 17.8 21.7 22.7 22.6 25.  19.9 20.8 16.8
 21.9 27.5 21.9 23.1 50.  50.  50.  50.  50.  13.8 13.8 15.  13.9 13.3
 13.1 10.2 10.4 10.9 11.3 12.3  8.8  7.2 10.5  7.4 10.2 11.5 15.1 23.2
  9.7 13.8 12.7 13.1 12.5  8.5  5.   6.3  5.6  7.2 12.1  8.3  8.5  5.
 11.9 27.9 17.2 27.5 15.  17.2 17.9 16.3  7.   7.2  7.5 10.4  8.8  8.4
 16.7 14.2 20.8 13.4 11.7  8.3 10.2 10.9 11.   9.5 14.5 14.1 16.1 14.3
 11.7 13.4  9.6  8.7  8.4 12.8 10.5 17.1 18.4 15.4 10.8 11.8 14.9 12.6
 14.1 13.  13.4 15.2 16.1 17.8 14.9 14.1 12.7 13.5 14.9 20.  16.4 17.7
 19.5 20.2 21.4 19.9 19.  19.1 19.1 20.1 19.9 19.6 23.2 29.8 13.8 13.3
 16.7 12.  14.6 21.4 23.  23.7 25.  21.8 20.6 21.2 19.1 20.6 15.2  7.
  8.1 13.6 20.1 21.8 24.5 23.1 19.7 18.3 21.2 17.5 16.8 22.4 20.6 23.9
 22.  11.9] 
 y_data shape: (506,)

Loading via sklearn

Fetch the dataset directly with sklearn's load_boston

from sklearn.datasets import load_boston

boston = load_boston()
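
A caveat for newer environments: load_boston was deprecated in scikit-learn 1.0 and removed in 1.2. One possible fallback is fetching the same dataset from OpenML (a sketch, assuming network access; boston_openml is an illustrative name):

from sklearn.datasets import fetch_openml

# Fallback for scikit-learn >= 1.2, where load_boston no longer exists.
boston_openml = fetch_openml(name='boston', version=1, as_frame=False)
x_alt, y_alt = boston_openml.data, boston_openml.target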

List the dataset keys

boston.keys()
dict_keys(['data', 'target', 'feature_names', 'DESCR', 'filename'])

Get the training data

boston['data'].shape
(506, 13)

Get the feature names

boston['feature_names']
array(['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',
       'TAX', 'PTRATIO', 'B', 'LSTAT'], dtype='<U7')

Get one training sample

boston['data'][0]
array([6.320e-03, 1.800e+01, 2.310e+00, 0.000e+00, 5.380e-01, 6.575e+00,
       6.520e+01, 4.090e+00, 1.000e+00, 2.960e+02, 1.530e+01, 3.969e+02,
       4.980e+00])

Get the label data (ground truth)

boston['target']
array([24. , 21.6, 34.7, 33.4, 36.2, 28.7, 22.9, 27.1, 16.5, 18.9, 15. ,
       18.9, 21.7, 20.4, 18.2, 19.9, 23.1, 17.5, 20.2, 18.2, 13.6, 19.6,
       15.2, 14.5, 15.6, 13.9, 16.6, 14.8, 18.4, 21. , 12.7, 14.5, 13.2,
       13.1, 13.5, 18.9, 20. , 21. , 24.7, 30.8, 34.9, 26.6, 25.3, 24.7,
       21.2, 19.3, 20. , 16.6, 14.4, 19.4, 19.7, 20.5, 25. , 23.4, 18.9,
       35.4, 24.7, 31.6, 23.3, 19.6, 18.7, 16. , 22.2, 25. , 33. , 23.5,
       19.4, 22. , 17.4, 20.9, 24.2, 21.7, 22.8, 23.4, 24.1, 21.4, 20. ,
       20.8, 21.2, 20.3, 28. , 23.9, 24.8, 22.9, 23.9, 26.6, 22.5, 22.2,
       23.6, 28.7, 22.6, 22. , 22.9, 25. , 20.6, 28.4, 21.4, 38.7, 43.8,
       33.2, 27.5, 26.5, 18.6, 19.3, 20.1, 19.5, 19.5, 20.4, 19.8, 19.4,
       21.7, 22.8, 18.8, 18.7, 18.5, 18.3, 21.2, 19.2, 20.4, 19.3, 22. ,
       20.3, 20.5, 17.3, 18.8, 21.4, 15.7, 16.2, 18. , 14.3, 19.2, 19.6,
       23. , 18.4, 15.6, 18.1, 17.4, 17.1, 13.3, 17.8, 14. , 14.4, 13.4,
       15.6, 11.8, 13.8, 15.6, 14.6, 17.8, 15.4, 21.5, 19.6, 15.3, 19.4,
       17. , 15.6, 13.1, 41.3, 24.3, 23.3, 27. , 50. , 50. , 50. , 22.7,
       25. , 50. , 23.8, 23.8, 22.3, 17.4, 19.1, 23.1, 23.6, 22.6, 29.4,
       23.2, 24.6, 29.9, 37.2, 39.8, 36.2, 37.9, 32.5, 26.4, 29.6, 50. ,
       32. , 29.8, 34.9, 37. , 30.5, 36.4, 31.1, 29.1, 50. , 33.3, 30.3,
       34.6, 34.9, 32.9, 24.1, 42.3, 48.5, 50. , 22.6, 24.4, 22.5, 24.4,
       20. , 21.7, 19.3, 22.4, 28.1, 23.7, 25. , 23.3, 28.7, 21.5, 23. ,
       26.7, 21.7, 27.5, 30.1, 44.8, 50. , 37.6, 31.6, 46.7, 31.5, 24.3,
       31.7, 41.7, 48.3, 29. , 24. , 25.1, 31.5, 23.7, 23.3, 22. , 20.1,
       22.2, 23.7, 17.6, 18.5, 24.3, 20.5, 24.5, 26.2, 24.4, 24.8, 29.6,
       42.8, 21.9, 20.9, 44. , 50. , 36. , 30.1, 33.8, 43.1, 48.8, 31. ,
       36.5, 22.8, 30.7, 50. , 43.5, 20.7, 21.1, 25.2, 24.4, 35.2, 32.4,
       32. , 33.2, 33.1, 29.1, 35.1, 45.4, 35.4, 46. , 50. , 32.2, 22. ,
       20.1, 23.2, 22.3, 24.8, 28.5, 37.3, 27.9, 23.9, 21.7, 28.6, 27.1,
       20.3, 22.5, 29. , 24.8, 22. , 26.4, 33.1, 36.1, 28.4, 33.4, 28.2,
       22.8, 20.3, 16.1, 22.1, 19.4, 21.6, 23.8, 16.2, 17.8, 19.8, 23.1,
       21. , 23.8, 23.1, 20.4, 18.5, 25. , 24.6, 23. , 22.2, 19.3, 22.6,
       19.8, 17.1, 19.4, 22.2, 20.7, 21.1, 19.5, 18.5, 20.6, 19. , 18.7,
       32.7, 16.5, 23.9, 31.2, 17.5, 17.2, 23.1, 24.5, 26.6, 22.9, 24.1,
       18.6, 30.1, 18.2, 20.6, 17.8, 21.7, 22.7, 22.6, 25. , 19.9, 20.8,
       16.8, 21.9, 27.5, 21.9, 23.1, 50. , 50. , 50. , 50. , 50. , 13.8,
       13.8, 15. , 13.9, 13.3, 13.1, 10.2, 10.4, 10.9, 11.3, 12.3,  8.8,
        7.2, 10.5,  7.4, 10.2, 11.5, 15.1, 23.2,  9.7, 13.8, 12.7, 13.1,
       12.5,  8.5,  5. ,  6.3,  5.6,  7.2, 12.1,  8.3,  8.5,  5. , 11.9,
       27.9, 17.2, 27.5, 15. , 17.2, 17.9, 16.3,  7. ,  7.2,  7.5, 10.4,
        8.8,  8.4, 16.7, 14.2, 20.8, 13.4, 11.7,  8.3, 10.2, 10.9, 11. ,
        9.5, 14.5, 14.1, 16.1, 14.3, 11.7, 13.4,  9.6,  8.7,  8.4, 12.8,
       10.5, 17.1, 18.4, 15.4, 10.8, 11.8, 14.9, 12.6, 14.1, 13. , 13.4,
       15.2, 16.1, 17.8, 14.9, 14.1, 12.7, 13.5, 14.9, 20. , 16.4, 17.7,
       19.5, 20.2, 21.4, 19.9, 19. , 19.1, 19.1, 20.1, 19.9, 19.6, 23.2,
       29.8, 13.8, 13.3, 16.7, 12. , 14.6, 21.4, 23. , 23.7, 25. , 21.8,
       20.6, 21.2, 19.1, 20.6, 15.2,  7. ,  8.1, 13.6, 20.1, 21.8, 24.5,
       23.1, 19.7, 18.3, 21.2, 17.5, 16.8, 22.4, 20.6, 23.9, 22. , 11.9])

Comparing the two loading methods

Check whether the training data match

(x_data == boston['data']).all()
True

Check whether the labels (ground truth) match

(y_data == boston['target']).all()
True

Training

Get the device

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

device
device(type='cuda', index=0)

Build the model

class Net(nn.Module):
    def __init__(self, input_num, hidden_num, output_num):
        super(Net, self).__init__()
        # A small fully connected network: 13 features -> 14 hidden units -> 1 output.
        # The trailing ReLU clamps predictions to be non-negative, which is harmless
        # here (house prices are positive) but unusual for a regression head.
        self.net = nn.Sequential(
            nn.Linear(input_num, hidden_num),
            nn.ReLU(),
            nn.Linear(hidden_num, output_num),
            nn.ReLU()
        )

    def forward(self, input):
        return self.net(input)

net = Net(input_num=13, hidden_num=14, output_num=1).to(device)

summary(net, input_size=(13,))
----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Linear-1                   [-1, 14]             196
              ReLU-2                   [-1, 14]               0
            Linear-3                    [-1, 1]              15
              ReLU-4                    [-1, 1]               0
================================================================
Total params: 211
Trainable params: 211
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.00
Forward/backward pass size (MB): 0.00
Params size (MB): 0.00
Estimated Total Size (MB): 0.00
----------------------------------------------------------------
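
The parameter counts are easy to verify by hand: the first Linear layer has 13 × 14 weights plus 14 biases (196), and the second has 14 × 1 weights plus 1 bias (15), giving 211 in total. A quick sanity check:

# 13*14 + 14 = 196 and 14*1 + 1 = 15, so 211 trainable parameters.
sum(p.numel() for p in net.parameters())
211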

Standardize the features with scale and reshape the labels to (N, 1) with unsqueeze

x_data, y_data = torch.FloatTensor(scale(x_data)).to(device), torch.unsqueeze(torch.FloatTensor(y_data), dim=1).to(device)
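
Under the hood, scale standardizes each column to zero mean and unit variance (population std, ddof=0). A minimal numpy equivalent of what it computes:

# Per-column standardization, same as sklearn.preprocessing.scale.
features = all_data[:, :13]
standardized = (features - features.mean(axis=0)) / features.std(axis=0)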

Split into training and validation sets

train_x, train_y = x_data[0:400, :], y_data[0:400]
valid_x, valid_y = x_data[400:, :], y_data[400:]
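
One caveat: the rows of the dataset are stored in a fixed, non-random order, so taking the last 106 rows as validation is not an unbiased split (note the block of low-price samples near the end of y_data). The shuffle imported earlier was never used; a sketch that shuffles before splitting, which would change the losses printed below (perm is an illustrative name):

# Optional: shuffle samples before the train/valid split.
# A CPU index tensor is fine for indexing CUDA tensors.
perm = torch.randperm(x_data.shape[0])
x_data, y_data = x_data[perm], y_data[perm]
train_x, train_y = x_data[0:400, :], y_data[0:400]
valid_x, valid_y = x_data[400:, :], y_data[400:]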

Set the hyperparameters

epochs = 200
learning_rate = 0.001
batch_size = 10
total_step = int(train_x.shape[0] / batch_size)
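
With 400 training rows and batch_size = 10, total_step is 40, so every sample is seen once per epoch; a batch size that did not divide 400 evenly would silently drop the remainder. The manual slicing in the training loop below could equally be written with a DataLoader; a sketch (shuffle=False to mirror the fixed slicing; train_loader is an illustrative name):

from torch.utils.data import TensorDataset, DataLoader

# Produces the same (10, 13) / (10, 1) mini-batches as the manual slicing.
train_loader = DataLoader(TensorDataset(train_x, train_y),
                          batch_size=batch_size, shuffle=False)
for xs, ys in train_loader:
    pass  # each (xs, ys) is one mini-batch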

Define the optimizer and the loss function

optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)

loss_func = torch.nn.MSELoss()
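
MSELoss averages the squared differences over all elements, i.e. loss = mean((prediction - target)^2). A tiny worked example:

# ((2-1)^2 + (4-6)^2) / 2 = (1 + 4) / 2 = 2.5
loss_func(torch.tensor([[2.0], [4.0]]), torch.tensor([[1.0], [6.0]]))
tensor(2.5000)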

Define a parameter-reset function so that re-running the training cell always starts from freshly initialized weights

def weight_reset(m):
    # Re-initialize Conv2d and Linear layers; net.apply() calls this
    # recursively on every submodule.
    if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
        m.reset_parameters()

%%time

net.apply(weight_reset)

epoch_train_loss_value = []
step_train_loss_value = []
epoch_valid_loss_value = []

for i in range(epochs):
    for step in range(total_step):
        # Slice out the next mini-batch.
        xs = train_x[step * batch_size:(step + 1) * batch_size, :]
        ys = train_y[step * batch_size:(step + 1) * batch_size]
        prediction = net(xs)
        loss = loss_func(prediction, ys)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        step_train_loss_value.append(loss.cpu().detach().numpy())
    # Evaluate on the validation set without building an autograd graph,
    # and store a plain float so matplotlib can plot the list later.
    with torch.no_grad():
        valid_loss = loss_func(net(valid_x), valid_y).item()
    epoch_valid_loss_value.append(valid_loss)
    # Note: np.mean here runs over every step recorded so far (all epochs),
    # so the printed train_loss is a smoothed running average.
    epoch_train_loss_value.append(np.mean(step_train_loss_value))
    print('epoch={:3d}/{:3d}, train_loss={:.4f}, valid_loss={:.4f}'.format(i + 1,
                                                                    epochs,
                                                                    np.mean(step_train_loss_value),
                                                                    valid_loss))
epoch=  1/200, train_loss=653.8335, valid_loss=264.7458
epoch=  2/200, train_loss=646.1258, valid_loss=261.1905
epoch=  3/200, train_loss=637.3733, valid_loss=257.5962
epoch=  4/200, train_loss=627.2027, valid_loss=254.1343
epoch=  5/200, train_loss=615.4006, valid_loss=250.7759
epoch=  6/200, train_loss=601.7940, valid_loss=247.1643
epoch=  7/200, train_loss=586.2170, valid_loss=243.3159
epoch=  8/200, train_loss=568.6688, valid_loss=239.3569
epoch=  9/200, train_loss=549.4550, valid_loss=235.2637
epoch= 10/200, train_loss=529.1013, valid_loss=231.0793
......................................................
epoch=190/200, train_loss=60.6248, valid_loss=16.4171
epoch=191/200, train_loss=60.3678, valid_loss=16.3581
epoch=192/200, train_loss=60.1132, valid_loss=16.3041
epoch=193/200, train_loss=59.8609, valid_loss=16.2524
epoch=194/200, train_loss=59.6110, valid_loss=16.2020
epoch=195/200, train_loss=59.3633, valid_loss=16.1552
epoch=196/200, train_loss=59.1179, valid_loss=16.1132
epoch=197/200, train_loss=58.8747, valid_loss=16.0770
epoch=198/200, train_loss=58.6338, valid_loss=16.0435
epoch=199/200, train_loss=58.3949, valid_loss=16.0146
epoch=200/200, train_loss=58.1583, valid_loss=15.9905
Wall time: 14.5 s

Visualize the loss

fig = plt.gcf()
fig.set_size_inches(10, 5)

plt.xlabel('Epochs', fontsize=15)
plt.ylabel('Loss', fontsize=15)
plt.plot(epoch_train_loss_value, 'blue', label='Train loss')
plt.plot(epoch_valid_loss_value, 'red', label='Valid loss')
plt.legend(loc='best')
plt.title('Training and Validation loss', fontsize=15)
plt.show()

[Figure 1: training and validation loss curves]

Model prediction

Iterate over the validation data and compute a prediction for each sample

prediction = []

for i in range(valid_x.shape[0]):
    prediction.append(net(valid_x[i, :]).item())
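
Looping sample by sample works, but a single batched forward pass under torch.no_grad() produces the same values more efficiently and skips graph construction; a minimal sketch (batched_prediction is an illustrative name):

# One forward pass over the whole validation set.
with torch.no_grad():
    batched_prediction = net(valid_x).squeeze(1).cpu().numpy()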

Visualize the predictions. After 200 epochs of training, the predictions track the ground truth closely, suggesting the model fits the data reasonably well.

fig = plt.gcf()
fig.set_size_inches(10, 5)

myfont = FontProperties(fname='./font/msyh.ttc')  # Chinese font so the CJK labels render

plt.title('预测值与实际值', fontproperties=myfont, fontsize=15)  # 'Predicted vs. actual values'
plt.scatter(np.arange(len(prediction)), prediction, label='Prediction', s=20)
plt.scatter(np.arange(len(prediction)), valid_y.cpu().numpy(), label='GroundTruth', s=20)
plt.xlabel('数据/组', fontproperties=myfont, fontsize=15)  # 'Sample index'
plt.ylabel('房价', fontproperties=myfont, fontsize=15)  # 'House price'
plt.legend()

plt.show()

[Figure 2: predicted vs. ground-truth prices on the validation set]
