Disclaimer: these notes follow Dive into Deep Learning (《动手学深度学习》, PyTorch edition). If anything here infringes on that work, please let me know and I will delete it.
"""
简单的线性回归实现
本节将介绍如何只利用tensor和autograd实现一个线性回归的训练
"""
获取数据,数据处理,训练模型
import torch
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
import random
import sys
sys.path.append("..")
from d2lzh_pytorch import *  # helper functions, defined at the end of this post
# Generate the dataset: construct a small synthetic training set so we can
# directly compare the learned parameters against the true model parameters
num_inputs = 2       # number of input features
num_examples = 1000  # dataset size
true_w = [2, -3.4]
true_b = 4.2
features = torch.from_numpy(np.random.normal(0, 1, (num_examples, num_inputs)))  # note: from_numpy yields a float64 (double) tensor
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
# Add Gaussian noise (mean 0, std 0.01) to the labels generated from the true parameters
labels += torch.from_numpy(np.random.normal(0, 0.01, size=labels.size()))
# Each row of features is a length-2 vector; each element of labels is a scalar
print(features[0], labels[0])
# Scatter-plot the second feature features[:, 1] against labels to visualize their linear relationship
set_figsize()
plt.scatter(features[:, 1].numpy(), labels.numpy(), 1)
# plt.show()
# Read and print the first minibatch of samples
batch_size = 10
for x, y in data_iter(batch_size, features, labels):
    print(x, y)
    break
# Initialize the weights as normal random numbers with mean 0 and std 0.01, and the bias as 0
w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, 1)), dtype=torch.double)  # double, to match the float64 features
b = torch.zeros(1, dtype=torch.double)
w.requires_grad_(requires_grad=True)  # track gradients so these parameters can be updated during training
b.requires_grad_(requires_grad=True)
# The code above handles data generation and reading
# Train the model
lr = 0.03
num_epochs = 3
net = linreg          # the model: the linreg function defined below
loss = squared_loss   # the loss: squared loss, defined below
for epoch in range(num_epochs):  # training makes num_epochs passes over the data
    # Each epoch uses every sample in the training set once
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y).sum()  # sum the minibatch loss into a scalar so backward can be called
        l.backward()                     # compute gradients of the minibatch loss w.r.t. the parameters
        sgd([w, b], lr, batch_size)      # update the parameters with minibatch stochastic gradient descent
        # Zero the gradients so they do not accumulate across iterations
        w.grad.data.zero_()
        b.grad.data.zero_()
    train_l = loss(net(features, w, b), labels)
    print('epoch %d, loss %f' % (epoch + 1, train_l.mean().item()))
# After training, print the learned parameters alongside the true ones
print(true_w, '\n', w)
print(true_b, '\n', b)
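As a small convenience (my addition, not from the book), the learned parameters can also be read out as plain Python numbers:
# Hypothetical convenience snippet: flatten w and extract b as Python floats
print('w:', w.detach().view(-1).tolist())  # should be close to true_w = [2, -3.4]
print('b:', b.item())                      # should be close to true_b = 4.2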
The helper functions below are the contents of the d2lzh_pytorch module imported at the top of this post.
import torch
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
import random
def use_svg_display():
    # Render figures as vector graphics (SVG)
    display.set_matplotlib_formats('svg')

def set_figsize(figsize=(3.5, 2.5)):
    # Set the figure size
    use_svg_display()
    plt.rcParams['figure.figsize'] = figsize
# Read the data: each call yields the features and labels of batch_size random samples
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # samples are read in random order
    for i in range(0, num_examples, batch_size):
        j = torch.LongTensor(indices[i:min(i + batch_size, num_examples)])
        # the last batch may contain fewer than batch_size samples
        yield features.index_select(0, j), labels.index_select(0, j)
# Define the linear regression model; torch.mm performs the matrix multiplication
def linreg(X, w, b):
    return torch.mm(X, w) + b
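A quick shape check (a sketch of my own, not from the book): torch.mm requires 2-D inputs, and the bias b of shape [1] broadcasts across the minibatch.
X = torch.ones(3, 2, dtype=torch.double)  # a minibatch of 3 examples
w = torch.ones(2, 1, dtype=torch.double)
b = torch.zeros(1, dtype=torch.double)
print(linreg(X, w, b).shape)              # torch.Size([3, 1])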
# Define the loss function
def squared_loss(y_hat, y):
    # Note: this returns a vector of per-sample losses; PyTorch's built-in MSELoss does not divide by 2
    return (y_hat - y.view(y_hat.size())) ** 2 / 2
# Define the optimization algorithm: minibatch stochastic gradient descent,
# which iteratively updates the model parameters to reduce the loss
def sgd(params, lr, batch_size):
    for param in params:
        param.data -= lr * param.grad / batch_size  # update .data so the step is not tracked by autograd
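Since the comment in squared_loss mentions nn.MSELoss, here is a small sanity check (my sketch, not part of the original module; it assumes squared_loss above is in scope): squared_loss should equal the elementwise output of nn.MSELoss(reduction='none') divided by 2.
import torch
from torch import nn

y_hat = torch.tensor([[2.5], [0.0]])
y = torch.tensor([2.0, 1.0])
manual = squared_loss(y_hat, y)
builtin = nn.MSELoss(reduction='none')(y_hat, y.view(y_hat.size())) / 2
print(torch.allclose(manual, builtin))  # True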
Environment: Windows 10 + Anaconda 4.8.2 + PyTorch + PyCharm
# Define the model: implement a linear regression model with nn.Module
class LinearNet(nn.Module):
    def __init__(self, n_feature):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(n_feature, 1)  # note: the attribute name must match its use in forward

    def forward(self, x):
        y = self.linear(x)
        return y

net = LinearNet(num_inputs)
print(net)
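For reference, print(net) shows the model structure; the output should look roughly like this (reconstructed from the standard nn.Module repr, so exact formatting may differ across PyTorch versions):
LinearNet(
  (linear): Linear(in_features=2, out_features=1, bias=True)
)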
""" # 写法2 net = nn.Sequential( nn.Linear(num_inputs, 1) # 此处还可以传入其它层 ) # 写法3 net = nn.Sequential() net.add_module('linear', nn.Linear(num_inputs, 1)) """ #写法4 from collections import OrderedDict net = nn.Sequential(OrderedDict([ ('linear', nn.Linear(num_inputs, 1)) # ...... ]))
After generating the data, the network can be called directly. The complete concise implementation:
import numpy as np
import torch
from torch import nn
import torch.utils.data as Data
import random

# step1: generate the dataset
num_inputs = 2
num_examples = 100
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)

# step2: read the data
batch_size = 10
# combine the features and labels of the training data
dataset = Data.TensorDataset(features, labels)
# read random minibatches
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
for X, y in data_iter:
    print(X, y)
    break

"""
# step3: define the model
class LinearNet(nn.Module):
    def __init__(self, n_feature):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(n_feature, 1)
    def forward(self, x):
        y = self.linear(x)
        return y
net = LinearNet(num_inputs)

# alternative style 1
net = nn.Sequential(
    nn.Linear(num_inputs, 1)
)
"""
net = nn.Sequential()
net.add_module('linear', nn.Linear(num_inputs, 1))
print(net)

# step4: initialize the model parameters
from torch.nn import init
init.normal_(net[0].weight, mean=0, std=0.01)  # mean 0, standard deviation 0.01
init.constant_(net[0].bias, val=0)             # the bias data could also be modified directly

# step5: define the loss function
loss = nn.MSELoss()  # mean squared error loss

# step6: define the optimization algorithm
import torch.optim as optim
optimizer = optim.SGD(net.parameters(), lr=0.03)
print(optimizer)

# step7: train the model
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        l = loss(output, y.view(-1, 1))
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
    print('epoch %d, loss: %f' % (epoch, l.item()))
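As in the from-scratch version, the learned parameters can be compared with the true ones (a short sketch; net[0] works here because net is an nn.Sequential):
dense = net[0]
print(true_w, dense.weight.data)  # should be close to [2, -3.4]
print(true_b, dense.bias.data)    # should be close to 4.2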
Problem: when implementing the linear model with an nn.Module subclass, the error 'LinearNet' object does not support indexing appears. The cause is that indexing such as net[0] is only supported by container modules like nn.Sequential; a custom nn.Module subclass exposes its layers as attributes instead.
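A minimal sketch of the fix (my illustration, assuming the LinearNet class from above): initialize the parameters through the layer attribute rather than by indexing.
import torch
from torch import nn
from torch.nn import init

class LinearNet(nn.Module):
    def __init__(self, n_feature):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(n_feature, 1)
    def forward(self, x):
        return self.linear(x)

net = LinearNet(2)
# net[0].weight would raise: 'LinearNet' object does not support indexing
init.normal_(net.linear.weight, mean=0, std=0.01)  # access the layer by attribute instead
init.constant_(net.linear.bias, val=0)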