Vectorized computation expressions
import torch
a = torch.ones(1000)
b = torch.ones(1000)
c = torch.zeros(1000)
for i in range(1000):
    c[i] = a[i] + b[i]
d = a + b
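To see why vectorization matters, here is a small illustrative timing of the two approaches (the exact numbers will vary by machine):
import time
start = time.time()
c = torch.zeros(1000)
for i in range(1000):
    c[i] = a[i] + b[i]
print('loop:       %.5f sec' % (time.time() - start))
start = time.time()
d = a + b
print('vectorized: %.5f sec' % (time.time() - start))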
a = torch.ones(3)
b = 10
print(a+b)
%matplotlib inline
import torch
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
import random
print(torch.__version__)
We construct a simple artificial dataset with 1000 samples and 2 input features.
We use the regression model's true weights w = [2, -3.4]ᵀ and bias b = 4.2, plus a random noise term, to generate the labels.
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.from_numpy(np.random.normal(0, 1, (num_examples, num_inputs)))
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.from_numpy(np.random.normal(0, 0.01, size=labels.size()))  # add noise
Each row of features is a vector of length 2, and each element of labels is a scalar (length 1).
print(features[0],labels[0])
# The following two functions are saved in the d2lzh package for later use
def use_svg_display():
    # display plots as vector graphics (SVG)
    display.set_matplotlib_formats('svg')

def set_figsize(figsize=(3.5, 2.5)):
    use_svg_display()
    # set the figure size
    plt.rcParams['figure.figsize'] = figsize
# # After adding the two functions above to ../d2lzh_pytorch, they can be imported like this
# import sys
# sys.path.append("..")
# from d2lzh_pytorch import *
set_figsize()
plt.scatter(features[:, 1].numpy(), labels.numpy(),1)
When training the model, we need to iterate over the dataset and repeatedly read small batches of samples. Here we define a function that returns batch_size random features and labels on each call.
# This function is saved in the d2lzh package for later use
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # shuffle the sample order so batches are read in random order
    for i in range(0, num_examples, batch_size):
        j = torch.LongTensor(indices[i:min(i + batch_size, num_examples)])  # the last batch may be smaller than batch_size
        yield features.index_select(0, j), labels.index_select(0, j)
# yield works like return, except that the function becomes a generator
# torch.index_select(x, 0, indices): the second argument 0 selects along rows
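A tiny illustration of index_select along dimension 0 (not part of the original code):
t = torch.arange(6).view(3, 2)
print(t.index_select(0, torch.LongTensor([2, 0])))  # rows 2 and 0 of t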
# Read one mini-batch of data
batch_size = 10
for X, y in data_iter(batch_size, features=features, labels=labels):
    print(X, '\n', y)
    break
w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, 1)), dtype=torch.double)
b = torch.zeros(1, dtype=torch.double)
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)
# This function is saved in the d2lzh package for later use
def linreg(X, w, b):
    return torch.mm(X, w) + b

def squared_loss(y_hat, y):  # This function is saved in the d2lzh_pytorch package for later use
    # this returns a vector of per-example losses; note that PyTorch's MSELoss does not divide by 2
    return (y_hat - y.view(y_hat.size())) ** 2 / 2
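As a quick illustrative check of that comment (the tensors below are made up for the example), compare squared_loss with nn.MSELoss:
y_hat_demo = torch.tensor([2.0, 3.0])
y_demo = torch.tensor([1.0, 1.0])
print(squared_loss(y_hat_demo, y_demo))        # tensor([0.5000, 2.0000]): per element, divided by 2
print(torch.nn.MSELoss()(y_hat_demo, y_demo))  # tensor(2.5000): mean over elements, no division by 2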
The sgd function below implements the mini-batch stochastic gradient descent algorithm introduced in the previous section; it optimizes the loss function by iteratively updating the model parameters.
def sgd(params, lr, batch_size):  # This function is saved in the d2lzh_pytorch package for later use
    for param in params:
        param.data -= lr * param.grad / batch_size  # note that param.data is used when updating param
import torch
from torch.autograd import Variable
lr = 0.03
num_epochs = 5
net = linreg
loss = squared_loss
# training
for epoch in range(num_epochs):  # training repeats num_epochs times
    # in each epoch, every sample in the dataset is used once
    # X holds the features and y the labels of a mini-batch
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y).sum()
        # compute the gradient of the mini-batch loss
        l.backward()
        # update the model parameters with mini-batch stochastic gradient descent
        sgd([w, b], lr, batch_size)
        # reset the parameter gradients
        w.grad.data.zero_()
        b.grad.data.zero_()
    train_l = loss(net(features, w, b), labels)
    print('epoch %d, loss %f' % (epoch + 1, train_l.mean().item()))
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):  # training takes num_epochs passes over the data
    # In each epoch, every sample in the training set is used once
    # (assuming the number of samples is divisible by the batch size).
    # X and y are the features and labels of a mini-batch.
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y).sum()  # l is the loss on the mini-batch X, y
        l.backward()  # compute the gradient of the mini-batch loss w.r.t. the model parameters
        sgd([w, b], lr, batch_size)  # update the parameters with mini-batch stochastic gradient descent
        # don't forget to reset the gradients
        w.grad.data.zero_()
        b.grad.data.zero_()
    train_l = loss(net(features, w, b), labels)
    print('epoch %d, loss %f' % (epoch + 1, train_l.mean().item()))
print(true_w, '\n', w)
print(true_b, '\n', b)
import torch
from torch import nn  # 'nn' is an abbreviation for neural networks
import numpy as np
torch.manual_seed(1)
print(torch.__version__)
torch.set_default_tensor_type('torch.FloatTensor')
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
import torch.utils.data as Data
batch_size = 10
# Combine the features and labels of the training data
dataset = Data.TensorDataset(features, labels)
# Put the dataset into a DataLoader
# data_iter here is used in the same way as in the previous section; let's read and print the first mini-batch of samples
data_iter = Data.DataLoader(
    dataset=dataset,        # torch TensorDataset format
    batch_size=batch_size,  # mini-batch size
    shuffle=True,           # whether to shuffle the data (shuffling is usually a good idea)
    num_workers=2,          # use multiple worker processes to read the data
)
for X, y in data_iter:
    print(X, '\n', y)
    break
# Arguments of nn.Linear(in_features, out_features, bias=True)
class LinearNet(nn.Module):
    def __init__(self, n_feature):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(n_feature, 1)
    def forward(self, x):
        y = self.linear(x)
        return y
net = LinearNet(num_inputs)
print(net)  # print() displays the structure of the network
In fact, we can build the network even more conveniently with nn.Sequential. Sequential is an ordered container: layers are added to the computation graph in the order they are passed in.
# Approach 1
from collections import OrderedDict
net = nn.Sequential(nn.Linear(num_inputs, 1)
                    # further layers could be passed in here
                    )
# Approach 2
net = nn.Sequential()
net.add_module('linear', nn.Linear(num_inputs, 1))
# net.add_module(......)  # keep adding further layers here
# Approach 3
net = nn.Sequential(OrderedDict([
    ('linear', nn.Linear(num_inputs, 1))
    # ......
]))
print(net)
print(net[0])
All learnable parameters of the model can be inspected via net.parameters(), which returns a generator.
for param in net.parameters():
    print(param)
from torch.nn import init
init.normal_(net[0].weight, mean=0, std=0.01)
init.constant_(net[0].bias, val=0)
# the bias data can also be modified directly: net[0].bias.data.fill_(0)
loss = nn.MSELoss()
import torch.optim as optim
optimizer = optim.SGD(net.parameters(),lr=0.03)
print(optimizer)
# optimizer = optim.SGD([
#     # parameters without an explicit learning rate use the outer default learning rate
#     {'params': net.subnet1.parameters()},               # lr = 0.03
#     {'params': net.subnet2.parameters(), 'lr': 0.01},
# ], lr=0.03)
# Adjusting the learning rate:
# for param_group in optimizer.param_groups:
#     param_group['lr'] *= 0.1  # the learning rate becomes 0.1x its previous value
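As a runnable illustration of the commented-out learning-rate adjustment above (applied to the optimizer created earlier and then undone, so the training below is unaffected):
for param_group in optimizer.param_groups:
    param_group['lr'] *= 0.1   # learning rate becomes 0.1x of its previous value
print(optimizer)
for param_group in optimizer.param_groups:
    param_group['lr'] = 0.03   # restore the original learning rate before training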
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        l = loss(output, y.view(-1, 1))
        optimizer.zero_grad()  # reset the gradients, equivalent to net.zero_grad()
        l.backward()
        optimizer.step()
    print('epoch %d, loss: %f' % (epoch, l.item()))
Next we compare the learned model parameters with the true parameters. We take the layer we need from net and inspect its weight and bias.
dense = net[0]
print(true_w,dense.weight)
print(true_b,dense.bias)
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
#import time
import sys
#sys.path.append("..")  # so that d2lzh_pytorch in the parent directory can be imported
import d2lzh_pytorch as d2l
print(torch.__version__)
print(torchvision.__version__)
mnist_train = torchvision.datasets.FashionMNIST(
    root='~/Datasets/FashionMNIST/', train=True, download=True, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(
    root='~/Datasets/FashionMNIST/', train=False, download=True, transform=transforms.ToTensor())
print(type(mnist_train))
print(len(mnist_train), len(mnist_test))
feature, label = mnist_train[0]
print(feature.shape, label)
If we do not apply any transform, the loaded samples are PIL images. Let's take a look at the image type:
mnist_PIL = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=True, download=True)
PIL_feature, label = mnist_PIL[0]
print(PIL_feature)
# This function is saved in the d2lzh package for later use
def show_fashion_mnist(images, labels):
    d2l.use_svg_display()
    # the underscore denotes a variable we ignore (do not use)
    _, figs = plt.subplots(1, len(images), figsize=(12, 12))
    for f, img, lbl in zip(figs, images, labels):
        f.imshow(img.view((28, 28)).numpy())
        f.set_title(lbl)
        f.axes.get_xaxis().set_visible(False)
        f.axes.get_yaxis().set_visible(False)
    plt.show()
# This function is saved in the d2lzh package for later use
def get_fashion_mnist_labels(labels):
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in labels]
X, y = [], []
for i in range(10):
    X.append(mnist_train[i][0])
    y.append(mnist_train[i][1])
show_fashion_mnist(X, get_fashion_mnist_labels(y))
batch_size = 256
if sys.platform.startswith('win'):
    num_workers = 0  # 0 means no extra worker processes are used to speed up data loading
else:
    num_workers = 4
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
import torch
import torchvision
import numpy as np
import d2lzh_pytorch as d2l
print(torch.__version__)
print(torchvision.__version__)
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs = 784
print(28*28)
num_outputs = 10
W = torch.tensor(np.random.normal(0, 0.01, (num_inputs, num_outputs)), dtype=torch.float)
b = torch.zeros(num_outputs, dtype=torch.float)
# We need gradients for the model parameters
W.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)
tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], requires_grad=True)
X = torch.Tensor([[1,2,3],[4,5,6]])
X
tensor([[1., 2., 3.],
[4., 5., 6.]])
print(X.sum(dim=0, keepdim=True))  # dim=0 sums down each column
print(X.sum(dim=1, keepdim=True))  # dim=1 sums across each row
tensor([[5., 7., 9.]])
tensor([[ 6.],
[15.]])
def softmax(x):
    x_exp = x.exp()
    partition = x_exp.sum(dim=1, keepdim=True)
    return x_exp / partition  # broadcasting is applied here
# softmax maps every element to a non-negative value and makes each row sum to 1
x = torch.rand((2,5))
x_prob = softmax(x)
x_prob
tensor([[0.2273, 0.1223, 0.2860, 0.1291, 0.2352],
[0.2017, 0.2129, 0.1801, 0.2801, 0.1252]])
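As a quick illustrative check, every row of x_prob sums to 1:
print(x_prob.sum(dim=1))  # tensor([1.0000, 1.0000])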
def net(X):
    return softmax(torch.mm(X.view((-1, num_inputs)), W) + b)
y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
y = torch.LongTensor([0, 2])
y_hat.gather(1, y.view(-1, 1))  # select the 1st element of row 1 and the 3rd element of row 2 of y_hat, i.e. 0.1 and 0.5
tensor([[0.1000],
[0.5000]])
def cross_entropy(y_hat, y):
    return -torch.log(y_hat.gather(1, y.view(-1, 1)))
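Applied to the y_hat and y defined above, this gives -log(0.1) and -log(0.5) (an illustrative check):
print(cross_entropy(y_hat, y))  # tensor([[2.3026], [0.6931]])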
def accuracy(y_hat, y):
    return (y_hat.argmax(dim=1) == y).float().mean().item()
accuracy(y_hat, y)
0.5
# This function is saved in the d2lzh_pytorch package for later use. It will be improved step by step:
# its full implementation is described in the "Image Augmentation" section
def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n
print(evaluate_accuracy(test_iter, net))
# The model has not been trained yet
0.1016
num_epochs, lr = 5, 0.1

# This function is saved in the d2lzh_pytorch package for later use
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, optimizer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            # reset the gradients
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            if optimizer is None:
                d2l.sgd(params, lr, batch_size)
            else:
                optimizer.step()  # used in the "Concise Implementation of Softmax Regression" section
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, batch_size, [W, b], lr)
epoch 1, loss 0.7859, train acc 0.751, test acc 0.795
epoch 2, loss 0.5707, train acc 0.813, test acc 0.813
epoch 3, loss 0.5260, train acc 0.826, test acc 0.821
epoch 4, loss 0.5010, train acc 0.832, test acc 0.826
epoch 5, loss 0.4856, train acc 0.836, test acc 0.828
X, y = next(iter(test_iter))
true_labels = d2l.get_fashion_mnist_labels(y.numpy())
pred_labels = d2l.get_fashion_mnist_labels(net(X).argmax(dim=1).numpy())
titles = [true + '\n' + pred for true, pred in zip(true_labels, pred_labels)]
d2l.show_fashion_mnist(X[0:9], titles[0:9])
[Figure: the first nine test images, each titled with its true label (first line) and predicted label (second line)]
import torch
from torch import nn
from torch.nn import init
import numpy as np
import d2lzh_pytorch as d2l
print(torch.__version__)
1.3.1+cpu
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs = 784
num_outputs = 10
class LinearNet(nn.Module):
    def __init__(self, num_inputs, num_outputs):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)
    def forward(self, x):  # shape of x: (batch, 1, 28, 28)
        y = self.linear(x.view(x.shape[0], -1))
        return y
# net = LinearNet(num_inputs, num_outputs)
# FlattenLayer reshapes the input: each batch of samples has shape (batch_size, 1, 28, 28),
# and it must be flattened to (batch_size, 784) before entering the fully connected layer
class FlattenLayer(nn.Module):
    def __init__(self):
        super(FlattenLayer, self).__init__()
    def forward(self, x):  # shape of x: (batch, *, *, ...)
        return x.view(x.shape[0], -1)
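A quick shape check of FlattenLayer (illustrative, not part of the original code):
print(FlattenLayer()(torch.zeros(2, 1, 28, 28)).shape)  # torch.Size([2, 784])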
from collections import OrderedDict
net = nn.Sequential(
    # FlattenLayer(),
    # LinearNet(num_inputs, num_outputs)
    OrderedDict([
        ('flatten', FlattenLayer()),
        ('linear', nn.Linear(num_inputs, num_outputs))])  # our own LinearNet(num_inputs, num_outputs) would also work here
)
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)
Parameter containing:
tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], requires_grad=True)
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
num_epochs = 5
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None,
None, optimizer)
epoch 1, loss 0.0031, train acc 0.751, test acc 0.770
epoch 2, loss 0.0022, train acc 0.813, test acc 0.815
epoch 3, loss 0.0021, train acc 0.827, test acc 0.818
epoch 4, loss 0.0020, train acc 0.831, test acc 0.807
epoch 5, loss 0.0019, train acc 0.836, test acc 0.822
Concretely, given a mini-batch of samples $\boldsymbol{X} \in \mathbb{R}^{n \times d}$ with batch size $n$ and $d$ inputs, suppose the multilayer perceptron has a single hidden layer with $h$ hidden units. Denote the output of the hidden layer (also called the hidden variable) by $\boldsymbol{H}$, so $\boldsymbol{H} \in \mathbb{R}^{n \times h}$. Since both the hidden layer and the output layer are fully connected, the hidden layer has weight parameters $\boldsymbol{W}_h \in \mathbb{R}^{d \times h}$ and bias parameters $\boldsymbol{b}_h \in \mathbb{R}^{1 \times h}$, and the output layer has weight and bias parameters $\boldsymbol{W}_o \in \mathbb{R}^{h \times q}$ and $\boldsymbol{b}_o \in \mathbb{R}^{1 \times q}$.

Let us first consider a design with a single hidden layer. Its output $\boldsymbol{O} \in \mathbb{R}^{n \times q}$ is computed as

$$\begin{aligned} \boldsymbol{H} &= \boldsymbol{X} \boldsymbol{W}_h + \boldsymbol{b}_h,\\ \boldsymbol{O} &= \boldsymbol{H} \boldsymbol{W}_o + \boldsymbol{b}_o, \end{aligned}$$

that is, the output of the hidden layer is fed directly into the output layer. Substituting the first equation into the second gives

$$\boldsymbol{O} = (\boldsymbol{X} \boldsymbol{W}_h + \boldsymbol{b}_h)\boldsymbol{W}_o + \boldsymbol{b}_o = \boldsymbol{X} \boldsymbol{W}_h\boldsymbol{W}_o + \boldsymbol{b}_h \boldsymbol{W}_o + \boldsymbol{b}_o.$$

As the combined equation shows, even though a hidden layer has been introduced, this network is still equivalent to a single-layer network whose output-layer weights are $\boldsymbol{W}_h\boldsymbol{W}_o$ and whose bias is $\boldsymbol{b}_h \boldsymbol{W}_o + \boldsymbol{b}_o$. It is easy to see that adding more hidden layers of this kind still yields nothing beyond a single-layer network with only an output layer.
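This collapse can also be checked numerically. The snippet below is my own illustration (the layer sizes 4, 3 and 2 are arbitrary), not code from the original text:
lin1 = nn.Linear(4, 3)
lin2 = nn.Linear(3, 2)
X_demo = torch.randn(5, 4)
stacked = lin2(lin1(X_demo))                  # two affine layers with no activation in between
W = lin1.weight.t() @ lin2.weight.t()         # combined weight W_h W_o (row-vector convention)
b = lin1.bias @ lin2.weight.t() + lin2.bias   # combined bias b_h W_o + b_o
print(torch.allclose(stacked, X_demo @ W + b, atol=1e-5))  # True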
This is why a nonlinear activation function is needed between layers. One of the most widely used is the ReLU (rectified linear unit) function, defined for an element $x$ as

$$\text{ReLU}(x) = \max(x, 0).$$
%matplotlib inline
import torch
import numpy as np
import matplotlib.pylab as plt
import sys
#sys.path.append("..")
import d2lzh_pytorch as d2l
def xyplot(x_vals, y_vals, name):
    d2l.set_figsize(figsize=(5, 2.5))
    d2l.plt.plot(x_vals.detach().numpy(), y_vals.detach().numpy())
    d2l.plt.xlabel('x')
    d2l.plt.ylabel(name + '(x)')
x = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)
y = x.relu()
xyplot(x,y,'relu')
[Figure: plot of the ReLU function]
Clearly, the derivative of ReLU is 0 when the input is negative and 1 when the input is positive. Although ReLU is not differentiable at 0, we take its derivative there to be 0. Below we plot the derivative of the ReLU function.
y.sum().backward()
xyplot(x,x.grad,'grad of relu')
[Figure: plot of the derivative of the ReLU function]
The sigmoid function transforms its input to a value between 0 and 1:

$$\text{sigmoid}(x) = \frac{1}{1 + \exp(-x)}.$$
y = x.sigmoid()
xyplot(x,y,'sigmoid')
[Figure: plot of the sigmoid function]
By the chain rule, the derivative of the sigmoid function is

$$\text{sigmoid}'(x) = \text{sigmoid}(x)\left(1 - \text{sigmoid}(x)\right).$$

Below we plot the derivative of the sigmoid function. When the input is 0, the derivative reaches its maximum value of 0.25; the further the input moves away from 0, the closer the derivative gets to 0.
x.grad.zero_()
y.sum().backward()
xyplot(x,x.grad,'grad of sigmoid')
[Figure: plot of the derivative of the sigmoid function]
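The identity above can be verified against the autograd result (an illustrative check, not part of the original code):
print(torch.allclose(x.grad, (y * (1 - y)).detach()))  # True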
The tanh (hyperbolic tangent) function transforms its input to a value between -1 and 1:

$$\text{tanh}(x) = \frac{1 - \exp(-2x)}{1 + \exp(-2x)}.$$

Next we plot the tanh function. When the input is close to 0, tanh is close to a linear transformation. Although its shape is similar to that of the sigmoid function, the tanh function is symmetric about the origin.
y = x.tanh()
xyplot(x, y, 'tanh')
[Figure: plot of the tanh function]
Below we plot the derivative of the tanh function. When the input is 0, the derivative reaches its maximum value of 1; the further the input moves away from 0, the closer the derivative gets to 0.
x.grad.zero_()
y.sum().backward()
xyplot(x, x.grad, 'grad of tanh')
[Figure: plot of the derivative of the tanh function]
A multilayer perceptron is a neural network composed of fully connected layers with at least one hidden layer, where the output of each hidden layer is transformed by an activation function. The number of layers and the number of hidden units in each hidden layer are hyperparameters. Taking a single hidden layer and the notation defined earlier in this section, the multilayer perceptron computes its output as

$$\begin{aligned} \boldsymbol{H} &= \phi(\boldsymbol{X} \boldsymbol{W}_h + \boldsymbol{b}_h),\\ \boldsymbol{O} &= \boldsymbol{H} \boldsymbol{W}_o + \boldsymbol{b}_o, \end{aligned}$$

where $\phi$ denotes the activation function. For classification, we can apply the softmax operation to the output $\boldsymbol{O}$ and use the cross-entropy loss from softmax regression. For regression, we set the number of outputs to 1 and feed the output $\boldsymbol{O}$ directly into the squared loss used in linear regression.
# Import the required packages
import torch
import numpy as np
#import sys
#sys.path.append("..")
import d2lzh_pytorch as d2l
We continue to use the Fashion-MNIST dataset here.
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs, num_outputs, num_hiddens = 784, 10, 256
W1 = torch.tensor(np.random.normal(0, 0.01, (num_inputs, num_hiddens)), dtype=torch.float)
b1 = torch.zeros(num_hiddens, dtype=torch.float)
W2 = torch.tensor(np.random.normal(0, 0.01, (num_hiddens, num_outputs)), dtype=torch.float)
b2 = torch.zeros(num_outputs, dtype=torch.float)
params = [W1, b1, W2, b2]
for param in params:
    param.requires_grad_(requires_grad=True)
def relu(X):
    return torch.max(input=X, other=torch.tensor(0.0))

def net(X):
    X = X.view((-1, num_inputs))
    H = relu(torch.matmul(X, W1) + b1)
    return torch.matmul(H, W2) + b2
loss = torch.nn.CrossEntropyLoss()
The steps for training a multilayer perceptron are the same as for training softmax regression in Section 3.6. We directly call the train_ch3 function from the d2lzh_pytorch package, whose implementation was introduced there. Here we set the number of epochs to 5 and the learning rate to 100.0. (The learning rate only looks huge: d2l.sgd divides the gradient by batch_size while CrossEntropyLoss already averages the loss over the batch, so the effective step size is roughly lr / batch_size.)
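A rough sanity check of that effective step size (my own arithmetic, not from the original text):
print(100.0 / 256)  # ≈ 0.39, in the same ballpark as the lr of 0.5 used in the concise implementation below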
num_epochs, lr = 5, 100.0
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params, lr)
epoch 1, loss 0.0030, train acc 0.713, test acc 0.802
epoch 2, loss 0.0019, train acc 0.824, test acc 0.772
epoch 3, loss 0.0017, train acc 0.845, test acc 0.829
epoch 4, loss 0.0015, train acc 0.857, test acc 0.817
epoch 5, loss 0.0015, train acc 0.864, test acc 0.851
import torch
from torch import nn
from torch.nn import init
import numpy as np
#import sys
#sys.path.append("..")
import d2lzh_pytorch as d2l
num_inputs, num_outputs, num_hiddens = 784, 10, 256
net = nn.Sequential(
    d2l.FlattenLayer(),
    nn.Linear(num_inputs, num_hiddens),
    nn.ReLU(),
    nn.Linear(num_hiddens, num_outputs),
)
for params in net.parameters():
    init.normal_(params, mean=0, std=0.01)
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
num_epochs = 5
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
epoch 1, loss 0.0031, train acc 0.706, test acc 0.748
epoch 2, loss 0.0019, train acc 0.824, test acc 0.797
epoch 3, loss 0.0017, train acc 0.843, test acc 0.839
epoch 4, loss 0.0015, train acc 0.854, test acc 0.837
epoch 5, loss 0.0014, train acc 0.863, test acc 0.831