Consider a neural network with $d$ layers, where $t$ indexes the layer:

$$\bm{h}^t = f_t(\bm{h}^{t-1}), \qquad y = \ell \circ f_d \circ \cdots \circ f_1(\bm{x})$$

The gradient of the loss $\ell$ with respect to the parameters $\bm{W}^t$ is

$$\frac{\partial\ell}{\partial\bm{W}^t}=\frac{\partial\ell}{\partial\bm{h}^d}\,\frac{\partial\bm{h}^d}{\partial\bm{h}^{d-1}}\cdots\frac{\partial\bm{h}^{t+1}}{\partial\bm{h}^{t}}\,\frac{\partial\bm{h}^{t}}{\partial\bm{W}^{t}}$$

The update uses the product of the per-layer gradients as the gradient, so the expression above is a chain of $d-t$ matrix multiplications. Such a long product is very likely to cause two problems: gradient explosion and gradient vanishing.
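To see concretely why a product of $d-t$ matrices is dangerous, the following sketch (purely illustrative; the matrix sizes and scales are my own assumptions, not from the original text) multiplies 100 random matrices, as the chain rule does across layers, and prints the magnitude of the result:

import torch

torch.manual_seed(0)
prod_big = torch.eye(4)
prod_small = torch.eye(4)
for _ in range(100):
    prod_big = prod_big @ torch.randn(4, 4)               # "typical" random layer Jacobians
    prod_small = prod_small @ (0.1 * torch.randn(4, 4))   # Jacobians with small entries
print(prod_big.abs().max())    # astronomically large (or inf) -> gradient explosion
print(prod_small.abs().max())  # essentially zero              -> gradient vanishing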
# Sequential block --- a custom re-implementation of Sequential --- lets us customize more of what the model does
#!pip install torchsummary
import torch
from torch import nn
import torch.nn.functional as F
from torchsummary import summary
class MySequential(nn.Module):
    def __init__(self, *args):
        super().__init__()
        for idx, block in enumerate(args):
            # _modules is an OrderedDict maintained by nn.Module; keys must be strings
            self._modules[str(idx)] = block

    def forward(self, X):
        # Call each block in the order in which it was added
        for block in self._modules.values():
            X = block(X)
        return X

net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
summary(net, input_size=(1, 20), device="cpu")  # device="cpu" so the demo also runs without a GPU
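# Because the blocks live in self._modules (the OrderedDict that nn.Module itself maintains),
# their parameters are registered automatically -- a quick sanity check (not in the original notes):
print(net)                          # shows the registered sub-blocks
print(len(list(net.parameters())))  # 4 tensors: two weight matrices and two bias vectors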
# Executing arbitrary code in the forward function
class FixedHiddenMLP(nn.Module):
    def __init__(self):
        super().__init__()
        # Fixed random weights: requires_grad=False, so they are never updated during training
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        self.linear = nn.Linear(20, 20)

    def forward(self, X):
        print(f"input shape: {X.shape}")   # print the shape of the input
        X = self.linear(X)
        X = F.relu(torch.mm(X, self.rand_weight) + 1)
        # Reuse the same fully connected layer; both calls share the same parameters
        X = self.linear(X)
        # Control flow inside forward: halve X until its absolute sum is at most 1
        while X.abs().sum() > 1:
            X /= 2
        print(f"output shape: {X.shape}")  # print the shape of the output
        return X.sum()

X = torch.rand(2, 20)  # dummy input for the demonstrations below; shape (2, 20) matches the layers above
net = FixedHiddenMLP()
net(X)
# Mixing and matching different ways of composing blocks
class NewMySequential(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU())
        self.linear = nn.Linear(32, 16)

    def forward(self, X):
        return self.linear(self.net(X))

net = nn.Sequential(NewMySequential(), nn.Linear(16, 20), FixedHiddenMLP())
net(X)
print(net)
# Parameter access: use a simple Sequential network whose layers expose weight/bias directly
net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
X = torch.rand(size=(2, 4))
net(X)
print(net[2].state_dict())  # inspect all parameters of that layer directly
print(net[2].bias)          # the bias of the last layer
print(net[2].bias.data)     # access the underlying tensor directly
print(net[2].weight.grad)   # access the current gradient (None before any backward pass)
print(*[(name, param.shape) for name, param in net[0].named_parameters()])
# all parameters of net[0]; * unpacks the list so each entry is printed
print(*[(name, param.shape) for name, param in net.named_parameters()])
# all parameters of the whole net; * unpacks the list so each entry is printed
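# Parameters can also be looked up by their dotted string names, "<module index>.<parameter name>"
# (a small sketch, assuming the simple Linear(4, 8) / Linear(8, 1) net defined above):
print(net.state_dict()['2.bias'].data)  # the same tensor as net[2].bias.data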
def init_normal(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, mean=0, std=0.01)
        nn.init.zeros_(m.bias)

net.apply(init_normal)
net[0].weight.data[0], net[0].bias.data[0]
def xavier(m):
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)  # a classic initialization scheme

def init_42(m):
    if type(m) == nn.Linear:
        nn.init.constant_(m.weight, 42)

# apply() works on any module, whether the whole network or a single layer
net[0].apply(xavier)   # initialize the first layer with Xavier initialization
net[2].apply(init_42)  # initialize the third layer with the constant 42
print(net[0].weight.data[0])
print(net[2].weight.data)
# Tied parameters: the same layer object can be reused in several places in the network
shared = nn.Linear(8, 8)
net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), shared, nn.ReLU(), shared,
                    nn.ReLU(), nn.Linear(8, 1))
net(X)
# The two shared layers point to the same parameter object: during training their gradients
# are accumulated and the parameters are updated together, so they always stay identical
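# A quick check (a sketch, assuming the net above) that the two occurrences of shared
# really are the same parameters:
print(net[2].weight.data[0] == net[4].weight.data[0])  # all True
net[2].weight.data[0, 0] = 100                         # modify through one handle...
print(net[2].weight.data[0] == net[4].weight.data[0])  # ...and the other handle sees the change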
# Define a custom layer without parameters
import torch
import torch.nn.functional as F
from torch import nn

class CenteredLayer(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, X):
        # Subtract the mean so the output is centered at zero
        return X - X.mean()

layer = CenteredLayer()
layer(torch.FloatTensor([1, 2, 3, 4, 5]))
net = nn.Sequential(nn.Linear(8, 128), CenteredLayer())  # a custom layer can be added to a Sequential directly
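# Sanity check (not in the original notes): the network's output should have numerically zero mean
Y = net(torch.rand(4, 8))
print(Y.mean())  # very close to 0, up to floating-point error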
# Define a custom layer with parameters
class MyLinear(nn.Module):
    def __init__(self, in_units, units):
        super().__init__()
        # nn.Parameter registers the tensors as trainable parameters of the module
        self.weight = nn.Parameter(torch.randn(in_units, units))
        self.bias = nn.Parameter(torch.randn(units,))

    def forward(self, X):
        linear = torch.matmul(X, self.weight.data) + self.bias.data
        return F.relu(linear)

linear = MyLinear(5, 3)
linear.weight
# Run forward propagation directly with the custom layer
linear(torch.rand(2, 5))
# Build a model out of custom layers
net = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))
net(torch.rand(2, 64))
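# Note: the forward pass above multiplies by self.weight.data, which detaches the computation from
# autograd, so gradients would not flow into weight and bias during training. If the layer is meant
# to be trained, a variant like this sketch (my assumption, not part of the original notes) keeps
# the operation in the autograd graph:
class MyTrainableLinear(nn.Module):
    def __init__(self, in_units, units):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(in_units, units))
        self.bias = nn.Parameter(torch.randn(units))

    def forward(self, X):
        # Use the parameters themselves (not .data) so autograd can track the computation
        return F.relu(torch.matmul(X, self.weight) + self.bias)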
# Loading and saving tensors
import torch
from torch import nn
from torch.nn import functional as F

x = torch.arange(4)
torch.save(x, 'x-file')
x2 = torch.load('x-file')
x2
# Store a list of tensors and read it back into memory
y = torch.zeros(4)
torch.save([x, y], 'x-files')
x2, y2 = torch.load('x-files')
(x2, y2)
# Write and read a dictionary that maps strings to tensors
mydict = {'x': x, 'y': y}
torch.save(mydict, 'mydict')
mydict2 = torch.load('mydict')
mydict2
# Saving and loading model parameters
class MLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(20, 256)
        self.output = nn.Linear(256, 10)

    def forward(self, x):
        return self.output(F.relu(self.hidden(x)))

net = MLP()
X = torch.randn(size=(2, 20))
Y = net(X)
# Store the model's parameters in a file called "mlp.params"
torch.save(net.state_dict(), 'mlp.params')
# Instantiate a fresh copy of the original MLP and read the stored parameters straight from the file
clone = MLP()
clone.load_state_dict(torch.load('mlp.params'))
clone.eval()
Y_clone = clone(X)
Y_clone == Y
# Verify that the restored model behaves exactly like the original