Layer: takes a set of inputs, generates the corresponding outputs, and is described by a set of adjustable parameters.
Block: describes a single layer, a component consisting of multiple layers, or the entire model itself; blocks can be combined recursively into larger components.
# Take the multilayer perceptron from the previous chapter as an example
import torch
from torch import nn
from torch.nn import functional as F  # functional provides stateless functions that carry no parameters

net = nn.Sequential(nn.Linear(20, 256),
                    nn.ReLU(),
                    nn.Linear(256, 10))
X = torch.rand(2, 20)
net(X)
tensor([[-0.0483, 0.1510, -0.1159, 0.0637, 0.0996, -0.1699, 0.1072, -0.0492,
-0.1295, -0.2415],
[-0.1151, 0.1059, -0.1224, 0.0256, -0.0237, -0.1785, 0.0105, -0.1170,
-0.1108, -0.1422]], grad_fn=<AddmmBackward0>)
Here nn.Sequential is the class PyTorch uses to represent a block. We build the model by instantiating nn.Sequential and passing the layers in as arguments.
Calling the model with net(X) is actually shorthand for net.__call__(X). The forward propagation function chains the blocks together, taking each block's output as the next block's input.
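A quick sanity check that the two spellings are equivalent (a minimal snippet; __call__ additionally runs any registered hooks before and after forward, which is why net(X) is preferred over calling net.forward(X) directly):

Y1 = net(X)
Y2 = net.__call__(X)
torch.equal(Y1, Y2)  # True: net(X) dispatches through __call__ to forward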
The basic functionality a block must provide: take inputs, produce outputs through a forward propagation function, and store and expose the parameters needed for that computation:
class MLP(nn.Module):  # a custom model must inherit from the nn.Module base class
    def __init__(self):
        super().__init__()  # initialize via the parent class constructor
        self.hidden = nn.Linear(20, 256)  # hidden layer
        self.out = nn.Linear(256, 10)     # output layer

    def forward(self, X):  # define the model's forward propagation
        # F.relu is a plain function call, unlike nn.ReLU, which instantiates the ReLU class
        return self.out(F.relu(self.hidden(X)))

net = MLP()
X, net(X)
(tensor([[0.8223, 0.2317, 0.2167, 0.2294, 0.8206, 0.0267, 0.6652, 0.5543, 0.9675,
0.8493, 0.1979, 0.8684, 0.9007, 0.8543, 0.9402, 0.3485, 0.4197, 0.6307,
0.0776, 0.8749],
[0.6078, 0.8124, 0.1102, 0.8815, 0.4162, 0.4978, 0.5868, 0.6088, 0.7090,
0.8099, 0.9512, 0.0493, 0.8988, 0.7997, 0.7061, 0.0673, 0.6092, 0.3032,
0.4287, 0.6183]]),
tensor([[ 0.2697, -0.2452, -0.2702, 0.1155, 0.0762, -0.2333, -0.1353, 0.1700,
0.1048, 0.0197],
[ 0.3513, -0.3166, -0.2621, 0.1910, 0.1542, -0.0595, -0.0876, 0.1494,
0.1625, 0.0250]], grad_fn=<AddmmBackward0>))
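On the comment above: nn.ReLU is a Module that you instantiate and then call, while F.relu is a stateless function; both compute the same thing. A minimal check:

x = torch.tensor([-1.0, 2.0])
torch.equal(F.relu(x), nn.ReLU()(x))  # True: same elementwise max(0, x)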
A simplified MySequential can be implemented by defining just two key functions, __init__ and forward:
class MySequential(nn.Module):
    def __init__(self, *args):
        super().__init__()
        for idx, module in enumerate(args):  # enumerate() pairs each element of an iterable with its index
            self._modules[str(idx)] = module  # register each module in the _modules dict, keyed by its index

    def forward(self, X):
        for block in self._modules.values():
            X = block(X)
        return X

net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
net(X)
tensor([[ 0.1703, -0.1589, 0.1464, 0.0128, 0.1446, 0.0349, 0.2976, 0.0873,
0.0138, 0.1869],
[ 0.1007, -0.2184, 0.1443, -0.0698, 0.0846, 0.0186, 0.2745, 0.1464,
-0.1429, 0.1786]], grad_fn=<AddmmBackward0>)
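The built-in nn.Sequential performs essentially the same registration; it also accepts an OrderedDict, which gives the submodules readable names instead of numeric indices. A minimal sketch:

from collections import OrderedDict

named_net = nn.Sequential(OrderedDict([
    ('hidden', nn.Linear(20, 256)),
    ('act', nn.ReLU()),
    ('out', nn.Linear(256, 10)),
]))
named_net.hidden  # submodules become accessible by name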
class FixedHiddenMLP(nn.Module):
    def __init__(self):
        super().__init__()
        # this random weight is not a model parameter: no gradient is computed
        # and it stays fixed during training
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        self.linear = nn.Linear(20, 20)

    def forward(self, X):
        X = self.linear(X)
        X = F.relu(torch.mm(X, self.rand_weight) + 1)
        X = self.linear(X)  # reuse the fully connected layer: equivalent to two layers sharing parameters
        # control flow
        while X.abs().sum() > 1:  # while the L1 norm exceeds 1, halve the output (no real meaning, purely a demonstration)
            X /= 2
        return X.sum()

net = FixedHiddenMLP()
net(X)
tensor(0.0186, grad_fn=<SumBackward0>)
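Because rand_weight is a plain tensor rather than an nn.Parameter, it does not appear among the model's parameters; if you want it saved in state_dict() and moved along with .to(device), register it as a buffer instead. A minimal sketch:

[name for name, _ in net.named_parameters()]  # only ['linear.weight', 'linear.bias']

# alternative inside __init__: keeps the tensor non-trainable but tracked
# self.register_buffer('rand_weight', torch.rand(20, 20))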
The following example mixes together the building blocks defined above. In short, a custom module needs to do two things: set up its sublayers in __init__ and define the computation in forward:
class NestMLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),
                                 nn.Linear(64, 32), nn.ReLU())
        self.linear = nn.Linear(32, 16)

    def forward(self, X):
        return self.linear(self.net(X))

chimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())
chimera(X)
tensor(0.1851, grad_fn=<SumBackward0>)
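Each top-level block of chimera is registered under a numeric index, so the nested structure can be inspected directly:

[name for name, _ in chimera.named_children()]  # ['0', '1', '2']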
A note on efficiency: because of the constraints of Python's GIL (global interpreter lock), we worry that the GPU may have to wait on the speed of the CPU.
(1) What kinds of problems appear if we change the way MySequential stores blocks to a Python list?
As the example below shows, the network can still be assembled this way, but compared with _modules, the default place where submodules are stored, putting the layers in a custom attribute such as modules means they are never "registered": the default repr cannot print the model structure, parameters() finds nothing, and since this forward iterates over _modules, net2(X) simply returns X unchanged.
class MySequential(nn.Module):
    def __init__(self, *args):
        super().__init__()
        for idx, module in enumerate(args):
            self._modules[str(idx)] = module  # registered: visible to print() and parameters()

    def forward(self, X):
        for block in self._modules.values():
            X = block(X)
        return X

class MySequential2(nn.Module):
    def __init__(self, *args):
        super().__init__()
        self.modules = list(args)  # a plain Python list: never registered (and it shadows nn.Module.modules())

    def forward(self, X):
        for block in self._modules.values():  # _modules is empty here, so the loop never runs and X passes through unchanged
            X = block(X)
        return X

net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
net2 = MySequential2(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
print(net), print(net2), net(X), net2(X)
MySequential(
(0): Linear(in_features=20, out_features=256, bias=True)
(1): ReLU()
(2): Linear(in_features=256, out_features=10, bias=True)
)
MySequential2()
(None,
None,
tensor([[-0.1234, -0.1815, 0.0898, -0.1077, -0.1857, 0.1911, -0.0182, 0.0517,
-0.0648, -0.0026],
[-0.1889, -0.0274, 0.0809, -0.1412, -0.1933, 0.1627, 0.0667, 0.0802,
0.0877, -0.1025]], grad_fn=<AddmmBackward0>),
tensor([[0.8223, 0.2317, 0.2167, 0.2294, 0.8206, 0.0267, 0.6652, 0.5543, 0.9675,
0.8493, 0.1979, 0.8684, 0.9007, 0.8543, 0.9402, 0.3485, 0.4197, 0.6307,
0.0776, 0.8749],
[0.6078, 0.8124, 0.1102, 0.8815, 0.4162, 0.4978, 0.5868, 0.6088, 0.7090,
0.8099, 0.9512, 0.0493, 0.8988, 0.7997, 0.7061, 0.0673, 0.6092, 0.3032,
0.4287, 0.6183]]))
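The practical consequence is that an optimizer would never see net2's layers, since parameters() only walks registered submodules. If list-like storage is what you want, nn.ModuleList registers every element properly; a minimal sketch (the class name MySequential3 is my own):

len(list(net.parameters()))   # 4: weight and bias of the two Linear layers
len(list(net2.parameters()))  # 0: nothing was registered

class MySequential3(nn.Module):
    def __init__(self, *args):
        super().__init__()
        self.blocks = nn.ModuleList(args)  # registers each module, so print() and parameters() work

    def forward(self, X):
        for block in self.blocks:
            X = block(X)
        return X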
(2) Implement a block that takes two blocks as arguments, say net1 and net2, and returns the concatenated output of both networks in the forward propagation. This is also called a parallel block.
net3 = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 64))
net4 = MySequential(nn.Linear(64, 32), nn.ReLU(), nn.Linear(32, 10))

class MySequential2(nn.Module):  # note: this composes net1 and net2 in series, not in parallel
    def __init__(self, net1, net2):
        super().__init__()
        self.net1 = net1
        self.net2 = net2

    def forward(self, X):
        return self.net2(self.net1(X))

net5 = MySequential2(net3, net4)
net5(X)
tensor([[ 0.1421, -0.0433, 0.0998, -0.0017, -0.2430, -0.0957, 0.0042, -0.0263,
-0.0330, 0.2241],
[ 0.1457, -0.0369, 0.0850, -0.0112, -0.2209, -0.0903, 0.0021, -0.0414,
-0.0438, 0.2144]], grad_fn=<AddmmBackward0>)
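Strictly speaking, the solution above composes net1 and net2 in series. A parallel block in the exercise's sense runs both on the same input and concatenates their outputs; a minimal sketch (the class name ParallelBlock is my own):

class ParallelBlock(nn.Module):
    def __init__(self, net1, net2):
        super().__init__()
        self.net1 = net1
        self.net2 = net2

    def forward(self, X):
        # both branches see the same input; concatenate along the feature dimension
        return torch.cat((self.net1(X), self.net2(X)), dim=1)

parallel = ParallelBlock(nn.Linear(20, 10), nn.Linear(20, 10))
parallel(torch.rand(2, 20)).shape  # torch.Size([2, 20])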
(3) Suppose we want to concatenate multiple instances of the same network. Implement a function that generates multiple instances of the same block and build a larger network from them.
How do we make the layer dimensions match up?
Forcing arbitrary widths to match would be pointless; see the sketch below for the natural way out.
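One way to sidestep the matching problem: give each instance the same input and output width, so any number of them can be stacked. A minimal sketch (the names block and stack are my own):

def block():
    return nn.Sequential(nn.Linear(20, 20), nn.ReLU())

def stack(n):
    # n instances with matching input/output widths chain without extra glue
    return nn.Sequential(*[block() for _ in range(n)])

big_net = nn.Sequential(stack(3), nn.Linear(20, 10))
big_net(torch.rand(2, 20)).shape  # torch.Size([2, 10])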