import time

import torch
from torch import nn
from d2l import torch as d2l
# Define a convolution block (BN + ReLU + Conv); height and width stay unchanged
def conv_block(input_channels, num_channels):
    return nn.Sequential(
        nn.BatchNorm2d(input_channels), nn.ReLU(),
        nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1)
    )
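# Quick sanity check (an illustrative sketch, not in the original): with
# kernel_size=3 and padding=1, conv_block should preserve height and width.
_blk = conv_block(3, 10)
_X = torch.randn(4, 3, 8, 8)
print(_blk(_X).shape)  # expected: torch.Size([4, 10, 8, 8])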
# Define the dense block
class DenseBlock(nn.Module):
    def __init__(self, num_convs, input_channels, num_channels):
        """
        :param num_convs: number of convolution blocks
        :param input_channels: number of input channels
        :param num_channels: output channels added per convolution block (the growth rate)
        """
        super().__init__()
        layer = []
        for i in range(num_convs):
            layer.append(
                conv_block(num_channels * i + input_channels, num_channels)
            )
        self.net = nn.Sequential(*layer)

    def forward(self, X):
        for blk in self.net:
            Y = blk(X)
            # Concatenate the input and output of each block along the channel dimension
            X = torch.cat((X, Y), dim=1)
        return X
# Inspect the dense block
blk = DenseBlock(2, 3, 10)
X = torch.randn(4, 3, 8, 8)
Y = blk(X)
Y.shape  # torch.Size([4, 23, 8, 8]); 23 = 3 + 2 * 10
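# Sanity check (illustrative): output channels = input_channels + num_convs * num_channels
assert Y.shape == (4, 3 + 2 * 10, 8, 8)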
# Define the transition layer (BN + ReLU + 1x1 Conv, followed by average pooling)
def transition_block(input_channels, num_channels):
    return nn.Sequential(
        nn.BatchNorm2d(input_channels), nn.ReLU(),
        nn.Conv2d(input_channels, num_channels, kernel_size=1),
        nn.AvgPool2d(kernel_size=2, stride=2)
    )
# Inspect the transition layer
blk = transition_block(23, 10)
X = torch.randn(4, 23, 8, 8)
Y = blk(X)
Y.shape  # [4, 23, 8, 8] -> [4, 10, 4, 4]: channels 23 -> 10, height and width halved
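# Sanity check (illustrative): the 1x1 conv shrinks channels, the pooling halves H and W
assert Y.shape == (4, 10, 4, 4)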
# First, the same single convolution layer and max-pooling layer as in ResNet
b1 = nn.Sequential(
    nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
    nn.BatchNorm2d(64), nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
)
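# Quick check of the stem (illustrative sketch): two stride-2 stages shrink 96x96 to 24x24
_X = torch.randn(1, 1, 96, 96)
print(b1(_X).shape)  # expected: torch.Size([1, 64, 24, 24])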
# Next, analogous to the 4 residual stages in ResNet, DenseNet uses 4 dense blocks.
# The number of channels each convolution block adds (i.e. the growth rate) is set
# to 32, so every dense block adds 4 * 32 = 128 channels.
# Between stages, ResNet halves height and width with a stride-2 residual block;
# DenseNet instead uses a transition layer, which halves height and width and also
# halves the number of channels.
num_channels, growth_rate = 64, 32  # current channel count, growth rate
num_convs_in_dense_blocks = [4, 4, 4, 4]  # number of convolution blocks in each dense block
blks = []
for i, num_convs in enumerate(num_convs_in_dense_blocks):
    blks.append(
        DenseBlock(num_convs, num_channels, growth_rate)
    )
    num_channels += num_convs * growth_rate  # output channel count of the preceding dense block
    if i != len(num_convs_in_dense_blocks) - 1:
        # Add a transition layer between dense blocks to halve the channel count
        blks.append(transition_block(num_channels, num_channels // 2))
        num_channels = num_channels // 2
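# Channel bookkeeping, worked out by hand from the loop above (illustrative):
#   block 1: 64  + 128 = 192 -> transition -> 96
#   block 2: 96  + 128 = 224 -> transition -> 112
#   block 3: 112 + 128 = 240 -> transition -> 120
#   block 4: 120 + 128 = 248 (no transition after the last block)
assert num_channels == 248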
net = nn.Sequential(
    b1,
    *blks,
    nn.BatchNorm2d(num_channels), nn.ReLU(),
    nn.AdaptiveAvgPool2d((1, 1)),
    nn.Flatten(),
    nn.Linear(num_channels, 10)
)
X = torch.randn(1, 1, 96, 96)
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape:\t', X.shape)
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
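# Peek at one batch (illustrative): Fashion-MNIST resized to 96x96, single channel
X_batch, y_batch = next(iter(train_iter))
print(X_batch.shape, y_batch.shape)  # expected: torch.Size([256, 1, 96, 96]) torch.Size([256])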
lr, num_epochs = 0.1, 10
start = time.perf_counter()
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
end = time.perf_counter()
print("运行耗时 %.4f s" % (end-start))