Deep Learning Study Notes
Course video: https://www.bilibili.com/video/BV1af4y1L7Zu/?spm_id_from=autoNext&vd_source=75dce036dc8244310435eaf03de4e330
FCN: the foundational work on semantic segmentation with deep learning; the original architecture is rarely used today. It replaces the final fully connected layer of a CNN with a transposed convolution, so that the network produces a prediction for every pixel. Concretely, the CNN backbone has its global average pooling layer and final fully connected layer removed; a 1×1 convolution is mainly used to reduce the number of channels, and a transposed convolution layer then upsamples the feature map back to the input size. The number of output channels equals the number of classes, so the per-class prediction for each pixel is stored along the channel dimension.
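To keep the shape bookkeeping straight, here is a summary sketch of the tensor shapes flowing through the FCN assembled in this note (the 320×480 input, ResNet-18 backbone, and 21 classes all match the code below, where these shapes are verified):
# Shape flow through the FCN built in this note:
#   input image:                        (1,   3, 320, 480)
#   CNN backbone (no avgpool / fc):     (1, 512,  10,  15)   -- 32x downsampling
#   1x1 convolution:                    (1,  21,  10,  15)   -- channels = num. classes
#   transposed convolution (stride 32): (1,  21, 320, 480)   -- back to input size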
%matplotlib inline
import torch
import torchvision
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
Use a ResNet-18 model pretrained on the ImageNet dataset to extract image features.
# note: newer torchvision versions replace pretrained=True with
# weights=torchvision.models.ResNet18_Weights.IMAGENET1K_V1
pretrained_net = torchvision.models.resnet18(pretrained=True)
list(pretrained_net.children())[-3:]  # inspect the last three layers
[Sequential(
   (0): BasicBlock(
     (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
     (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
     (relu): ReLU(inplace=True)
     (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
     (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
     (downsample): Sequential(
       (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
       (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
     )
   )
   (1): BasicBlock(
     (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
     (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
     (relu): ReLU(inplace=True)
     (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
     (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
   )
 ),
 AdaptiveAvgPool2d(output_size=(1, 1)),
 Linear(in_features=512, out_features=1000, bias=True)]
Create a fully convolutional network instance net.
# drop the last two layers (the global average pooling layer and the fully connected layer)
net = nn.Sequential(*list(pretrained_net.children())[:-2])
X = torch.rand(size=(1, 3, 320, 480))
net(X).shape
torch.Size([1, 512, 10, 15])
The backbone reduces the height and width by a factor of 32 (320/32 = 10, 480/32 = 15). Use a 1×1 convolution layer to convert the number of output channels into the number of classes in the Pascal VOC2012 dataset (21 classes), then use a transposed convolution layer to increase the height and width of the feature map by a factor of 32, back to the input size.
# assemble the FCN
num_classes = 21  # number of output classes
# 1x1 convolution layer to map 512 channels to num_classes
net.add_module('final_conv', nn.Conv2d(512, num_classes, kernel_size=1))
# transposed convolution layer: stride=32 upsamples by 32x;
# kernel_size=64 with padding=16 makes the output exactly 32x the input height/width
net.add_module(
    'transpose_conv',
    nn.ConvTranspose2d(num_classes, num_classes, kernel_size=64, padding=16,
                       stride=32))
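For a transposed convolution, output size = (input − 1)·stride − 2·padding + kernel_size; with stride=32, padding=16, kernel_size=64 this gives (10 − 1)·32 − 2·16 + 64 = 320 and (15 − 1)·32 − 2·16 + 64 = 480, i.e. exactly 32× the 10×15 feature map. Reusing the random X from above as a quick check:
# the full FCN now maps the image to per-pixel class scores
net(X).shape  # expected: torch.Size([1, 21, 320, 480])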
Initialize the transposed convolution layer.
# construct a kernel that performs bilinear interpolation
def bilinear_kernel(in_channels, out_channels, kernel_size):
    factor = (kernel_size + 1) // 2
    if kernel_size % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
    # coordinate grids along the two spatial axes
    og = (torch.arange(kernel_size).reshape(-1, 1),
          torch.arange(kernel_size).reshape(1, -1))
    # 2-D bilinear filter: the outer product of two triangular (tent) functions
    filt = (1 - torch.abs(og[0] - center) / factor) * \
           (1 - torch.abs(og[1] - center) / factor)
    weight = torch.zeros(
        (in_channels, out_channels, kernel_size, kernel_size))
    # place the filter on the diagonal so channel i upsamples to channel i
    weight[range(in_channels), range(out_channels), :, :] = filt
    return weight
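For example, with kernel_size=4 the 1-D tent weights are [0.25, 0.75, 0.75, 0.25], and the 2-D filter is their outer product (a quick check, computed by hand from the formula above):
# each diagonal channel of bilinear_kernel(3, 3, 4) holds the outer product of
# [0.25, 0.75, 0.75, 0.25] with itself, e.g. the corner entry is 0.25 * 0.25 = 0.0625
bilinear_kernel(3, 3, 4)[0, 0]
# tensor([[0.0625, 0.1875, 0.1875, 0.0625],
#         [0.1875, 0.5625, 0.5625, 0.1875],
#         [0.1875, 0.5625, 0.5625, 0.1875],
#         [0.0625, 0.1875, 0.1875, 0.0625]])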
Upsampling experiment with bilinear interpolation
This is a good way to initialize a transposed convolution layer.
conv_trans = nn.ConvTranspose2d(3, 3, kernel_size=4, padding=1, stride=2,
                                bias=False)
# initialize the weights with a bilinear kernel
conv_trans.weight.data.copy_(bilinear_kernel(3, 3, 4));
# read the image and convert it into a tensor PyTorch can use
img = torchvision.transforms.ToTensor()(d2l.Image.open('../img/catdog1.jpg'))
X = img.unsqueeze(0)
Y = conv_trans(X)  # upsample by 2x: (h - 1) * 2 - 2 * 1 + 4 = 2h
out_img = Y[0].permute(1, 2, 0).detach()
d2l.set_figsize()
print('input image shape:', img.permute(1, 2, 0).shape)
d2l.plt.imshow(img.permute(1, 2, 0))
print('output image shape:', out_img.shape)
d2l.plt.imshow(out_img);
input image shape: torch.Size([450, 640, 3])
output image shape: torch.Size([900, 1280, 3])
Initialize the transposed convolution layer with bilinear-interpolation upsampling. For the 1×1 convolution layer, we use Xavier initialization.
W = bilinear_kernel(num_classes, num_classes, 64)  # build the 64x64 bilinear kernel
net.transpose_conv.weight.data.copy_(W);  # use it to initialize the FCN's final transposed convolution layer
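Note that the snippet above only initializes the transposed convolution; the Xavier initialization of the 1×1 layer mentioned in the text is not shown. A minimal sketch of one way to do it:
# Xavier-initialize the 1x1 convolution added earlier (one possible realization
# of the Xavier initialization the text describes)
nn.init.xavier_uniform_(net.final_conv.weight);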
Read the dataset
batch_size, crop_size = 32, (320, 480)
train_iter, test_iter = d2l.load_data_voc(batch_size, crop_size)
read 1114 examples
read 1078 examples
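Each iterator yields minibatches of images paired with per-pixel label maps; given batch_size=32 and crop_size=(320, 480), the shapes should be as below (a quick sanity check, assuming d2l's VOC iterator conventions):
for features, labels in train_iter:
    print(features.shape)  # images: torch.Size([32, 3, 320, 480])
    print(labels.shape)    # per-pixel class indices: torch.Size([32, 320, 480])
    break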
Training
# average the loss over the height and width dimensions so that each image yields a
# single value (each prediction is now a matrix of per-pixel scores, whereas in image
# classification each prediction was a single value)
def loss(inputs, targets):
    return F.cross_entropy(inputs, targets, reduction='none').mean(1).mean(1)
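A quick shape walkthrough of this loss with toy tensors (made-up sizes, just to show the reductions):
# per-pixel scores: (batch, num_classes, h, w); labels: (batch, h, w)
inputs = torch.randn(2, 21, 4, 5)
targets = torch.randint(0, 21, (2, 4, 5))
# cross_entropy with reduction='none' gives a (2, 4, 5) per-pixel loss;
# .mean(1).mean(1) averages over height then width, leaving one value per image
loss(inputs, targets).shape  # torch.Size([2])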
num_epochs, lr, wd, devices = 5, 0.001, 1e-3, d2l.try_all_gpus()
trainer = torch.optim.SGD(net.parameters(), lr=lr, weight_decay=wd)
d2l.train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs, devices)
Prediction
def predict(img):
    # normalize the image the same way as the training data and add a batch dimension
    X = test_iter.dataset.normalize_image(img).unsqueeze(0)
    # take the most likely class for every pixel along the channel dimension
    pred = net(X.to(devices[0])).argmax(dim=1)
    return pred.reshape(pred.shape[1], pred.shape[2])  # drop the batch dimension -> (h, w)
Visualize the predicted classes
def label2image(pred):
    # map each pixel's predicted class index to an RGB value
    colormap = torch.tensor(d2l.VOC_COLORMAP, device=devices[0])
    X = pred.long()
    return colormap[X, :]
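The indexing works because colormap has shape (21, 3): indexing it with an (h, w) tensor of class ids broadcasts to an (h, w, 3) RGB image. A tiny illustration with toy class indices:
toy = torch.tensor([[0, 1], [1, 0]], device=devices[0])
label2image(toy).shape  # torch.Size([2, 2, 3]); in VOC_COLORMAP class 0 is black, class 1 dark red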
voc_dir = d2l.download_extract('voc2012', 'VOCdevkit/VOC2012')
test_images, test_labels = d2l.read_voc_images(voc_dir, False)
n, imgs = 4, []
for i in range(n):
    # crop to the size we need
    crop_rect = (0, 0, 320, 480)
    X = torchvision.transforms.functional.crop(test_images[i], *crop_rect)
    # predict, then convert the per-pixel predictions into an RGB image
    pred = label2image(predict(X))
    imgs += [
        X.permute(1, 2, 0),
        pred.cpu(),
        torchvision.transforms.functional.crop(test_labels[i],
                                               *crop_rect).permute(1, 2, 0)]
# row 1: cropped inputs; row 2: predictions; row 3: ground-truth labels
d2l.show_images(imgs[::3] + imgs[1::3] + imgs[2::3], 3, n, scale=2);