Using Pretrained Models in PyTorch

1. Loading Pretrained Models

Using pretrained weights downloaded from the internet:

If the corresponding pretrained weight file (.pth) is not already in the download folder, it will be downloaded from the internet automatically.

import torchvision.models as models
 
# ResNet variants
model = models.resnet18(pretrained=True)
model = models.resnet34(pretrained=True)
model = models.resnet50(pretrained=True)

# VGG variants
model = models.vgg11(pretrained=True)
model = models.vgg16(pretrained=True)
model = models.vgg16_bn(pretrained=True)

Using locally stored pretrained weights:

import torch
import torchvision.models as models

model = models.resnet50(pretrained=False)
# r'E:\Python\DeepLearning\pretrained_model\resnet50.pth' is the local path
# to the pretrained weight file
model.load_state_dict(torch.load(r'E:\Python\DeepLearning\pretrained_model\resnet50.pth'))
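
If the weights were saved from a GPU but are being loaded on a CPU-only machine, torch.load accepts a map_location argument; a minimal sketch:

# remap GPU-saved tensors onto the CPU at load time
state_dict = torch.load(r'E:\Python\DeepLearning\pretrained_model\resnet50.pth',
                        map_location='cpu')
model.load_state_dict(state_dict)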

2. Modifying a Pretrained Model

2.1. Simple Parameter Changes

Take resnet50 as an example. Its final classification layer fc separates 1000 categories; if your own dataset has only 9 classes, modify it as follows:

# coding=UTF-8
import torch.nn as nn
import torchvision.models as models

# load the pretrained model
model = models.resnet50(pretrained=True)
# in_features is the input dimension of the fc layer;
# reading it keeps the feature extractor untouched
fc_features = model.fc.in_features
# replace fc so it classifies 9 categories instead of 1000
model.fc = nn.Linear(fc_features, 9)
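
A quick way to confirm the new head is wired up correctly is to push a dummy batch through the model; a minimal sketch, assuming the usual 224x224 ResNet input size:

import torch

model.eval()  # inference mode for the check
# a fake batch of one RGB image at ResNet's standard input resolution
dummy = torch.randn(1, 3, 224, 224)
out = model(dummy)
print(out.shape)  # expected: torch.Size([1, 9])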
2.2. Adding or Removing Layers

Sometimes we need to change the network's layer structure itself. The only way then is parameter overriding: define a similar network of your own first, then copy the pretrained parameters over into it.

#================================
#  Define a similar custom network
#================================
import torchvision.models as models
import torch
import torch.nn as nn
import math

class CNN(nn.Module):

    def __init__(self, block, layers, num_classes=9):
        super(CNN, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # newly added transposed-convolution (deconvolution) layer
        self.convtranspose1 = nn.ConvTranspose2d(2048, 2048, kernel_size=3, stride=1, padding=1,
                                                 output_padding=0, groups=1, bias=False, dilation=1)
        # newly added max-pooling layer
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        # drop the original fc layer and add a new fclass layer instead
        self.fclass = nn.Linear(2048, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        # forward pass through the newly added layers;
        # keep x 4-D here: ConvTranspose2d expects an N x C x H x W input
        x = self.convtranspose1(x)
        x = self.maxpool2(x)
        x = x.view(x.size(0), -1)
        x = self.fclass(x)

        return x


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out

#================================
#  Load pretrained parameters into the custom network (method 1)
#================================
# load the stock resnet50 and its pretrained parameters
resnet50 = models.resnet50(pretrained=False)
resnet50.load_state_dict(torch.load(r'E:\Python\DeepLearning\pretrained_model\resnet50.pth'))
# build the custom network with resnet50's block layout
cnn = CNN(Bottleneck, [3, 4, 6, 3])
# read the pretrained parameters
pretrained_dict = resnet50.state_dict()
# read the custom model's parameters
model_dict = cnn.state_dict()
# drop keys from pretrained_dict that do not exist in model_dict
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# update the existing model_dict
model_dict.update(pretrained_dict)
# load the state_dict we actually want
cnn.load_state_dict(model_dict)
# print the custom model's structure
print(cnn)
#================================
#  Load pretrained parameters into the custom network (method 2)
#================================
# load the stock resnet50 and its pretrained parameters
resnet50 = models.resnet50(pretrained=False)
resnet50.load_state_dict(torch.load(r'E:\Python\DeepLearning\pretrained_model\resnet50.pth'))
cnn = CNN(Bottleneck, [3, 4, 6, 3])
# with strict=False, parameters of layers that differ between cnn and
# resnet50 are simply skipped instead of raising an error
cnn.load_state_dict(resnet50.state_dict(), strict=False)
print(cnn)
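
With strict=False it is easy to skip more parameters than intended. load_state_dict returns a named tuple listing every key that did not match, which is worth printing as a check:

# the return value records which keys were skipped
result = cnn.load_state_dict(resnet50.state_dict(), strict=False)
print('missing keys (left at fresh initialization):', result.missing_keys)
print('unexpected keys (in the checkpoint but ignored):', result.unexpected_keys)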
2.3. Fine-tuning Selected Layers

First inspect the network's direct submodules. Set param.requires_grad = False on the parameters of the modules that should not be adjusted, and collect the parameters that do need adjusting in a list.

# after loading the pretrained parameters...
for name, value in model.named_parameters():
    # should_freeze is a placeholder for your own test on the parameter name
    if should_freeze(name):
        value.requires_grad = False

# set up the optimizer with only the still-trainable parameters
params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = torch.optim.Adam(params, lr=1e-4)

Usually you create a list holding the names of the parameters you want to train, used like this:

# first list the parameter names the model contains
for name, value in model.named_parameters():
    print(name)
# or: print(model.state_dict().keys())

# after loading the pretrained parameters...
# names of the parameters that should stay trainable
requires_grad_list = ['fc.weight', 'layer4.2.bn3.weight', 'layer4.2.conv3.weight']
for name, value in model.named_parameters():
    # freeze everything not in requires_grad_list
    if name not in requires_grad_list:
        value.requires_grad = False

# keep only the trainable parameters in params
params = filter(lambda p: p.requires_grad, model.parameters())
# pass params to the optimizer so only they are updated
optimizer = torch.optim.Adam(params, lr=1e-4)
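
Listing every parameter name by hand gets tedious for large models. As an alternative, here is a sketch that freezes by name prefix instead; 'layer4' and 'fc' are just example choices for a torchvision resnet50:

# keep only parameters under these (example) submodule prefixes trainable
trainable_prefixes = ('layer4', 'fc')
for name, value in model.named_parameters():
    # str.startswith accepts a tuple of prefixes
    value.requires_grad = name.startswith(trainable_prefixes)

params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = torch.optim.Adam(params, lr=1e-4)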
2.4. Setting Different Learning Rates

If all the loaded parameters should be updated, but some of them need a different update speed (learning rate) from the rest:

First export the parameter names:

# after loading the pretrained parameters...
for name, value in model.named_parameters():
    print(name)
# or
print(model.state_dict().keys())

Suppose the model consists of three parts, encoder, viewer, and decoder, with parameter names such as:

'encoder.visual_emb.0.weight',
'encoder.visual_emb.0.bias',
'viewer.bd.Wsi',
'viewer.bd.bias',
'decoder.core.layer_0.weight_ih',
'decoder.core.layer_0.weight_hh',

Suppose the encoder and viewer should use a learning rate of 1e-6 while the decoder uses 1e-4. When passing the parameters to the optimizer:

# collect ids of the decoder parameters so they can be excluded below
ignored_params = list(map(id, model.decoder.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params, model.parameters())
# groups without their own 'lr' fall back to the default lr=1e-4;
# SGD is used here because Adam does not take a momentum argument
optimizer = torch.optim.SGD([
    {'params': base_params, 'lr': 1e-6},
    {'params': model.decoder.parameters()},
], lr=1e-4, momentum=0.9)
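
If the parts are not exposed as module attributes, the same groups can be built from parameter names; a sketch assuming the 'decoder' prefix from the listing above:

# build the two groups from name prefixes instead of module attributes
decoder_params = [p for n, p in model.named_parameters() if n.startswith('decoder')]
other_params = [p for n, p in model.named_parameters() if not n.startswith('decoder')]
optimizer = torch.optim.SGD([
    {'params': other_params, 'lr': 1e-6},
    {'params': decoder_params},  # falls back to the default lr below
], lr=1e-4, momentum=0.9)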

3. Saving the Network Structure and Parameters

Method 1:

Save the whole network, structure and parameters together:

torch.save(model_name, 'parms_name.pth')

Reload:

model = torch.load('parms_name.pth')

This method saves both the model structure and the parameters, and reloading restores both. Note that it pickles the model class, so the class definition must still be importable when reloading.

Method 2 (recommended):

Save only the model's parameters:

torch.save(model_name.state_dict(), 'parms_name.pth')

Reload (the model object must be constructed first):

model_name.load_state_dict(torch.load('parms_name.pth'))
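
In a training script it is common to extend method 2 and checkpoint the optimizer state and epoch counter alongside the weights; a minimal sketch (the dict keys and filename are just example choices):

# epoch, model and optimizer are assumed to come from your training loop
checkpoint = {
    'epoch': epoch,
    'model_state': model.state_dict(),
    'optimizer_state': optimizer.state_dict(),
}
torch.save(checkpoint, 'checkpoint.pth')

# reload: construct the model and optimizer first, then restore their states
checkpoint = torch.load('checkpoint.pth')
model.load_state_dict(checkpoint['model_state'])
optimizer.load_state_dict(checkpoint['optimizer_state'])
start_epoch = checkpoint['epoch'] + 1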

4. Miscellaneous

The difference between model.modules() and model.children():

Module.modules() returns an iterator over all modules in the network, recursing into submodules and including the module itself, while Module.children() returns an iterator over the direct submodules only.

# if the model's structure is like the nested list [[1, 2], 3],
# model.modules() traverses:
[[1, 2], 3], [1, 2], 1, 2, 3

# while model.children() yields only:
[1, 2], 3
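
A minimal runnable illustration of the difference, using a small nn.Sequential:

import torch.nn as nn

# structure mirrors the nested list [[1, 2], 3] above
net = nn.Sequential(nn.Sequential(nn.Linear(4, 4), nn.ReLU()), nn.Linear(4, 2))

print(list(net.modules()))   # net itself, the inner Sequential, Linear, ReLU, outer Linear
print(list(net.children()))  # only the inner Sequential and the outer Linear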