Making structural modifications to a model in PyTorch transfer learning

import torchvision
import torch
import torch.nn as nn
import torchvision.models as models
from torchvision.models import resnet



model = models.resnet50(pretrained=True)
# Grab the input feature dimension of the original fc layer
fc_features = model.fc.in_features
# Replace the final classification layer with one that outputs the desired number of classes
model.fc = nn.Linear(fc_features, 2)
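
If only the new classification layer should be trained, the pretrained backbone is usually frozen before the optimizer is built. A minimal sketch of that pattern (the SGD optimizer and learning rate are placeholder choices, not part of the original example):

# Freeze every backbone parameter; only the newly created fc layer stays trainable
for name, param in model.named_parameters():
    if not name.startswith('fc.'):
        param.requires_grad = False

# Build the optimizer over the trainable parameters only
optimizer = torch.optim.SGD(
    filter(lambda p: p.requires_grad, model.parameters()),
    lr=1e-3, momentum=0.9)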

The method above only swaps out the final classification layer. The intermediate structure can also be changed, for example by adding or removing layers as follows.

# Adding/removing convolutional layers
import math
import torch.utils.model_zoo as model_zoo
class CNN(nn.Module):  

    def __init__(self, block, layers, num_classes=9):  
        self.inplanes = 64  
        super().__init__()  
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,  
                               bias=False)  
        self.bn1 = nn.BatchNorm2d(64)  
        self.relu = nn.ReLU(inplace=True)  
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)  
        self.layer1 = self._make_layer(block, 64, layers[0])  
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)  
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)  
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)  
        self.avgpool = nn.AvgPool2d(7, stride=1)  
        # New: a transposed-convolution (deconvolution) layer  
        self.convtranspose1 = nn.ConvTranspose2d(2048, 2048, kernel_size=3, stride=1, padding=1, output_padding=0, groups=1, bias=False, dilation=1)  
        # New: a max-pooling layer  
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)  
        # Drop the original fc layer and add a new fclass layer  
        self.fclass = nn.Linear(2048, num_classes)  

        for m in self.modules():  
            if isinstance(m, nn.Conv2d):  
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels  
                m.weight.data.normal_(0, math.sqrt(2. / n))  
            elif isinstance(m, nn.BatchNorm2d):  
                m.weight.data.fill_(1)  
                m.bias.data.zero_()  

    def _make_layer(self, block, planes, blocks, stride=1):  
        downsample = None  
        if stride != 1 or self.inplanes != planes * block.expansion:  
            downsample = nn.Sequential(  
                nn.Conv2d(self.inplanes, planes * block.expansion,  
                          kernel_size=1, stride=stride, bias=False),  
                nn.BatchNorm2d(planes * block.expansion),  
            )  

        layers = []  
        layers.append(block(self.inplanes, planes, stride, downsample))  
        self.inplanes = planes * block.expansion  
        for i in range(1, blocks):  
            layers.append(block(self.inplanes, planes))  

        return nn.Sequential(*layers)  

    def forward(self, x):  
        x = self.conv1(x)  
        x = self.bn1(x)  
        x = self.relu(x)  
        x = self.maxpool(x)  

        x = self.layer1(x)  
        x = self.layer2(x)  
        x = self.layer3(x)  
        x = self.layer4(x)  

        x = self.avgpool(x)  
        # Forward pass through the new layers; convtranspose1 expects a 4-D
        # (N, 2048, 1, 1) tensor, so flatten only after the extra pooling step
        x = self.convtranspose1(x)  
        x = self.maxpool2(x)  
        x = x.view(x.size(0), -1)  
        x = self.fclass(x)  

        return x   
 
resnet50 = models.resnet50(pretrained=True)
cnn = CNN(resnet.Bottleneck, [3, 4, 6, 3])
pretrained_dict = resnet50.state_dict()  
model_dict = cnn.state_dict()  

# Drop the entries whose keys do not exist in cnn (e.g. the original fc layer)
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# Update: keys that only exist in model_dict (the new layers) keep their fresh
# initialization, while keys shared with the pretrained dict are overwritten
model_dict.update(pretrained_dict)
cnn.load_state_dict(model_dict)
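
As a quick sanity check (not part of the original example), one can count how many state_dict entries were copied over from resnet50 and push a dummy 224x224 batch through the modified network:

# How many entries were taken from the pretrained resnet50
print('loaded %d of %d state_dict entries from resnet50'
      % (len(pretrained_dict), len(model_dict)))

# Dummy forward pass to confirm the new layers are wired up correctly
dummy = torch.randn(1, 3, 224, 224)
out = cnn(dummy)
print(out.shape)   # expected: torch.Size([1, 9]) since num_classes=9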

References:

pytorch中的pre-train函数模型引用及修改(增减网络层,修改某层参数等) - whut_ldz的博客 - CSDN博客

PyTorch中使用预训练的模型初始化网络的一部分参数 - 乐兮山南水北的博客 - CSDN博客

目标检测之pytorch预训练模型的使用(削减网络层,修改参数) fine-tune技巧 - 知乎 (zhihu.com)

Pytorch自由载入部分模型参数并冻结 - 知乎 (zhihu.com)
