Xception for DeepLab V3+ (with detailed code annotations and the original figures from the paper)

import torch
import torchvision.models as models
from torch import nn


# Depthwise separable convolution
class SeparableConv2d_same(nn.Module):
    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False):
        super(SeparableConv2d_same, self).__init__()
        # Depthwise convolution; padding is set so the spatial size is preserved
        # ("same" padding) when stride is 1, matching the "_same" in the class name
        self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride,
                               padding=dilation * (kernel_size - 1) // 2,
                               dilation=dilation, groups=inplanes, bias=bias)
        # Pointwise (1x1) convolution
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        # Depthwise convolution
        x = self.conv1(x)
        # Pointwise convolution
        x = self.pointwise(x)
        return x
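To see why this factorization pays off, here is a small sanity check (an illustrative sketch added for this write-up, reusing the imports above; the 728-channel, 32x32 example sizes are arbitrary): the depthwise + pointwise pair needs roughly nine times fewer parameters than a standard 3x3 convolution, and with stride 1 it keeps the spatial size unchanged.

# Illustrative comparison: standard 3x3 conv vs. depthwise-separable conv (728 -> 728 channels)
sep = SeparableConv2d_same(728, 728, kernel_size=3)
std = nn.Conv2d(728, 728, 3, padding=1, bias=False)
print(sum(p.numel() for p in sep.parameters()))  # 728*9 + 728*728 = 536,536
print(sum(p.numel() for p in std.parameters()))  # 728*728*9 = 4,769,856 (about 9x more)
out = sep(torch.randn(1, 728, 32, 32))
print(out.shape)  # torch.Size([1, 728, 32, 32]) -- spatial size preserved at stride 1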


# Xception: 20 blocks plus 5 standalone conv layers (two at the start, three at the end)
class Block(nn.Module):
    # (input channels, output channels, number of repeated separable convs, stride,
    #  dilation, whether the block starts with a ReLU, whether the channels grow in the
    #  first separable conv, whether this is the last (20th) block)
    def __init__(self, inplanes, planes, reps, stride=1,
                 dilation=1, start_with_relu=True, grow_first=True, is_last=False):
        super(Block, self).__init__()
        # Distinguish blocks that reshape their input (blocks 1-3 and 20) from blocks 4-19
        # If the channel count or the stride changes, the skip path needs a 1x1 conv + BN projection
        if planes != inplanes or stride != 1:
            # 1x1 conv with the block's stride: projects (and, for stride 2, downsamples) the skip connection
            self.skip = nn.Conv2d(
                inplanes, planes, 1, stride=stride, bias=False
            )
            # BatchNorm on the skip path
            self.skipbn = nn.BatchNorm2d(planes)
        # Otherwise (blocks 4-19) the shape is unchanged, so an identity skip is used
        else:
            self.skip = None
        # ReLU layer
        # inplace=True modifies the tensor in place, avoiding the cost of allocating and freeing an extra buffer
        self.relu = nn.ReLU(inplace=True)
        # Build the list of layers for the main (residual) branch
        rep = []
        filters = inplanes  # track the current channel count
        # If grow_first, start with one ReLU - SepConv - BN unit that grows the channels to `planes`
        if grow_first:
            rep.append(self.relu)  # first: ReLU
            rep.append(SeparableConv2d_same(
                filters, planes, 3, stride=1, dilation=dilation
            ))  # second: 3x3 separable conv
            rep.append(nn.BatchNorm2d(planes))  # third: BN
            filters = planes  # update filters to the output channel count
        # Add reps-1 more ReLU - SepConv - BN units that keep the channel count unchanged
        for i in range(reps - 1):
            rep.append(self.relu)  # ReLU
            rep.append(SeparableConv2d_same(
                filters, filters, 3, stride=1, dilation=dilation
            ))  # 3x3 separable conv
            rep.append(nn.BatchNorm2d(filters))  # BN
        # If not grow_first, finish with one ReLU - SepConv - BN unit that grows the channels to `planes`
        if not grow_first:
            rep.append(self.relu)  # ReLU
            rep.append(SeparableConv2d_same(
                inplanes, planes, 3, stride=1, dilation=dilation
            ))  # 3x3 separable conv
            rep.append(nn.BatchNorm2d(planes))  # BN
        # Drop the leading ReLU if the block should not start with one
        if not start_with_relu:
            rep = rep[1:]
        # Blocks with stride != 1 (blocks 1-3) end with a stride-2 3x3 separable conv for downsampling
        if stride != 1:
            rep.append(SeparableConv2d_same(planes, planes, 3, stride=2))
        # The last block (block 20) keeps stride 1 but still appends a final 3x3 separable conv
        if stride == 1 and is_last:
            rep.append(SeparableConv2d_same(planes, planes, 3, stride=1))
        self.rep = nn.Sequential(*rep)

    def forward(self, input):
        # Pass the input through the main branch
        x = self.rep(input)
        # If a projection skip was defined in __init__ (the block changes channels or stride),
        # apply the 1x1 conv and BN to the input; otherwise use the identity skip
        if self.skip is not None:
            skip = self.skip(input)
            skip = self.skipbn(skip)
        else:
            skip = input
        # Residual addition
        x += skip
        return x
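A quick shape check of an entry-flow block (an illustrative sketch, not part of the original post; it reuses the classes and imports defined above, and the 128x128 input size is an arbitrary example): configured like block1, the block maps a 64-channel feature map to 128 channels and halves its spatial size, because the main branch ends with a stride-2 separable conv and the skip path uses the 1x1 stride-2 projection.

# Illustrative usage of Block, configured like block1 in the Xception class below
block1 = Block(64, 128, reps=2, stride=2, start_with_relu=False)
feat = torch.randn(1, 64, 128, 128)
print(block1(feat).shape)  # expected: torch.Size([1, 128, 64, 64]) -- channels grow, resolution halves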


class Xception(nn.Module):
    def __init__(self, inplanes=3):
        super(Xception, self).__init__()
        entry_block3_stride = 2  # stride of the final separable conv in entry-flow block 3
        middle_block_dilation = 1  # dilation used in middle-flow blocks 4-19
        exit_block_dilations = (1, 2)  # dilations used in the exit flow (block 20, then conv3-conv5)

        # Entry flow: two standalone 3x3 convs followed by blocks 1-3
        self.conv1 = nn.Conv2d(inplanes, 32, 3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(64)
        self.block1 = Block(64, 128, reps=2, stride=2, start_with_relu=False)
        self.block2 = Block(128, 256, reps=2, stride=2, start_with_relu=True, grow_first=True)
        self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, start_with_relu=True, grow_first=True,
                            is_last=True)
        # Middle flow: blocks 4-19, all 728 channels with stride 1
        self.block4 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                            grow_first=True)
        self.block5 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                            grow_first=True)
        self.block6 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                            grow_first=True)
        self.block7 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                            grow_first=True)
        self.block8 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                            grow_first=True)
        self.block9 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                            grow_first=True)
        self.block10 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                             grow_first=True)
        self.block11 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                             grow_first=True)
        self.block12 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                             grow_first=True)
        self.block13 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                             grow_first=True)
        self.block14 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                             grow_first=True)
        self.block15 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                             grow_first=True)
        self.block16 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                             grow_first=True)
        self.block17 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                             grow_first=True)
        self.block18 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                             grow_first=True)
        self.block19 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, start_with_relu=True,
                             grow_first=True)

        # Exit flow: block 20, followed by three standalone separable convs
        self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_dilations[0],
                             start_with_relu=True, grow_first=False, is_last=True)

        self.conv3 = SeparableConv2d_same(1024, 1536, 3, stride=1, dilation=exit_block_dilations[1])
        self.bn3 = nn.BatchNorm2d(1536)

        self.conv4 = SeparableConv2d_same(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1])
        self.bn4 = nn.BatchNorm2d(1536)

        self.conv5 = SeparableConv2d_same(1536, 2048, 3, stride=1, dilation=exit_block_dilations[1])
        self.bn5 = nn.BatchNorm2d(2048)
        
    def forward(self, x):
        # Entry flow
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)

        x = self.block1(x)
        low_level_feat = x  # 1/4 of the input resolution; passed to the DeepLab V3+ decoder
        x = self.block2(x)
        x = self.block3(x)

        # Middle flow
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        x = self.block12(x)
        x = self.block13(x)
        x = self.block14(x)
        x = self.block15(x)
        x = self.block16(x)
        x = self.block17(x)
        x = self.block18(x)
        x = self.block19(x)

        # Exit flow
        x = self.block20(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)

        x = self.conv4(x)
        x = self.bn4(x)
        x = self.relu(x)

        x = self.conv5(x)
        x = self.bn5(x)
        x = self.relu(x)

        return x, low_level_feat
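Finally, a minimal forward pass through the whole backbone (an illustrative sketch; the 512x512 input size and batch size of 1 are assumptions made only for this example): with entry_block3_stride = 2 the backbone has an overall output stride of 16, and low_level_feat is taken right after block1 at 1/4 of the input resolution, which is the feature map the DeepLab V3+ decoder consumes.

# Illustrative end-to-end check (reusing the imports above)
model = Xception(inplanes=3)
model.eval()
with torch.no_grad():
    x, low_level_feat = model(torch.randn(1, 3, 512, 512))
print(x.shape)               # expected: torch.Size([1, 2048, 32, 32])  -> output stride 16
print(low_level_feat.shape)  # expected: torch.Size([1, 128, 128, 128]) -> 1/4 resolution, 128 channels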

[Figure: Xception architecture diagram from the DeepLab V3+ paper]
