Understanding the MoGA-A (B, C) code

import math

import torch
import torch.nn as nn
import torch.nn.functional as F


class MoGaA(nn.Module):
    def __init__(self, n_class=1000, input_size=224):
        super(MoGaA, self).__init__()
        assert input_size % 32 == 0  # the input size must be a multiple of 32
        # block configuration
        mb_config = [
            # expansion ratio, output channels, kernel size, stride, act (0 = ReLU, 1 = Hswish), SE block
            [6, 24, 5, 2, 0, 0],  # 5x5 conv
            [6, 24, 7, 1, 0, 0],  # 7x7 conv
            [6, 40, 3, 2, 0, 0],  # 3x3 conv
            [6, 40, 3, 1, 0, 1],  # 3x3 conv, SE
            [3, 40, 3, 1, 0, 1],  # 3x3 conv, SE
            [6, 80, 3, 2, 1, 1],  # 3x3 conv, Hswish, SE
            [6, 80, 3, 1, 1, 0],  # 3x3 conv, Hswish
            [6, 80, 7, 1, 1, 0],  # 7x7 conv, Hswish
            [3, 80, 7, 1, 1, 1],  # 7x7 conv, Hswish, SE
            [6, 112, 7, 1, 1, 0],  # 7x7 conv, Hswish
            [6, 112, 3, 1, 1, 0],  # 3x3 conv, Hswish
            [6, 160, 3, 2, 1, 0],  # 3x3 conv, Hswish
            [6, 160, 5, 1, 1, 1],  # 5x5 conv, Hswish, SE
            [6, 160, 5, 1, 1, 1],  # 5x5 conv, Hswish, SE
        ]

        first_filter = 16  # stem output channels
        second_filter = 16  # channels after the separable conv
        second_last_filter = 960  # channels before global pooling
        last_channel = 1280  # channels fed to the classifier

        self.last_channel = last_channel  # 1280
        self.stem = stem(3, first_filter, 2)  # first block: 3x3 conv, stride 2
        self.separable_conv = separable_conv(first_filter, second_filter)  # second block: depthwise separable conv
        self.mb_module = list()  # list of MB (inverted residual) blocks
        input_channel = second_filter  # input channels, 16
        for t, c, k, s, a, se in mb_config:  # iterate over the config rows
            # expansion ratio t, output channels c, kernel size k, stride s, activation flag a, SE flag se
            output_channel = c  # output channels for this block
            act = nn.ReLU(inplace=True) if a == 0 else Hswish(inplace=True)  # choose the activation
            self.mb_module.append(
                InvertedResidual(input_channel, output_channel, k, s, expand_ratio=t, act=act, se=se != 0))
            input_channel = output_channel  # next block's input channels
        self.mb_module = nn.Sequential(*self.mb_module)  # wrap the list in nn.Sequential
        self.conv_before_pooling = conv_before_pooling(input_channel, second_last_filter)  # 1x1 conv before pooling
        self.global_pooling = nn.AdaptiveAvgPool2d(1)  # adaptive average pooling to a 1x1 output
        self.conv_head = conv_head(second_last_filter, last_channel)  # 1x1 conv after pooling
        self.classifier = classifier(last_channel, n_class)  # linear classifier
        self._initialize_weights()  # weight initialization

    def forward(self, x):
        x = self.stem(x)
        x = self.separable_conv(x)
        x = self.mb_module(x)
        x = self.conv_before_pooling(x)
        x = self.global_pooling(x)
        x = self.conv_head(x)
        x = x.view(x.size(0), -1)  # flatten (N, 1280, 1, 1) to (N, 1280)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        # He-style init for convs, constant init for BN, uniform fan-out init for the linear layer
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(0)  # fan-out
                init_range = 1.0 / math.sqrt(n)
                m.weight.data.uniform_(-init_range, init_range)
                m.bias.data.zero_()
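A quick sanity check is to push a dummy batch through the assembled network and confirm the output shape. This is a minimal sketch, assuming the imports at the top of the file and the helper functions and classes defined further below (stem, separable_conv, InvertedResidual, and so on):

model = MoGaA(n_class=1000, input_size=224)
model.eval()
dummy = torch.randn(1, 3, 224, 224)  # one RGB image at the required 224x224 resolution
with torch.no_grad():
    logits = model(dummy)
print(logits.shape)  # torch.Size([1, 1000])
print(sum(p.numel() for p in model.parameters()) / 1e6, "M parameters")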
MoGaB configuration

mb_config = [
    # expansion ratio, output channels, kernel size, stride, act (0 = ReLU, 1 = Hswish), SE block
    [3, 24, 3, 2, 0, 0],
    [3, 24, 3, 1, 0, 0],
    [6, 40, 7, 2, 0, 0],
    [3, 40, 3, 1, 0, 0],
    [6, 40, 5, 1, 0, 0],
    [6, 80, 3, 2, 1, 1],
    [6, 80, 5, 1, 1, 1],
    [3, 80, 3, 1, 1, 0],
    [6, 80, 7, 1, 1, 1],
    [6, 112, 7, 1, 1, 0],
    [3, 112, 5, 1, 1, 0],
    [6, 160, 7, 2, 1, 1],
    [6, 160, 7, 1, 1, 1],
    [6, 160, 3, 1, 1, 1],
]
MoGaC configuration

mb_config = [
    # expansion ratio, output channels, kernel size, stride, act (0 = ReLU, 1 = Hswish), SE block
    [3, 24, 5, 2, 0, 0],
    [3, 24, 3, 1, 0, 0],
    [3, 40, 5, 2, 0, 0],
    [3, 40, 3, 1, 0, 0],
    [3, 40, 5, 1, 0, 0],
    [3, 80, 5, 2, 1, 0],
    [6, 80, 5, 1, 1, 1],
    [3, 80, 5, 1, 1, 0],
    [3, 80, 5, 1, 1, 0],
    [6, 112, 3, 1, 1, 0],
    [6, 112, 3, 1, 1, 1],
    [6, 160, 3, 2, 1, 1],
    [6, 160, 3, 1, 1, 1],
    [6, 160, 3, 1, 1, 1],
]
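The three variants differ only in this table; the surrounding builder code is identical. A hypothetical refactor (not in the original code) would pull the construction loop out of the class so that each variant just passes in its own mb_config; it assumes the InvertedResidual and Hswish classes defined below:

def build_mb_module(mb_config, input_channel=16):
    # Build the stack of inverted residual blocks from a config table.
    layers = []
    for t, c, k, s, a, se in mb_config:
        act = nn.ReLU(inplace=True) if a == 0 else Hswish(inplace=True)
        layers.append(InvertedResidual(input_channel, c, k, s,
                                       expand_ratio=t, act=act, se=se != 0))
        input_channel = c  # the next block consumes this block's output
    return nn.Sequential(*layers), input_channel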
def stem(inp, oup, stride):
    # input channels, output channels, stride
    return nn.Sequential(
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),  # 3x3 conv
        nn.BatchNorm2d(oup),
        # nn.ReLU6(inplace=True)
        Hswish()  # Hswish activation
    )
def separable_conv(inp, oup):
    # depthwise separable convolution
    return nn.Sequential(
        nn.Conv2d(inp, inp, 3, 1, 1, groups=inp, bias=False),  # 3x3 depthwise conv
        nn.BatchNorm2d(inp),
        nn.ReLU(inplace=True),
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),  # 1x1 pointwise conv
        nn.BatchNorm2d(oup),
    )
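groups=inp is what makes the first 3x3 conv depthwise: each input channel is filtered on its own, so the weight tensor is (inp, 1, 3, 3) rather than (oup, inp, 3, 3). A quick parameter-count comparison for inp = oup = 16 (illustrative values only):

depthwise = nn.Conv2d(16, 16, 3, 1, 1, groups=16, bias=False)
standard = nn.Conv2d(16, 16, 3, 1, 1, bias=False)
print(sum(p.numel() for p in depthwise.parameters()))  # 144 = 16 * 1 * 3 * 3
print(sum(p.numel() for p in standard.parameters()))   # 2304 = 16 * 16 * 3 * 3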
class InvertedResidual(nn.Module):
    def __init__(self, inp, oup, kernel_size, stride, expand_ratio, act, se):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        self.act = act
        self.se = se
        padding = kernel_size // 2  # "same" padding
        hidden_dim = round(inp * expand_ratio)  # hidden channels, expanded by expand_ratio
        self.use_res_connect = self.stride == 1 and inp == oup  # residual only when stride is 1 and channels match
        self.conv1 = nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)  # 1x1 expansion conv
        self.bn1 = nn.BatchNorm2d(hidden_dim)
        self.conv2 = nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, padding, groups=hidden_dim, bias=False)  # kernel_size x kernel_size depthwise conv
        self.bn2 = nn.BatchNorm2d(hidden_dim)
        if self.se:  # optional SE block
            self.mid_se = SEModule(hidden_dim, act)
        self.conv3 = nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)  # 1x1 projection conv
        self.bn3 = nn.BatchNorm2d(oup)

    def forward(self, x):
        inputs = x  # keep the input for the residual branch
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.act(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.act(x)
        if self.se:  # apply SE gating
            x = self.mid_se(x)
        x = self.conv3(x)
        x = self.bn3(x)
        if self.use_res_connect:  # residual connection
            return inputs + x
        else:
            return x
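use_res_connect is only true for stride-1 blocks whose input and output channel counts match, e.g. the repeated 160-channel rows at the end of each config. A small check, assuming the class above:

block_skip = InvertedResidual(80, 80, 3, 1, expand_ratio=6,
                              act=nn.ReLU(inplace=True), se=False)
block_down = InvertedResidual(40, 80, 3, 2, expand_ratio=6,
                              act=nn.ReLU(inplace=True), se=False)
print(block_skip.use_res_connect)  # True: stride 1 and matching channels
print(block_down.use_res_connect)  # False: stride 2, channels change
x = torch.randn(1, 40, 28, 28)
print(block_down(x).shape)  # torch.Size([1, 80, 14, 14])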
class SEModule(nn.Module):
    # squeeze-and-excitation block
    def __init__(self, channel, act, reduction=4):
        # reduction=4: the bottleneck uses channel // 4
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)  # adaptive average pooling to a 1x1 output
        self.conv = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, 1, 1, 0, bias=True),  # 1x1 conv (squeeze)
            act
        )
        self.fc = nn.Sequential(
            nn.Conv2d(channel // reduction, channel, 1, 1, 0, bias=True),  # 1x1 conv (excite)
            Hsigmoid()  # gate in (0, 1)
        )

    def forward(self, x):
        y = self.avg_pool(x)
        y = self.conv(y)
        y = self.fc(y)
        return torch.mul(x, y)  # element-wise product: rescale each channel of x by its gate
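The SE path squeezes each channel to a scalar by global average pooling, bottlenecks to channel // reduction, expands back, and gates the input with weights in (0, 1); the output keeps the input's shape, just reweighted per channel. A shape check, assuming the classes above:

se = SEModule(64, act=nn.ReLU(inplace=True))
x = torch.randn(2, 64, 14, 14)
print(se(x).shape)  # torch.Size([2, 64, 14, 14]): same shape, channels rescaled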
class Hswish(nn.Module):
    # Hswish activation
    def __init__(self, inplace=True):
        super(Hswish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return x * F.relu6(x + 3., inplace=self.inplace) / 6.  # Hsigmoid multiplied by x


class Hsigmoid(nn.Module):
    # Hsigmoid activation
    def __init__(self, inplace=True):
        super(Hsigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return F.relu6(x + 3., inplace=self.inplace) / 6.
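Both activations are piecewise-linear approximations that avoid computing an exponential: Hsigmoid(x) = ReLU6(x + 3) / 6 saturates at 0 below x = -3 and at 1 above x = 3, and Hswish(x) = x * Hsigmoid(x). A few sample values, assuming the classes above:

x = torch.tensor([-4., -3., 0., 3., 4.])
print(Hsigmoid(inplace=False)(x))  # approximately [0, 0, 0.5, 1, 1]
print(Hswish(inplace=False)(x))    # approximately [0, 0, 0, 3, 4]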
def conv_before_pooling(inp, oup):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),  # 1x1 conv
        nn.BatchNorm2d(oup),
        # nn.ReLU6(inplace=True)
        Hswish()
    )


def conv_head(inp, oup):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 1, bias=False),  # 1x1 conv
        Hswish(inplace=True),
        nn.Dropout2d(0.2)  # channel dropout
    )


def classifier(inp, nclass):
    # final linear classifier
    return nn.Linear(inp, nclass)

Paper link
Code link
