(PyTorch Deep Learning) A PyTorch Implementation of SE-ResNet

The residual block:

import torch
import torch.nn as nn

class Residual_block(nn.Module):
    def __init__(self, nin, middle_out, nout, kernel_size=3, padding=1):
        super(Residual_block, self).__init__()
        self.out_channel = middle_out

        # 1x1 convolution so the shortcut's channel count matches the block output
        self.shortcut = nn.Sequential(
            nn.Conv2d(nin, nout, kernel_size=1),
            nn.BatchNorm2d(nout)
        )
        self.block = nn.Sequential(
            nn.Conv2d(nin, middle_out, kernel_size=kernel_size, padding=padding),
            nn.BatchNorm2d(middle_out),
            nn.ReLU(inplace=True),
            nn.Conv2d(middle_out, nout, kernel_size=kernel_size, padding=padding),
            nn.BatchNorm2d(nout)
        )

    def forward(self, input):
        x = self.block(input)
        return nn.ReLU(inplace=True)(x + self.shortcut(input))

Note: before the input is added to the block's output, a shortcut layer (a 1x1 convolution plus batch norm) is needed to adjust the input's channel count to match the output.
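
A quick shape check makes this concrete; the tensor sizes and channel counts below are illustrative assumptions, not values from the original post:

# Shape check for the residual block; sizes here are illustrative assumptions
x = torch.randn(8, 64, 32, 32)                        # batch of 8, 64 input channels
block = Residual_block(nin=64, middle_out=32, nout=128)
print(block(x).shape)                                 # torch.Size([8, 128, 32, 32])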

SE-Net:

class SE(nn.Module):
    def __init__(self, nin, middle_out, nout, reduce=16):
        super(SE, self).__init__()
        self.block = Residual_block(nin, middle_out, nout)

        self.shortcut = nn.Sequential(
            nn.Conv2d(nin, nout, kernel_size=1),
            nn.BatchNorm2d(nout)
        )
        # excitation: bottleneck MLP that produces per-channel weights in (0, 1)
        self.se = nn.Sequential(
            nn.Linear(nout, nout // reduce),
            nn.ReLU(inplace=True),
            nn.Linear(nout // reduce, nout),
            nn.Sigmoid()
        )

    def forward(self, input):
        x = self.block(input)
        batch_size, channel, _, _ = x.size()
        # squeeze: global average pooling, kernel_size equal to the feature map's spatial size
        y = nn.AvgPool2d(x.size()[2])(x)
        y = y.view(y.shape[0], -1)
        y = self.se(y).view(batch_size, channel, 1, 1)
        # rescale the feature maps channel-wise, then add the shortcut
        y = x * y.expand_as(x)
        out = y + self.shortcut(input)
        return out

Note: the squeeze step uses an average-pooling layer whose kernel_size is dynamic: it is set to the feature map's spatial size, so the pooling is global. (nn.AdaptiveAvgPool2d(1) is an equivalent, size-agnostic alternative.)
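
As with the residual block, a minimal usage sketch follows; the input sizes, the two-block stack, and the 10-way classification head are all illustrative assumptions, not the original network:

# Shape check for the SE block; sizes here are illustrative assumptions
x = torch.randn(8, 64, 32, 32)
se_block = SE(nin=64, middle_out=32, nout=128)
print(se_block(x).shape)                              # torch.Size([8, 128, 32, 32])

# A toy SE-ResNet-style classifier assembled from the blocks above (assumed layout)
model = nn.Sequential(
    SE(3, 16, 64),
    SE(64, 32, 128),
    nn.AdaptiveAvgPool2d(1),                          # pool to 1x1 regardless of input size
    nn.Flatten(),
    nn.Linear(128, 10),                               # hypothetical 10-class head
)
print(model(torch.randn(4, 3, 32, 32)).shape)         # torch.Size([4, 10])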
