ACGAN Model: PyTorch Implementation

The discriminator below outputs both a real/fake validity score and a class prediction, while the generator conditions on a class label by multiplying the label embedding into the input noise vector.

import torch
import torch.nn as nn

class Discriminator(nn.Module):  # discriminator
    def __init__(self, img_size=(64, 64), num_classes=2):  # constructor
        super(Discriminator, self).__init__()  # initialize the parent class

        self.img_size = img_size  # image size, default (64, 64), three-channel images
        self.num_classes = num_classes  # number of classes

        self.conv1 = nn.Conv2d(3, 128, 4, 2, 1)  # conv layer
        self.conv2 = nn.Conv2d(128, 256, 4, 2, 1)  # conv layer
        self.bn2 = nn.BatchNorm2d(256)  # batch norm
        self.conv3 = nn.Conv2d(256, 512, 4, 2, 1)  # conv layer
        self.bn3 = nn.BatchNorm2d(512)  # batch norm
        self.conv4 = nn.Conv2d(512, 1024, 4, 2, 1)  # conv layer
        self.bn4 = nn.BatchNorm2d(1024)  # batch norm
        self.leakyrelu = nn.LeakyReLU(0.2)  # LeakyReLU activation
        self.linear1 = nn.Linear(int(1024 * (self.img_size[0] / 2 ** 4) * (self.img_size[1] / 2 ** 4)), 1)  # linear layer for the real/fake score
        self.linear2 = nn.Linear(int(1024 * (self.img_size[0] / 2 ** 4) * (self.img_size[1] / 2 ** 4)),
                                 self.num_classes)  # linear layer for the class prediction
        self.sigmoid = nn.Sigmoid()  # sigmoid activation
        self.softmax = nn.Softmax(dim=1)  # softmax activation

        self._init_weights()  # initialize model weights

    def _init_weights(self):  # weight initialization
        for m in self.modules():  # iterate over submodules
            if isinstance(m, nn.Conv2d):  # conv layers
                nn.init.normal_(m.weight, 0, 0.02)  # weights drawn from a normal distribution
                nn.init.constant_(m.bias, 0)  # biases set to 0
            elif isinstance(m, nn.BatchNorm2d):  # batch norm layers
                nn.init.constant_(m.weight, 1)  # weights set to 1
                nn.init.constant_(m.bias, 0)  # biases set to 0
            elif isinstance(m, nn.Linear):  # linear layers
                nn.init.normal_(m.weight, 0, 0.02)  # weights drawn from a normal distribution
                nn.init.constant_(m.bias, 0)  # biases set to 0

    def forward(self, x):  # forward pass
        x = self.conv1(x)  # conv, (n,3,64,64) --> (n,128,32,32)
        x = self.leakyrelu(x)  # LeakyReLU activation
        x = self.conv2(x)  # conv, (n,128,32,32) --> (n,256,16,16)
        x = self.bn2(x)  # batch norm
        x = self.leakyrelu(x)  # LeakyReLU activation
        x = self.conv3(x)  # conv, (n,256,16,16) --> (n,512,8,8)
        x = self.bn3(x)  # batch norm
        x = self.leakyrelu(x)  # LeakyReLU activation
        x = self.conv4(x)  # conv, (n,512,8,8) --> (n,1024,4,4)
        x = self.bn4(x)  # batch norm
        x = self.leakyrelu(x)  # LeakyReLU activation
        x = torch.flatten(x, 1)  # flatten the 3D feature maps into a 1D feature vector, (n,1024,4,4) --> (n,1024*4*4)
        # real/fake score computed from the feature vector x
        validity = self.linear1(x)  # linear layer, (n,1024*4*4) --> (n,1)
        validity = self.sigmoid(validity)  # sigmoid squashes the output into (0,1)
        # class prediction computed from the feature vector x
        label = self.linear2(x)  # linear layer, (n,1024*4*4) --> (n,2)
        label = self.softmax(label)  # softmax squashes the outputs into (0,1)

        return (validity, label)  # return (real/fake score, class prediction)
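
As a quick sanity check, the sketch below runs a random batch through the discriminator and prints the output shapes; the batch size of 8 is an arbitrary assumption for illustration.

# Minimal sketch: shape check for the discriminator (assumes the default 64x64, 2-class setup above).
_D = Discriminator(img_size=(64, 64), num_classes=2)
_imgs = torch.randn(8, 3, 64, 64)  # random stand-in for a batch of 8 RGB images
_validity, _label = _D(_imgs)
print(_validity.shape, _label.shape)  # expected: torch.Size([8, 1]) torch.Size([8, 2])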


class Generator(nn.Module):  # generator
    def __init__(self, img_size=(64, 64), num_classes=2, latent_dim=100):  # constructor
        super(Generator, self).__init__()  # initialize the parent class
        self.img_size = img_size  # image size, default (64, 64), three-channel images
        self.num_classes = num_classes  # number of classes
        self.latent_dim = latent_dim  # length of the input noise vector, default 100

        self.linear = nn.Linear(self.latent_dim, 4 * 4 * 1024)  # linear layer
        self.bn0 = nn.BatchNorm2d(1024)  # batch norm
        self.deconv1 = nn.ConvTranspose2d(1024, 512, 4, 2, 1)  # transposed conv
        self.bn1 = nn.BatchNorm2d(512)  # batch norm
        self.deconv2 = nn.ConvTranspose2d(512, 256, 4, 2, 1)  # transposed conv
        self.bn2 = nn.BatchNorm2d(256)  # batch norm
        self.deconv3 = nn.ConvTranspose2d(256, 128, 4, 2, 1)  # transposed conv
        self.bn3 = nn.BatchNorm2d(128)  # batch norm
        self.deconv4 = nn.ConvTranspose2d(128, 3, 4, 2, 1)  # transposed conv
        self.relu = nn.ReLU(inplace=True)  # ReLU activation
        self.tanh = nn.Tanh()  # tanh activation
        self.embedding = nn.Embedding(self.num_classes, self.latent_dim)  # label embedding

        self._init_weights()  # initialize model weights

    def _init_weights(self):  # weight initialization
        for m in self.modules():  # iterate over submodules
            if isinstance(m, nn.ConvTranspose2d):  # transposed conv layers
                nn.init.normal_(m.weight, 0, 0.02)  # weights drawn from a normal distribution
                nn.init.constant_(m.bias, 0)  # biases set to 0
            elif isinstance(m, nn.BatchNorm2d):  # batch norm layers
                nn.init.constant_(m.weight, 1)  # weights set to 1
                nn.init.constant_(m.bias, 0)  # biases set to 0
            elif isinstance(m, nn.Linear):  # linear layers
                nn.init.normal_(m.weight, 0, 0.02)  # weights drawn from a normal distribution
                nn.init.constant_(m.bias, 0)  # biases set to 0

    def forward(self, input: tuple):  # forward pass
        noise, label = input  # unpack the noise vector and the label from the input tuple
        label = self.embedding(label)  # embed the label into a dense vector with the same size as the noise vector
        z = torch.multiply(noise, label)  # multiply noise and label embedding to get a label-conditioned noise vector
        z = self.linear(z)  # linear layer, (n,100) --> (n,1024*4*4)
        z = z.view((-1, 1024, int(self.img_size[0] / 2 ** 4),
                    int(self.img_size[1] / 2 ** 4)))  # reshape the 1D feature vector into 3D feature maps, (n,1024*4*4) --> (n,1024,4,4)
        z = self.bn0(z)  # batch norm
        z = self.relu(z)  # ReLU activation
        z = self.deconv1(z)  # transposed conv, (n,1024,4,4) --> (n,512,8,8)
        z = self.bn1(z)  # batch norm
        z = self.relu(z)  # ReLU activation
        z = self.deconv2(z)  # transposed conv, (n,512,8,8) --> (n,256,16,16)
        z = self.bn2(z)  # batch norm
        z = self.relu(z)  # ReLU activation
        z = self.deconv3(z)  # transposed conv, (n,256,16,16) --> (n,128,32,32)
        z = self.bn3(z)  # batch norm
        z = self.relu(z)  # ReLU activation
        z = self.deconv4(z)  # transposed conv, (n,128,32,32) --> (n,3,64,64)
        z = self.tanh(z)  # tanh squashes the output into (-1,1)

        return z  # return the generated images
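
A minimal end-to-end sketch chaining the two networks is shown below. The loss choices (BCELoss on the sigmoid validity score, NLLLoss on the log of the softmax class output) and the batch size are assumptions consistent with the activations above, not necessarily the original training setup.

# Minimal sketch: generate labelled images and compute an example generator loss.
# Loss functions and batch size here are illustrative assumptions.
G = Generator(img_size=(64, 64), num_classes=2, latent_dim=100)
D = Discriminator(img_size=(64, 64), num_classes=2)

noise = torch.randn(8, 100)              # latent noise vectors
labels = torch.randint(0, 2, (8,))       # random class labels
fake_imgs = G((noise, labels))           # (8, 3, 64, 64), values in (-1, 1)

validity, cls_probs = D(fake_imgs)       # (8, 1) real/fake scores, (8, 2) class probabilities
adv_loss = nn.BCELoss()(validity, torch.ones(8, 1))            # generator tries to look "real"
cls_loss = nn.NLLLoss()(torch.log(cls_probs + 1e-8), labels)   # class head already outputs probabilities
g_loss = adv_loss + cls_loss
print(fake_imgs.shape, g_loss.item())

Because the discriminator already applies softmax, NLLLoss on the log-probabilities is used here rather than CrossEntropyLoss, which expects raw logits.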
