[Graduation Project] Style Transfer with CycleGAN [Part 4]: Tricks for Training the Model

Trick 1: Label smoothing

Given the two target labels Real = 1 and Fake = 0: for each new sample, if it is real, replace its label with a random value between 0.7 and 1.2; if it is fake, replace its label with a random value between 0.0 and 0.3.

Modify the __init__ function of the GANLoss class in models/networks.py:

Original code:

    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        """ Initialize the GANLoss class.

        Parameters:
            gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
            target_real_label (bool) - - label for a real image
            target_fake_label (bool) - - label of a fake image

        Note: Do not use sigmoid as the last layer of Discriminator.
        LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
        """
        super(GANLoss, self).__init__()
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.gan_mode = gan_mode
        if gan_mode == 'lsgan':
            self.loss = nn.MSELoss()
        elif gan_mode == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif gan_mode in ['wgangp']:
            self.loss = None
        else:
            raise NotImplementedError('gan mode %s not implemented' % gan_mode)

Modified code:

    def __init__(self, gan_mode):
        """ Initialize the GANLoss class.

        Parameters:
            gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.

        Note: Do not use sigmoid as the last layer of Discriminator.
        LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
        """
        super(GANLoss, self).__init__()
        # Smoothed labels; requires `import random` at the top of models/networks.py.
        # The real label is drawn from {0.7, 0.8, ..., 1.2} and the fake label
        # from {0.0, 0.1, 0.2, 0.3}, once per GANLoss instance.
        target_real_label = random.randint(7, 12) * 0.1
        target_fake_label = random.randint(0, 3) * 0.1
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.gan_mode = gan_mode
        if gan_mode == 'lsgan':
            self.loss = nn.MSELoss()
        elif gan_mode == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif gan_mode in ['wgangp']:
            self.loss = None
        else:
            raise NotImplementedError('gan mode %s not implemented' % gan_mode)
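Note that in the modification above, the two smoothed labels are drawn once, when GANLoss is constructed, so every sample in a run sees the same pair of values. To smooth per sample as described at the start of this trick, one could instead redraw the label on every call to get_target_tensor; a minimal sketch of such an override (this is not the change made above):

    def get_target_tensor(self, prediction, target_is_real):
        """Redraw a smoothed label on every call: real -> a random value in
        [0.7, 1.2], fake -> a random value in [0.0, 0.3].
        `random` must be imported at the top of models/networks.py."""
        if target_is_real:
            value = random.uniform(0.7, 1.2)
        else:
            value = random.uniform(0.0, 0.3)
        return torch.full_like(prediction, value)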

Trick 2: Add noise to both the real and the generated images before feeding them to the discriminator

Modify the backward_D_A and backward_D_B functions of the CycleGANModel class in models/cycle_gan_model.py:

Original code:

    def backward_D_A(self):
        """Calculate GAN loss for discriminator D_A"""
        fake_B = self.fake_B_pool.query(self.fake_B)
        self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)

    def backward_D_B(self):
        """Calculate GAN loss for discriminator D_B"""
        fake_A = self.fake_A_pool.query(self.fake_A)
        self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)

Modified code:

    def add_salt_pepper_noise(self, img, Nd=0.1):
        """Add salt-and-pepper noise to a (B, C, H, W) image tensor.

        Requires `import numpy as np` at the top of models/cycle_gan_model.py.
        CycleGAN images are normalized to [-1, 1], so pepper pixels are set
        to -1.0 and salt pixels to 1.0 (not 0/255 as for uint8 images).
        Nd is the noise density; Sd is the fraction of pixels left unchanged.
        """
        Sd = 1 - Nd
        b, c, h, w = img.size()
        # Tensor -> NumPy: permute (B, C, H, W) to (B, H, W, C); view() would
        # scramble the pixel layout. copy() keeps the input tensor untouched.
        img_np = img.detach().cpu().permute(0, 2, 3, 1).numpy().copy()
        # Single-channel mask: 0 = pepper, 1 = salt, 2 = keep the pixel
        mask = np.random.choice((0, 1, 2), size=(b, h, w, 1), p=[Nd / 2.0, Nd / 2.0, Sd])
        mask = np.repeat(mask, c, axis=3)  # replicate across the channel axis
        img_np[mask == 0] = -1.0
        img_np[mask == 1] = 1.0
        # NumPy -> Tensor: back to (B, C, H, W) on the original device
        return torch.from_numpy(img_np).permute(0, 3, 1, 2).to(img.device)

    def backward_D_A(self):
        """Calculate GAN loss for discriminator D_A"""
        fake_B = self.fake_B_pool.query(self.fake_B)
        # Add salt-and-pepper noise to both the real and the generated
        # images before the discriminator sees them
        real_B = self.add_salt_pepper_noise(self.real_B)
        fake_B = self.add_salt_pepper_noise(fake_B)
        self.loss_D_A = self.backward_D_basic(self.netD_A, real_B, fake_B)

    def backward_D_B(self):
        """Calculate GAN loss for discriminator D_B"""
        fake_A = self.fake_A_pool.query(self.fake_A)
        real_A = self.add_salt_pepper_noise(self.real_A)
        fake_A = self.add_salt_pepper_noise(fake_A)
        self.loss_D_B = self.backward_D_basic(self.netD_B, real_A, fake_A)

The approach to adding noise follows the salt-and-pepper noise from the referenced article.

Also, since fake_A and the other image variables are (B, C, H, W) tensors while the article's algorithm works on NumPy arrays, each tensor has to go Tensor -> NumPy -> algorithm -> Tensor. Note that the conversion must use permute (not view) to reorder (B, C, H, W) into (B, H, W, C), and call detach().cpu() before .numpy(); the result is then moved back to the original device.
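As a quick sanity check of that round trip, a minimal standalone sketch (the shapes are illustrative; no noise is applied, so the round trip should be lossless):

    import torch

    # Dummy batch in CycleGAN's normalized range [-1, 1]
    x = torch.rand(1, 3, 4, 4) * 2 - 1           # (B, C, H, W)

    # Tensor -> NumPy: permute to (B, H, W, C); view() would scramble the
    # pixel layout. copy() keeps the original tensor untouched.
    x_np = x.detach().cpu().permute(0, 2, 3, 1).numpy().copy()

    # ... apply any NumPy-based noise algorithm to x_np here ...

    # NumPy -> Tensor: back to (B, C, H, W) on the original device
    y = torch.from_numpy(x_np).permute(0, 3, 1, 2).to(x.device)

    assert y.shape == x.shape and torch.allclose(x, y)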

Trick 3: Update the discriminator more often than the generator

In the original code, the Discriminator and the Generator are trained at a 1:1 ratio: each training iteration updates the Generator once and the Discriminator once. Here I instead update the Generator once per iteration and then train the Discriminator three times on the fake images it produced.

Rationale:

1) The salt-and-pepper noise from Trick 2 makes the Discriminator harder to train.

2) Training the Discriminator more often puts more pressure on the Generator, pushing it to produce better results.

Modify the optimize_parameters function of the CycleGANModel class in models/cycle_gan_model.py:

Original code:

    def optimize_parameters(self):
        """Calculate losses, gradients, and update network weights; called in every training iteration"""
        # forward
        self.forward()      # compute fake images and reconstruction images.
        # G_A and G_B
        self.set_requires_grad([self.netD_A, self.netD_B], False)  # Ds require no gradients when optimizing Gs
        self.optimizer_G.zero_grad()  # set G_A and G_B's gradients to zero
        self.backward_G()             # calculate gradients for G_A and G_B
        self.optimizer_G.step()       # update G_A and G_B's weights
        # D_A and D_B
        self.set_requires_grad([self.netD_A, self.netD_B], True)
        self.optimizer_D.zero_grad()   # set D_A and D_B's gradients to zero
        self.backward_D_A()      # calculate gradients for D_A
        self.backward_D_B()      # calculate gradients for D_B
        self.optimizer_D.step()  # update D_A and D_B's weights

Modified code:

    def optimize_parameters(self):
        """Calculate losses, gradients, and update network weights; called in every training iteration"""
        # forward
        self.forward()      # compute fake images and reconstruction images.
        # G_A and G_B
        self.set_requires_grad([self.netD_A, self.netD_B], False)  # Ds require no gradients when optimizing Gs
        self.optimizer_G.zero_grad()  # set G_A and G_B's gradients to zero
        self.backward_G()             # calculate gradients for G_A and G_B
        self.optimizer_G.step()       # update G_A and G_B's weights
        # D_A and D_B
        self.set_requires_grad([self.netD_A, self.netD_B], True)
        for i in range(3):  # train D three times per G update
            self.optimizer_D.zero_grad()   # set D_A and D_B's gradients to zero
            self.backward_D_A()      # calculate gradients for D_A
            self.backward_D_B()      # calculate gradients for D_B
            self.optimizer_D.step()  # update D_A and D_B's weights
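One side effect worth knowing: each pass through the loop calls backward_D_A and backward_D_B again, which re-query fake_B_pool and fake_A_pool, so the three Discriminator updates see a mix of the current fakes and historical ones drawn from the image pool rather than the exact same batch three times. The 3:1 ratio itself is just a hyperparameter to tune; updating the discriminator several times per generator step is the same idea as the n_critic schedule used when training WGANs.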
