Detailed Walkthrough of a cGAN Implementation (Part 1)

Step 1: Initialization and hyperparameter definitions

import argparse
import os
import numpy as np
import math

import torchvision.transforms as transforms
from torchvision.utils import save_image

from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable

import torch.nn as nn
import torch.nn.functional as F
import torch

os.makedirs("images", exist_ok=True)

parser = argparse.ArgumentParser()
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--latent_dim", type=int, default=64, help="dimensionality of the latent space")
parser.add_argument("--n_classes", type=int, default=10, help="number of classes for dataset")
parser.add_argument("--img_size", type=int, default=28, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=400, help="interval between image sampling")
opt = parser.parse_args()
print(opt)

# The generator below reshapes its flat output to img_shape, so define it once here
img_shape = (opt.channels, opt.img_size, opt.img_size)
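
The --lr, --b1 and --b2 arguments are the learning rate and the Adam β1/β2 coefficients. They are not consumed in this part yet; as a minimal sketch (assuming a later training loop uses the standard torch.optim.Adam API on an already-instantiated generator and discriminator), they would be used like this:

# Sketch only: building the optimizers from the parsed arguments
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))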

Step 2: Building the generator model

# Generator
# In the test below: z has shape (100, latent_dim) and labels has shape (100,) with values in 0..9
class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()

        # Embed each class label (0..9) into an n_classes-dimensional vector
        self.label_emb = nn.Embedding(opt.n_classes, opt.n_classes)

        # 74 -> 128 -> 256 -> 512 -> 1024 -> prod(img_shape):
        # maps (batch, latent_dim + n_classes) to a flattened (1, 28, 28) image
        self.model = nn.Sequential(
            nn.Linear(opt.latent_dim + opt.n_classes, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, int(np.prod(img_shape))),
            nn.Tanh()
        )

    def forward(self, z, labels):
        # z is the Gaussian noise sampled later in the script, shape (batch_size, latent_dim)
        # Concatenate the label embedding and the noise vector to form the conditional input
        gen_input = torch.cat((self.label_emb(labels), z), -1)
        img = self.model(gen_input)
        # img.size(0) is the current batch size; reshape the flat output back to an image
        img = img.view(img.size(0), *img_shape)
        return img

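Where does the 74 in the first Linear layer come from? A standalone shape check, as a sketch using the defaults above (n_classes=10, latent_dim=64) and a hypothetical batch of 16:

# Shape check for the conditional input (sketch only, not part of the training script)
emb = nn.Embedding(10, 10)                  # n_classes -> n_classes embedding
demo_labels = torch.randint(0, 10, (16,))   # a batch of 16 class labels
demo_z = torch.randn(16, 64)                # latent_dim = 64
demo_input = torch.cat((emb(demo_labels), demo_z), -1)
print(demo_input.shape)                     # torch.Size([16, 74]) -> fed into Linear(74, 128)
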
Verifying that the generator works

# Quick check that the generator runs correctly
use_gpu = True if torch.cuda.is_available() else False
FloatTensor = torch.cuda.FloatTensor if use_gpu else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_gpu else torch.LongTensor

n_row = 10
# Sample n_row ** 2 = 100 noise vectors of dimension latent_dim
z = Variable(FloatTensor(np.random.normal(0, 1, (n_row ** 2, opt.latent_dim))))
# Get labels ranging from 0 to n_classes for n rows
labels = np.array([num for _ in range(n_row) for num in range(n_row)])
labels = Variable(LongTensor(labels))

# Instantiate the generator
generator = Generator()

if use_gpu:
    print("use gpu for training")
    generator = generator.cuda()
    z = z.to("cuda")
    labels = labels.to("cuda")

print(z.shape)
print(labels.shape)

imgs=generator(z,labels)

print(imgs.shape)
 

The output is as follows:

Namespace(b1=0.5, b2=0.999, batch_size=64, channels=1, img_size=28, latent_dim=64, lr=0.0002, n_classes=10, n_cpu=8, n_epochs=200, sample_interval=400)
use gpu for training
torch.Size([100, 64])
torch.Size([100])
torch.Size([100, 1, 28, 28])

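The shapes confirm that the generator turns 100 conditioned noise vectors into 100 images of size 1×28×28. Since save_image is already imported and the images/ directory was created in Step 1, a minimal sketch of writing these samples to disk as a 10×10 grid (the file name here is arbitrary):

# Sketch: save the 100 generated samples as a 10x10 grid; each grid row shows digits 0-9
save_image(imgs.data, "images/generator_check.png", nrow=n_row, normalize=True)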