A DCGAN (Deep Convolutional GAN) combines a CNN with the original GAN: both the generator and the discriminator are built from deep convolutional networks.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
# Prepare the data
(images, labels), (_, _) = keras.datasets.mnist.load_data()
images = np.expand_dims(images, -1)   # (60000, 28, 28) -> (60000, 28, 28, 1)
images = images / 127.5 - 1           # scale pixel values to [-1, 1]
images = images.astype('float32')
BATCH_SIZE = 128
BUFFER_SIZE = 60000
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
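A quick optional sanity check (not part of the original listing) confirms the pipeline yields batches of the expected shape:
# Optional sanity check: the first batch should have shape (128, 28, 28, 1)
for batch in dataset.take(1):
    print(batch.shape)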
# Define the generator
def generate_model():
    model = keras.Sequential()
    model.add(layers.Dense(7*7*256, input_shape=(100,), use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((7, 7, 256)))
    # The first transposed convolution uses strides=(1,1), so the spatial size
    # is unchanged; the output of this block is 7*7*128
    model.add(layers.Conv2DTranspose(128, (3, 3), strides=(1, 1), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    # After this block the image is 14*14*64
    model.add(layers.Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    # After this block the image is 28*28*1; tanh keeps pixels in [-1, 1],
    # matching the scaling applied to the training images above
    model.add(layers.Conv2DTranspose(1, (3, 3), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    return model
#----------------------------------------------------------------------
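As a quick smoke test (the names test_gen and test_noise are mine, not part of the training code), an untrained generator should map a batch of noise vectors to 28x28x1 images:
# Smoke test: feed random noise through an untrained generator
test_gen = generate_model()
test_noise = tf.random.normal([1, 100])
print(test_gen(test_noise, training=False).shape)   # expected: (1, 28, 28, 1)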
# Define the discriminator
def discriminate_model():
    model = keras.Sequential()
    model.add(layers.Conv2D(64, (3, 3), input_shape=(28, 28, 1), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Flatten())
    model.add(layers.Dense(1))   # single logit: real vs. fake
    return model
#--------------------------------------------------------------------
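Likewise, an optional shape check (test_disc is a name I introduce here) confirms the discriminator emits one logit per image:
# Optional shape check: a batch of two images yields two logits
test_disc = discriminate_model()
print(test_disc(tf.random.normal([2, 28, 28, 1]), training=False).shape)   # (2, 1)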
# Instantiate the models
generator = generate_model()
discriminator = discriminate_model()
# Define the loss functions
cross_entropy = keras.losses.BinaryCrossentropy(from_logits=True)
def gen_loss(fake_out):
    # the generator wants the discriminator to label fakes as real (1)
    return cross_entropy(tf.ones_like(fake_out), fake_out)
def disc_loss(real_out, fake_out):
    # the discriminator should label real images 1 and generated images 0
    real_loss = cross_entropy(tf.ones_like(real_out), real_out)
    fake_loss = cross_entropy(tf.zeros_like(fake_out), fake_out)
    total_loss = real_loss + fake_loss
    return total_loss
#---------------------------------------------------------------------
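For intuition, here is a hypothetical check on fixed logits (not part of the training code): a confidently correct discriminator gives a small disc_loss, while a confidently rejected fake gives the generator a large gen_loss:
# Hypothetical sanity check of the loss functions on fixed logits
real_logits = tf.constant([[5.0]])    # discriminator is confident: real
fake_logits = tf.constant([[-5.0]])   # discriminator is confident: fake
print(disc_loss(real_logits, fake_logits).numpy())  # small, roughly 0.013
print(gen_loss(fake_logits).numpy())                # large, roughly 5.007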
# Define the optimizers (learning rate 2e-4, as in the DCGAN paper)
gen_optimizer = keras.optimizers.Adam(2e-4)
disc_optimizer = keras.optimizers.Adam(2e-4)
#---------------------------------------------------------------------
# Custom training step
EPOCHS = 100
noise_dim = 100
num_exp_to_generate = 16
seed = tf.random.normal([num_exp_to_generate, noise_dim])   # fixed noise for monitoring progress
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        real_out = discriminator(images, training=True)
        gen_image = generator(noise, training=True)
        fake_out = discriminator(gen_image, training=True)
        generate_loss = gen_loss(fake_out)
        discriminate_loss = disc_loss(real_out, fake_out)
    gradient_gen = gen_tape.gradient(generate_loss, generator.trainable_variables)
    gradient_disc = disc_tape.gradient(discriminate_loss, discriminator.trainable_variables)
    gen_optimizer.apply_gradients(zip(gradient_gen, generator.trainable_variables))
    disc_optimizer.apply_gradients(zip(gradient_disc, discriminator.trainable_variables))
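Optionally (my addition, not in the original listing), the step can be compiled into a TensorFlow graph for a noticeable speedup:
# Optional: compile the training step into a graph
# (equivalent to decorating train_step with @tf.function)
train_step = tf.function(train_step)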
#-------------------------------------------------------------------
# Custom plotting function
def generate_plot_image(gen_model, test_noise):
    pre_image = gen_model(test_noise, training=False)
    fig = plt.figure(figsize=(4, 4))
    for i in range(pre_image.shape[0]):
        plt.subplot(4, 4, i+1)
        # drop the channel axis and map [-1, 1] back to [0, 1] for display
        plt.imshow((pre_image[i, :, :, 0] + 1) / 2, cmap='gray')
        plt.axis('off')
    plt.show()
#------------------------------------------------------------------
# Training entry point: one pass over the dataset per epoch,
# then plot samples from the fixed seed to track progress
def train(dataset, epochs):
    for epoch in range(epochs):
        for image_batch in dataset:
            train_step(image_batch)
        generate_plot_image(generator, seed)
train(dataset, EPOCHS)
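After training, the generator can be kept around for later sampling; a minimal sketch, assuming the default Keras SavedModel format of TF 2.x is acceptable:
# Optional follow-up (assumed, not in the original): persist the trained generator
generator.save('dcgan_generator')
# reload later with: keras.models.load_model('dcgan_generator')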