I have been learning about generative adversarial networks (GANs) recently. This post records a simple GAN that is trained to fit a Gaussian distribution; the Gaussian data itself is generated in code.
A GAN can be split into two parts: a generator network, denoted G (generator), and a discriminator network, denoted D (discriminator). One problem during training is that if the discriminator performs poorly at the start, it cannot reliably tell fakes from real data no matter what the generator feeds it, so the generator receives little useful feedback. For this reason, before the actual training we add a pretraining network D_pre that pretrains the discriminator, so that the discriminator does not start from a random initialization; with this head start it is much easier to get a genuine adversarial dynamic.
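For reference, the losses defined in the code below implement the minimax objective from the original GAN paper (Goodfellow et al., 2014): D is trained to maximize E_x[log D(x)] + E_z[log(1 - D(G(z)))], while G is trained to fool D; the exact loss expressions used here appear further down in the code.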
During the actual training, the discriminator receives two inputs, the real data x and the samples the generator produces from the noise z, while the generator receives only the noise z. The full code is below. Note the section that defines parse_args: it performs command-line argument parsing, so the hyper-parameters can be set via flags when the script is run.
import argparse
import numpy as np
from scipy.stats import norm
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
seed = 42
np.random.seed(seed)
tf.set_random_seed(seed)
class DataDistribution(object):
def __init__(self):
        self.mu = 4  # mean of the real data distribution
        self.sigma = 0.5  # standard deviation of the real data distribution
def sample(self, N):
samples = np.random.normal(self.mu, self.sigma, N)
samples.sort()
return samples
class GeneratorDistribution(object):
def __init__(self, range):
        self.range = range  # half-width of the interval the noise is drawn from
    def sample(self, N):
        # stratified sampling: evenly spaced points plus a small random jitter
        return np.linspace(-self.range, self.range, N) + \
            np.random.random(N) * 0.01
def linear(input, output_dim, scope=None, stddev=1.0):  # fully connected layer; controls how the w and b parameters are initialized
norm = tf.random_normal_initializer(stddev=stddev)
const = tf.constant_initializer(0.0)
with tf.variable_scope(scope or 'linear'):
        w = tf.get_variable('w', [input.get_shape()[1], output_dim], initializer=norm)  # w is initialized from a Gaussian
        b = tf.get_variable('b', [output_dim], initializer=const)  # b is initialized to the constant 0
return tf.matmul(input, w) + b
# The generator is a very simple network with only two layers
def generator(input, h_dim):
h0 = tf.nn.softplus(linear(input, h_dim, 'g0'))
h1 = linear(h0, 1, 'g1')
return h1
def discriminator(input, h_dim):
    h0 = tf.tanh(linear(input, h_dim * 2, 'd0'))  # input holds either real samples or generated ones
    h1 = tf.tanh(linear(h0, h_dim * 2, 'd1'))
    h2 = tf.tanh(linear(h1, h_dim * 2, 'd2'))
    h3 = tf.sigmoid(linear(h2, 1, 'd3'))  # probability that the input is real
return h3
# learning-rate schedule: the rate decays exponentially during training
def optimizer(loss, var_list, initial_learning_rate):
    decay = 0.95  # decay rate
    num_decay_steps = 150  # decay the learning rate every 150 iterations
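    # with staircase=True the effective rate is
    # initial_learning_rate * decay ** floor(global_step / num_decay_steps)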
    batch = tf.Variable(0, trainable=False)  # global step counter, not optimized
learning_rate = tf.train.exponential_decay(
initial_learning_rate,
batch,
num_decay_steps,
decay,
staircase=True
)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(  # gradient-descent step that minimizes the given loss
loss,
global_step=batch,
var_list=var_list
)
return optimizer
class GAN(object):
def __init__(self, data, gen, num_steps, batch_size, log_every):
self.data = data
self.gen = gen
self.num_steps = num_steps
self.batch_size = batch_size
self.log_every = log_every
        self.mlp_hidden_size = 4  # number of hidden units per layer
self.learning_rate = 0.03
self._create_model()
def _create_model(self):
with tf.variable_scope('D_pre'):
self.pre_input = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
self.pre_labels = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
D_pre = discriminator(self.pre_input, self.mlp_hidden_size)
            self.pre_loss = tf.reduce_mean(tf.square(D_pre - self.pre_labels))  # squared error between D_pre's output and the target density
self.pre_opt = optimizer(self.pre_loss, None, self.learning_rate)
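            # passing var_list=None makes the optimizer update every trainable
            # variable, which at this point in graph construction is just
            # D_pre's weights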
# This defines the generator network - it takes samples from a noise
# distribution as input, and passes them through an MLP.
with tf.variable_scope('Gen'):
            self.z = tf.placeholder(tf.float32, shape=(self.batch_size, 1))  # placeholder for the noise input
            self.G = generator(self.z, self.mlp_hidden_size)  # self.G is the generator's output
# The discriminator tries to tell the difference between samples from the
        # true data distribution (self.x) and the generated samples (self.G).
#
# Here we create two copies of the discriminator network (that share parameters),
# as you cannot use the same network with different inputs in TensorFlow.
with tf.variable_scope('Disc') as scope:
self.x = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
            self.D1 = discriminator(self.x, self.mlp_hidden_size)  # self.x holds the real samples
            scope.reuse_variables()  # reuse: both discriminator copies share the same variables
            self.D2 = discriminator(self.G, self.mlp_hidden_size)  # self.G holds the generated samples
# Define the loss for discriminator and generator networks (see the original
# paper for details), and create optimizers for both
        # D1 scores the real input, D2 scores the generated input
        self.loss_d = tf.reduce_mean(-tf.log(self.D1) - tf.log(1 - self.D2))  # discriminator loss
        self.loss_g = tf.reduce_mean(-tf.log(self.D2))  # generator loss
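        # Note: -log(D2) is the "non-saturating" generator loss from the original
        # GAN paper; it gives stronger gradients early in training than directly
        # minimizing log(1 - D2).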
        # collect the trainable variables of each sub-network
self.d_pre_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='D_pre')
self.d_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Disc')
self.g_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Gen')
        # build the optimizers that update each parameter set by gradient descent
self.opt_d = optimizer(self.loss_d, self.d_params, self.learning_rate)
self.opt_g = optimizer(self.loss_g, self.g_params, self.learning_rate)
def train(self):
with tf.Session() as session:
tf.global_variables_initializer().run()
# pretraining discriminator
num_pretrain_steps = 1000
for step in range(num_pretrain_steps):
d = (np.random.random(self.batch_size) - 0.5) * 10.0
labels = norm.pdf(d, loc=self.data.mu, scale=self.data.sigma)
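                # d is drawn uniformly from [-5, 5); the targets are the true
                # Gaussian density at those points, so D_pre learns to regress
                # the shape of the real data distribution before GAN training starts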
pretrain_loss, _ = session.run([self.pre_loss, self.pre_opt], {
self.pre_input: np.reshape(d, (self.batch_size, 1)),
self.pre_labels: np.reshape(labels, (self.batch_size, 1))
})
self.weightsD = session.run(self.d_pre_params)
# copy weights from pre-training over to new D network
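            # (this assumes D_pre and Disc build identical architectures, so the
            # two variable lists line up index by index)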
for i, v in enumerate(self.d_params):
session.run(v.assign(self.weightsD[i]))
for step in range(self.num_steps):
# update discriminator
                x = self.data.sample(self.batch_size)  # x: a batch of real samples
                z = self.gen.sample(self.batch_size)  # z: a batch of noise inputs
                loss_d, _ = session.run([self.loss_d, self.opt_d], {  # the discriminator step feeds both the real and the noise batch
self.x: np.reshape(x, (self.batch_size, 1)),
self.z: np.reshape(z, (self.batch_size, 1))
})
# update generator
z = self.gen.sample(self.batch_size)
loss_g, _ = session.run([self.loss_g, self.opt_g], {
self.z: np.reshape(z, (self.batch_size, 1))
})
if step % self.log_every == 0:
print('{}: {}\t{}'.format(step, loss_d, loss_g))
                if step % 100 == 0 or step == self.num_steps - 1:
self._plot_distributions(session)
def _samples(self, session, num_points=10000, num_bins=100):
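        # Returns histogram estimates, on a shared set of bins, of the real data
        # density and of the density of the generator's outputs.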
bins = np.linspace(-self.gen.range, self.gen.range, num_bins)
# data distribution
d = self.data.sample(num_points)
pd, _ = np.histogram(d, bins=bins, density=True)
# generated samples
zs = np.linspace(-self.gen.range, self.gen.range, num_points)
g = np.zeros((num_points, 1))
for i in range(num_points // self.batch_size):
g[self.batch_size * i:self.batch_size * (i + 1)] = session.run(self.G, {
self.z: np.reshape(
zs[self.batch_size * i:self.batch_size * (i + 1)],
(self.batch_size, 1)
)
})
pg, _ = np.histogram(g, bins=bins, density=True)
return pd, pg
def _plot_distributions(self, session):
pd, pg = self._samples(session)
p_x = np.linspace(-self.gen.range, self.gen.range, len(pd))
f, ax = plt.subplots(1)
ax.set_ylim(0, 1)
plt.plot(p_x, pd, label='real data')
plt.plot(p_x, pg, label='generated data')
plt.title('1D Generative Adversarial Network')
plt.xlabel('Data values')
plt.ylabel('Probability density')
plt.legend()
plt.show()
def main(args):
model = GAN(
        DataDistribution(),  # the real data distribution
        GeneratorDistribution(range=8),  # the noise source fed to the generator
        args.num_steps,
        args.batch_size,
        args.log_every,  # print the losses every this many steps
)
model.train()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--num-steps', type=int, default=1200,
help='the number of training steps to take')
parser.add_argument('--batch-size', type=int, default=12,
help='the batch size')
parser.add_argument('--log-every', type=int, default=10,
help='print loss after this many steps')
return parser.parse_args()
if __name__ == '__main__':
main(parse_args())
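With parse_args in place, the hyper-parameters can be set on the command line when the script is run, e.g. python gan_toy.py --num-steps 1200 --batch-size 12 --log-every 10 (gan_toy.py is just an illustrative file name).

The code targets the TensorFlow 1.x API (tf.placeholder, tf.Session, tf.train.*). If only TensorFlow 2.x is installed, one minimal workaround, assuming the tf.compat.v1 shim that ships with TensorFlow 2.x, is to swap the import at the top for the compatibility module:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # keep the graph-mode code above working unchanged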