import tensorflow
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, BatchNormalization, Activation
from tensorflow.keras.layers import Conv2DTranspose, Conv2D, Reshape, LeakyReLU
import matplotlib.pyplot as plt
from PIL import Image
import os
import math
import numpy as np
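# DCGAN-style face generator: load face images at 64x64, train a
# generator/discriminator pair on them, and periodically save model
# weights and grids of generated samples.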
image_size = 64
# Preview one sample image, resized to the training resolution.
# Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the equivalent filter.
im = Image.open('C:/Users/faces/1.png')
im = im.resize((image_size, image_size), Image.LANCZOS)
plt.imshow(im)
plt.show()
# Seed X_train with the first image; the loop below appends the rest.
# convert('RGB') drops any alpha channel so every array has shape (64, 64, 3).
X_train = Image.open('C:/Users/faces/1.png').convert('RGB')
X_train = X_train.resize((image_size, image_size), Image.LANCZOS)
X_train = np.asanyarray(X_train)
X_train = np.expand_dims(X_train, axis=0)
print(X_train.shape)  # expected: (1, 64, 64, 3)
model_path = 'C:/Users/model/'
generated_image_path = 'C:/Users/generate/'
result_path = 'C:/Users/generate/results/'
dataset_path = 'C:/Users/faces/'
character = 300  # dimensionality of the generator's latent (noise) vector
for dirname, _, filenames in os.walk(dataset_path):
    for filename in filenames:
        if X_train.shape[0] > 3000:
            break  # cap the dataset at ~3000 images (only exits the inner loop)
        try:
            im = Image.open(os.path.join(dirname, filename)).convert('RGB')
            im = im.resize((image_size, image_size), Image.LANCZOS)
            image_array = np.asanyarray(im)
            image_array = np.expand_dims(image_array, axis=0)
            X_train = np.concatenate((X_train, image_array), axis=0)
        except Exception:
            pass  # skip unreadable or non-image files
print(str(X_train.shape[0]))
def generator_model():
    # Maps a (character,)-dim noise vector to a 64x64x3 image in [-1, 1].
    model = Sequential()
    model.add(Dense(int(image_size / 8) * int(image_size / 8) * 256, input_shape=(character,)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Reshape((int(image_size / 8), int(image_size / 8), 256)))
    # Three stride-2 transposed convolutions upsample 8x8 -> 16x16 -> 32x32 -> 64x64.
    model.add(Conv2DTranspose(128, 5, strides=2, padding='same'))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Conv2DTranspose(64, 5, strides=2, padding='same'))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Conv2DTranspose(3, 5, strides=2, padding='same'))
    model.add(Activation('tanh'))  # tanh matches the [-1, 1] pixel normalization
    return model
g = generator_model()
g.summary()
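# Sanity check (a minimal sketch): the generator should map a
# (character,)-dimensional noise vector to a single 64x64 RGB image.
sanity_noise = np.random.normal(0, 1, size=(1, character))
print(g.predict(sanity_noise, verbose=0).shape)  # expected: (1, 64, 64, 3)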
def discriminator_model():
    # Classifies a 64x64x3 image as real (1) or generated (0).
    model = Sequential()
    model.add(Conv2D(64, padding='same', kernel_size=5, strides=2, input_shape=(image_size, image_size, 3)))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Conv2D(128, padding='same', kernel_size=5, strides=2))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Conv2D(256, padding='same', kernel_size=5, strides=2))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))  # probability that the input is real
    return model
d = discriminator_model()
d.summary()
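# Sanity check (a minimal sketch): the discriminator maps a 64x64 RGB image
# to one sigmoid score, the estimated probability that the image is real.
print(d.predict(np.zeros((1, image_size, image_size, 3)), verbose=0).shape)  # expected: (1, 1)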
def combine(g, d):
    # Stack generator and (frozen) discriminator for the generator's update step.
    model = Sequential()
    model.add(g)
    d.trainable = False  # only the generator's weights are updated through g_d
    model.add(d)
    return model
g = generator_model()
d = discriminator_model()
g_d = combine(g,d)
g_d.summary()
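# Note (a sketch): combine() freezes the discriminator, so gradients flowing
# through g_d update only the generator. Quick check of the frozen flag:
print('discriminator trainable inside g_d:', g_d.layers[1].trainable)  # expected: False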
def combine_images(images):
    # Tile a batch of images into a single, roughly square mosaic.
    num = images.shape[0]
    width = int(math.sqrt(num))
    height = int(math.ceil(float(num) / width))
    shape = images.shape[1:3]
    image = np.zeros((height * shape[0], width * shape[1], 3), dtype=images.dtype)
    for index, img in enumerate(images):
        i = int(index / width)  # row in the grid
        j = index % width       # column in the grid
        image[i * shape[0]:(i + 1) * shape[0], j * shape[1]:(j + 1) * shape[1], 0:3] = img[:, :, :]
    return image
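# Minimal usage sketch with dummy data: four 64x64 images tile into a
# 2x2 mosaic, so the output should be (128, 128, 3).
dummy_batch = np.zeros((4, image_size, image_size, 3), dtype=np.float32)
print(combine_images(dummy_batch).shape)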
if not os.path.exists(result_path):
    os.makedirs(result_path)
def generated(name):
    # Build a generator, load saved weights if available, and save one sample.
    g = generator_model()
    try:
        g.load_weights(model_path + "generatorA")
        print("Generator weights loaded successfully")
    except Exception:
        print("No saved weights found")
    # Note: the original drew noise with np.random.normal(-1, 1, ...), which
    # sets the mean to -1; a zero-mean draw matches standard DCGAN practice.
    noise_need = np.random.normal(0, 1, size=(1, character))
    generated_image_need = g.predict(noise_need, verbose=0)
    image = combine_images(generated_image_need)
    image = image * 127.5 + 127.5  # map tanh output [-1, 1] back to [0, 255]
    Image.fromarray(image.astype(np.uint8)).save(
        result_path + name + ".png")
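# Usage sketch (the file name 'sample' is an arbitrary choice): with no
# saved weights the generator is untrained, so the output will be noise.
generated('sample')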
if not os.path.exists(model_path):
    os.makedirs(model_path)
if not os.path.exists(generated_image_path):
    os.makedirs(generated_image_path)
def train(BATCH_SIZE, Xtrain):
    generated_image_size = 36  # 6x6 preview grid saved each epoch
    Xtrain = (Xtrain.astype(np.float32) - 127.5) / 127.5  # scale pixels to [-1, 1]
    d = discriminator_model()
    g = generator_model()
    g_d = combine(g, d)
    # g itself is never fitted directly; this compile is kept from the original
    # script but is not strictly required.
    g.compile(loss='binary_crossentropy', optimizer=tensorflow.keras.optimizers.SGD())
    g_d.compile(loss='binary_crossentropy', optimizer=tensorflow.keras.optimizers.RMSprop())
    d.trainable = True  # re-enable the discriminator for its own update step
    d.compile(loss='binary_crossentropy', optimizer=tensorflow.keras.optimizers.RMSprop())
    try:
        d.load_weights(model_path + "discriminatorA")
        print("Discriminator weights loaded successfully")
        g.load_weights(model_path + "generatorA")
        print("Generator weights loaded successfully")
    except Exception:
        print("No saved weights found")
    for epoch in range(500):
        print('Epoch is ', epoch)
        # Save a preview grid of generated faces at the start of every epoch.
        noise_need = np.random.normal(0, 1, size=(generated_image_size, character))
        generated_image_need = g.predict(noise_need, verbose=0)
        image = combine_images(generated_image_need)
        image = image * 127.5 + 127.5
        Image.fromarray(image.astype(np.uint8)).save(
            generated_image_path + str(epoch) + ".png")
        for index in range(Xtrain.shape[0] // BATCH_SIZE):
            noise = np.random.normal(0, 1, size=(BATCH_SIZE, character))
            train_batch = Xtrain[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]
            generated_image = g.predict(noise, verbose=0)
            # Discriminator step: real images get one-sided smoothed labels
            # in [0.90, 1.00); generated images get 0.
            X = np.concatenate((train_batch, generated_image))
            Y = np.concatenate(((np.random.rand(BATCH_SIZE) * 10 + 90) / 100,
                                np.zeros(BATCH_SIZE)))
            d_loss = d.train_on_batch(X, Y)
            # Generator step: train g through the frozen discriminator,
            # labelling its outputs as "real".
            noise = np.random.normal(0, 1, size=(BATCH_SIZE, character))
            d.trainable = False
            g_loss = g_d.train_on_batch(noise, (np.random.rand(BATCH_SIZE) * 10 + 90) / 100)
            d.trainable = True
            if index % 10 == 0:
                print('batch: %d, g_loss: %f, d_loss: %f' % (index, g_loss, d_loss))
                g.save_weights(model_path + 'generatorA', overwrite=True)
                print('Successfully saved generatorA')
                d.save_weights(model_path + 'discriminatorA', overwrite=True)
                print('Successfully saved discriminatorA')
train(BATCH_SIZE=128, Xtrain=X_train)