'''
——————————A TensorFlow denoising autoencoder that reconstructs MNIST data——————————
Versions: tensorflow 1.4.0
          Python 3.5
Autoencoder: an unsupervised learning algorithm whose goal is not clustering,
but to reconstruct the data from the higher-order features it extracts.
'''
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
## Xavier initialization keeps the initial weight scale just right (neither too
## large nor too small), so activations neither vanish nor explode as training proceeds.
## fan_in, fan_out are the numbers of input and output nodes.
def xavier_init(fan_in, fan_out, constant=1):
    ## Uniform on [-sqrt(6/(fan_in+fan_out)), +sqrt(6/(fan_in+fan_out))],
    ## which gives the weights a variance of 2/(fan_in+fan_out).
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=low, maxval=high, dtype=tf.float32)
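## As a concrete check (values computed for the 784 -> 200 layer built below,
## added for illustration rather than part of the original script):
## sqrt(6 / (784 + 200)) ≈ 0.078, so w1 starts uniformly in roughly [-0.078, 0.078].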
#Constructor arguments:
##n_input: number of input nodes
##n_hidden: number of hidden-layer nodes
##transfer_function: hidden-layer activation function, softplus by default
##optimizer: optimization algorithm, Adam by default
##scale: Gaussian noise coefficient, 0.1 by default
##_initialize_weights is the parameter-initialization function
class AdditiveGaussianNoiseAutoencoder(object):
def __init__(self,n_input,n_hidden,transfer_function=tf.nn.softplus,
optimizer=tf.train.AdamOptimizer(),scale=0.1):
self.n_input = n_input
self.n_hidden = n_hidden
self.transfer = transfer_function
self.scale = tf.placeholder(tf.float32)
        self.training_scale = scale
network_weights = self._initialize_weights()
self.weights = network_weights
self.x = tf.placeholder(tf.float32,[None,self.n_input])
        ## Corrupt the input with additive Gaussian noise, then encode it.
        ## The noise level must come from the self.scale placeholder (fed at run
        ## time); using the Python float `scale` here would leave that placeholder unused.
        self.hidden = self.transfer(tf.add(tf.matmul(
            self.x + self.scale * tf.random_normal((n_input,)),
            self.weights['w1']), self.weights['b1']))
self.reconstruction = tf.add(tf.matmul(self.hidden,self.weights['w2']),self.weights['b2'])
        ##Autoencoder loss: squared error between the reconstruction and the clean input
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
self.optimizer = optimizer.minimize(self.cost)
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init)
    #Parameter-initialization function
    ##Creates w1, b1, w2, b2
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.Variable(xavier_init(self.n_input,self.n_hidden))
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden],dtype=tf.float32))
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden,self.n_input],dtype=tf.float32))
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input],dtype=tf.float32))
return all_weights
    #Compute the cost: partial_fit trains on one mini-batch of data and returns the current loss
def partial_fit(self,X):
        cost, opt = self.sess.run((self.cost, self.optimizer),
                                  feed_dict={self.x: X, self.scale: self.training_scale})
return cost
    #Compute the cost only, without training; used for the total loss on the test set
    def calc_total_cost(self, X):
        ## Run only the cost op; running self.optimizer here would train on the test set
        return self.sess.run(self.cost, feed_dict={self.x: X, self.scale: self.training_scale})
    #Return the hidden-layer output: an interface exposing the learned higher-order features
def transform(self,X):
return self.sess.run(self.hidden,feed_dict={self.x:X,self.scale:self.training_scale})
    #Restore the original data from higher-order (hidden) features
def generate(self,hidden=None):
if hidden is None:
            ## b1 is a tf.Variable and cannot serve as a numpy size argument;
            ## draw a single random hidden code of width n_hidden instead
            hidden = np.random.normal(size=(1, self.n_hidden))
return self.sess.run(self.reconstruction,feed_dict={self.hidden: hidden})
    #Input: original data; output: the reconstructed data
def reconstruct(self,X):
return self.sess.run(self.reconstruction,feed_dict={self.x:X,self.scale:self.training_scale})
    #Utility functions to fetch the hidden layer's parameters w1 and b1
def getWeights(self):
return self.sess.run(self.weights['w1'])
def getBiases(self):
return self.sess.run(self.weights['b1'])
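## A minimal usage sketch (shapes assume the 784-input / 200-hidden model built
## below; left commented out because `autoencoder` is only constructed further
## down): transform() followed by generate() is equivalent to reconstruct().
# codes = autoencoder.transform(X_test[:10])   # hidden codes, shape (10, 200)
# rebuilt = autoencoder.generate(codes)        # reconstructions, shape (10, 784)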
#--------------------------------------------------------------------------------------------
##Read in the data and start training
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
##Standardize the data to zero mean and unit variance; the scaler is fitted on
##the training set only, and the same transform is applied to the test set
def standard_scale(X_train, X_test):
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test
##Define a function that fetches a random block of data, since training is mini-batch based:
##draw a random integer between 0 and len(data)-batch_size as the start index,
##then take batch_size consecutive samples from that position.
##Note that blocks drawn on successive calls may overlap, so this is sampling
##with replacement, not without.
def get_random_block_data(data,batch_size):
start_index = np.random.randint(0,len(data)-batch_size)
return data[start_index:(start_index+batch_size)]
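## If each sample should instead be seen exactly once per epoch (sampling
## without replacement), one alternative is sketched below; `get_shuffled_batches`
## is an illustrative addition and is not used by the training loop.
def get_shuffled_batches(data, batch_size):
    ## Shuffle the indices once, then walk through them batch_size at a time
    indices = np.random.permutation(len(data))
    for start in range(0, len(data) - batch_size + 1, batch_size):
        yield data[indices[start:start + batch_size]]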
#Parameter settings and training-data preparation
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 100
batch_size = 128
display_step = 1
#Display a single image; the input is an array of shape (784,)
def mnist_plot(img):
    import matplotlib.pyplot as plt
    im = img.reshape(28, 28)
    ##Show the image
    fig = plt.figure()
    fig.add_subplot(111)
    plt.imshow(im, cmap='gray')
    plt.show()
#Create the autoencoder instance
autoencoder = AdditiveGaussianNoiseAutoencoder(n_input=784,
n_hidden=200,
transfer_function=tf.nn.softplus,
optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
scale = 0.01)
#Start training
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
for i in range(total_batch):
batch_xs = get_random_block_data(X_train,batch_size)
cost = autoencoder.partial_fit(batch_xs)
avg_cost += cost / n_samples * batch_size
if epoch % display_step == 0:
print("Epoch:","%04d" %(epoch+1),"cost=", "{:.9f}".format(avg_cost))
##Total squared-error loss on the test set
print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))