This model uses L2 regularization, an exponential moving average of the weights, and a fixed learning rate.
Results: 100% accuracy on the training set, 99.4% on the validation set, and 99.43% on the test set.
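A note on the moving-average model: tf.train.ExponentialMovingAverage keeps a shadow copy of every variable and updates it each step as shadow = decay * shadow + (1 - decay) * variable. Because global_step is passed as num_updates in the training code below, the effective decay is min(decay, (1 + num_updates) / (10 + num_updates)), so the average tracks the weights more closely early in training.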
Part 1: forward propagation and network parameters
# mnist_inference.py: defines the forward pass and the network parameters
import tensorflow as tf

# Network configuration
INPUT_NODE = 784    # number of input-layer nodes (28*28 pixels)
OUTPUT_NODE = 10    # number of output-layer nodes (10 digit classes)
IMAGE_SIZE = 28
NUM_CHANNELS = 1
# Size and depth of the first convolutional layer
CONV1_DEEP = 32
CONV1_SIZE = 5
# Size and depth of the second convolutional layer
CONV2_DEEP = 64
CONV2_SIZE = 5
# Number of nodes in the fully connected layer
FC_SIZE = 512
# Forward propagation
def inference(input_tensor, regularizer, avg_class, train=True, reuse=False):
    # First convolutional layer: 28x28x1 -> 28x28x32
    with tf.variable_scope('layer_conv1', reuse=reuse):
        conv1_weights = tf.get_variable('weights', [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
                                        initializer=tf.truncated_normal_initializer(mean=0, stddev=0.1))
        conv1_biases = tf.get_variable('biases', [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, [1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
    # First pooling layer: 28x28x32 -> 14x14x32
    with tf.name_scope('layer_pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Second convolutional layer: 14x14x32 -> 14x14x64
    with tf.variable_scope('layer_conv2', reuse=reuse):
        conv2_weights = tf.get_variable('weights', [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable('biases', [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, [1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
    # Second pooling layer: 14x14x64 -> 7x7x64
    with tf.name_scope('layer_pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Flatten the feature map into a vector for the fully connected layers.
    # pool_shape[0] is the batch dimension; it is None for a placeholder with an
    # unspecified batch size, so use -1 in reshape and let TensorFlow infer it.
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]  # 7*7*64 = 3136
    FC_INPUT = tf.reshape(pool2, [-1, nodes])
    # First fully connected layer
    with tf.variable_scope('layer1', reuse=reuse):
        weights = tf.get_variable('weights', [nodes, FC_SIZE],
                                  initializer=tf.truncated_normal_initializer(mean=0, stddev=0.1))
        biases = tf.get_variable('biases', [FC_SIZE], initializer=tf.constant_initializer(0.0))
        # Only the fully connected weights are L2-regularized
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(weights))
        if avg_class is None:
            fc1 = tf.nn.relu(tf.matmul(FC_INPUT, weights) + biases)
        else:
            fc1 = tf.nn.relu(tf.matmul(FC_INPUT, avg_class.average(weights)) + avg_class.average(biases))
    # Dropout during training to reduce overfitting
    if train:
        fc1 = tf.nn.dropout(fc1, 0.5)
    # Second fully connected (output) layer
    with tf.variable_scope('layer2', reuse=reuse):
        weights = tf.get_variable('weights', [FC_SIZE, OUTPUT_NODE],
                                  initializer=tf.truncated_normal_initializer(mean=0, stddev=0.1))
        biases = tf.get_variable('biases', [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(weights))
        if avg_class is None:
            fc2 = tf.matmul(fc1, weights) + biases
        else:
            fc2 = tf.matmul(fc1, avg_class.average(weights)) + avg_class.average(biases)
    return fc2
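As a quick sanity check of the shapes above (two SAME-padded 2x2 poolings take 28x28 down to 7x7, so the flattened vector has 7*7*64 = 3136 elements), the graph can be built once with a dummy placeholder and inspected. A minimal sketch, assuming Part 1 is saved as mnist_inference.py; the placeholder name is illustrative:

import tensorflow as tf
import mnist_inference

# Build the inference graph once and check the output shape.
x = tf.placeholder(tf.float32, [None, 28, 28, 1], name='shape-check-input')
logits = mnist_inference.inference(x, regularizer=None, avg_class=None, train=False, reuse=False)
print(logits.get_shape().as_list())  # expected: [None, 10]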
Part 2: training, with both the training set and the validation set
# mnist_train.py: trains the network
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference

# Training hyperparameters
BATCH_SIZE = 128
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEP = 30000
MOVING_AVERAGE_DECAY = 0.99
# Path and file name for saving the model
MODEL_SAVE_PATH = './model2.ckpt'
# Curves recorded during training
train_acc, valid_acc = [], []
train_loss, valid_loss = [], []
epochs = []
def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer=regularizer, avg_class=None, reuse=False, train=True)
    global_step = tf.Variable(0, trainable=False)
    # Apply an exponential moving average to all trainable variables
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    # sparse_softmax_cross_entropy_with_logits expects class indices,
    # so take the argmax of the one-hot labels
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Total loss = cross entropy + the L2 terms collected in 'losses'
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    # Fixed learning rate; for the decayed schedule see the note after this listing
    learning_rate = 0.1
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # Run the gradient step and the moving-average update together
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')
    # Forward pass using the moving-average weights, for evaluation (no dropout)
    average_y = mnist_inference.inference(x, regularizer=regularizer, avg_class=variable_averages, train=False, reuse=tf.AUTO_REUSE)
    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
    # tf.cast converts the boolean predictions to floats
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Saver for model persistence
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEP):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            xs = np.reshape(xs, [BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS])
            _, tra_loss, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            val_xs, val_ys = mnist.validation.next_batch(BATCH_SIZE)
            val_xs = np.reshape(val_xs, [BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS])
            val_loss = sess.run(loss, feed_dict={x: val_xs, y_: val_ys})
            epochs.append(step)
            train_acc.append(sess.run(accuracy, feed_dict={x: xs, y_: ys}))
            train_loss.append(tra_loss)
            valid_acc.append(sess.run(accuracy, feed_dict={x: val_xs, y_: val_ys}))
            valid_loss.append(val_loss)
            # Report every 1000 steps
            if (i + 1) % 1000 == 0:
                print('<==%d==>, loss on training batch is %g.' % (i + 1, tra_loss))
                print(train_acc[-1])
                print(valid_acc[-1])
        # Plot loss and accuracy curves for the training and validation sets
        plt.figure(1)
        plt.subplot(1, 2, 1)
        plt.grid(True)
        plt.plot(epochs, train_loss, color='red', label='train')
        plt.plot(epochs, valid_loss, color='blue', label='valid')
        plt.legend()
        plt.xlabel('Step', fontsize=15)
        plt.ylabel('Loss', fontsize=15)
        plt.title('Loss', fontsize=15)
        plt.subplot(1, 2, 2)
        plt.plot(epochs, train_acc, color='red', label='train')
        plt.plot(epochs, valid_acc, color='blue', label='valid')
        plt.legend()
        plt.xlabel('Step', fontsize=15)
        plt.ylabel('Accuracy', fontsize=15)
        plt.title('Acc', fontsize=15)
        plt.show()
        saver.save(sess, MODEL_SAVE_PATH)
def main(argv=None):
    mnist = input_data.read_data_sets('E:/User-Duanduan/python/Deep-Learning/tensorflow/data/MNIST_data/', one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()
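Note that LEARNING_RATE_BASE and LEARNING_RATE_DECAY are declared but never used: this run keeps learning_rate fixed at 0.1, matching the summary at the top. To switch to the decayed schedule instead, the fixed rate can be replaced with tf.train.exponential_decay, which computes LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / decay_steps). A sketch (staircase=True is an assumption here; it decays the rate once per epoch rather than continuously):

learning_rate = tf.train.exponential_decay(
    LEARNING_RATE_BASE,                      # initial rate (0.8)
    global_step,                             # advanced by minimize() every step
    mnist.train.num_examples / BATCH_SIZE,   # decay_steps: batches per epoch
    LEARNING_RATE_DECAY,                     # decay factor (0.99)
    staircase=True)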
Part 3: evaluation on the test set
# mnist_eval.py: evaluates the trained model
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
import numpy as np
import mnist_inference
import mnist_train

BATCH_SIZE = 10000  # size of the MNIST test set

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Define the input and output formats
        x = tf.placeholder(tf.float32, [BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS], name='x-input')
        y_ = tf.placeholder(tf.float32, [BATCH_SIZE, mnist_inference.OUTPUT_NODE], name='y-input')
        # Forward pass over the whole test batch (no regularizer, no dropout)
        y = mnist_inference.inference(x, None, None, train=False, reuse=False)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # Load the test images
        xs = mnist.test.images
        xs = np.reshape(xs, [BATCH_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS])
        test_feed = {x: xs, y_: mnist.test.labels}
        # Also test a single image
        show_image = mnist.test.images[5000]
        label = mnist.test.labels[5000]
        flatten_image = np.reshape(show_image, [1, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS])
        actual_label = label.tolist().index(max(label.tolist()))
        x_test = tf.placeholder(tf.float32, [1, mnist_inference.IMAGE_SIZE, mnist_inference.IMAGE_SIZE, mnist_inference.NUM_CHANNELS], name='x-test-input')
        y_test = mnist_inference.inference(x_test, None, None, train=False, reuse=tf.AUTO_REUSE)
        pred_label = tf.argmax(y_test, 1)
        # Restore the moving-average (shadow) values instead of the raw weights
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        with tf.Session() as sess:
            # Load the trained model (the path saved by mnist_train)
            saver.restore(sess, mnist_train.MODEL_SAVE_PATH)
            # Evaluate the whole test batch
            accuracy_score = sess.run(accuracy, feed_dict=test_feed)
            print('Test accuracy is %g%%' % (accuracy_score * 100))
            # Evaluate the single image
            result = sess.run(pred_label, feed_dict={x_test: flatten_image})
            print('Actual: %d, prediction: %d' % (actual_label, result[0]))
            show_image = np.reshape(show_image, [28, 28])
            plt.figure('Show')
            plt.imshow(show_image)
            plt.show()

def main(argv=None):
    mnist = input_data.read_data_sets('E:/User-Duanduan/python/Deep-Learning/tensorflow/data/MNIST_data/', one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    tf.app.run()
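A note on variables_to_restore() above: the checkpoint written during training stores both each weight and its moving-average shadow; variables_to_restore() returns a dictionary mapping the shadow names in the checkpoint (e.g. 'layer1/weights/ExponentialMovingAverage') to the variables in the current graph, so Saver.restore loads the averaged values into the ordinary variables. A small illustrative sketch to inspect that mapping once the graph is built:

ema = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
for ckpt_name, var in ema.variables_to_restore().items():
    print(ckpt_name, '->', var.op.name)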