分为三部分:前向传播、反向传播、数据测试,借此熟悉一下简单的结构化编程方式。
第一部分:前向传播(mnist_forward.py)
#前向传播,两层神经网络
import tensorflow as tf
import numpy as np
# Network dimensions for the two-layer MNIST classifier.
input_data = 784  # 28x28 pixels flattened into one input vector
output_data = 10  # one logit per digit class (0-9)
layer_data = 50   # hidden-layer width
def get_weight(shape, reg):
    """Create a trainable weight variable of the given shape.

    Weights are drawn from a truncated normal distribution (stddev=0.1).
    When ``reg`` is not None, an L2 regularization term for this weight
    is added to the 'losses' collection so the training loss can sum it in.

    Args:
        shape: list of ints, the weight tensor shape.
        reg: float or None, L2 regularization coefficient.

    Returns:
        A tf.Variable holding the weights.
    """
    w = tf.Variable(tf.truncated_normal(shape=shape, stddev=0.1))
    # Identity test against None is the correct idiom; `!= None` relies on
    # __ne__ and is flagged by PEP 8.
    if reg is not None:
        tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(reg)(w))
    return w
def get_bais(shape):
    """Return a trainable bias variable initialized to zeros."""
    return tf.Variable(tf.zeros(shape))
def forward(x, reg):
    """Build the two-layer network and return its output logits.

    Args:
        x: float32 tensor of shape [None, input_data], flattened images.
        reg: float or None, L2 regularization coefficient passed through
            to get_weight (None disables regularization, e.g. at test time).

    Returns:
        Raw logits tensor of shape [None, output_data]; softmax is applied
        later inside the loss op, not here.
    """
    # Hidden layer: input_data -> layer_data with ReLU activation.
    w1 = get_weight([input_data, layer_data], reg)
    b1 = get_bais([layer_data])
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)
    # Output layer: layer_data -> output_data, linear (logits only).
    w2 = get_weight([layer_data, output_data], reg)
    # Pass the shape as a list, consistent with b1 (original passed a bare
    # int, which tf.zeros also accepts but is inconsistent).
    b2 = get_bais([output_data])
    y = tf.matmul(y1, w2) + b2
    return y
第二部分:反向传播(mnist_backward.py)
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import os
# Training hyperparameters.
batch_size = 200             # examples per SGD step
learning_rate_base = 0.1     # initial learning rate
learning_rate_decay = 0.99   # decay factor for exponential_decay
regularizer = 0.0001         # L2 regularization coefficient
steps = 50000                # total training iterations
moving_average_decay = 0.99  # EMA decay applied to trainable variables
# Checkpoint output location.
model_save_path = "./model/"
model_name = "mnist_model"
def backward(mnist):
    """Build the training graph for the two-layer MNIST net and run SGD.

    Trains for `steps` iterations, decaying the learning rate once per
    epoch, maintaining exponential moving averages of all trainable
    variables, and checkpointing to model_save_path every 1000 steps.
    """
    # Placeholders for flattened images and one-hot labels.
    x = tf.placeholder(tf.float32,[None,mnist_forward.input_data])
    y_ = tf.placeholder(tf.float32,[None,mnist_forward.output_data])
    y = mnist_forward.forward(x,regularizer)
    # Non-trainable step counter; incremented by the optimizer and used for
    # learning-rate decay, EMA decay and checkpoint numbering.
    global_step = tf.Variable(0,trainable=False)
    # Labels arrive one-hot, so argmax converts them to the sparse class ids
    # this cross-entropy op expects.
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels=tf.argmax(y_,1))
    cem = tf.reduce_mean(ce)
    # Total loss = mean cross-entropy + L2 terms collected by get_weight.
    loss = cem + tf.add_n(tf.get_collection('losses'))
    # Decay the learning rate once per epoch (num_examples/batch_size steps).
    learning_rate = tf.train.exponential_decay(learning_rate_base,global_step,mnist.train.num_examples/batch_size,learning_rate_decay,staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step = global_step)
    # Shadow (moving-average) copies of the trainable variables; the test
    # script restores these instead of the raw weights.
    ema = tf.train.ExponentialMovingAverage(moving_average_decay,global_step)
    ema_op = ema.apply(tf.trainable_variables())
    # Bundle the optimizer step and the EMA update into a single train op.
    with tf.control_dependencies([train_step,ema_op]):
        train_op = tf.no_op(name='train')
    saver = tf.train.Saver()
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        for i in range(steps):
            xs,ys = mnist.train.next_batch(batch_size)
            _,loss_value,step = sess.run([train_op,loss,global_step],feed_dict={x:xs,y_:ys})
            # Log and checkpoint every 1000 steps; Saver keeps only the most
            # recent checkpoints by default, not all of them.
            if (i+1) % 1000 == 0:
                print('after %d steps,loss on training batch is %g' %(step,loss_value))
                saver.save(sess,os.path.join(model_save_path,model_name),global_step = global_step)
def main():
    """Load the MNIST dataset and start training."""
    dataset = input_data.read_data_sets("./data/", one_hot=True)
    backward(dataset)


if __name__ == '__main__':
    main()
部分训练结果:
第三部分:进行测试(mnist_test.py)
# -*- coding: utf-8 -*-
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import mnist_backward
test_interval_secs = 5  # seconds to sleep between checkpoint polls
def test(mnist):
    """Poll for the newest checkpoint and report test-set accuracy forever.

    Builds an inference-only graph (no regularization), restores the
    moving-average shadow values produced during training, and every
    `test_interval_secs` seconds evaluates the latest checkpoint on the
    full MNIST test set.

    Args:
        mnist: dataset object exposing test.images and test.labels.
    """
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_forward.input_data])
        y_ = tf.placeholder(tf.float32, [None, mnist_forward.output_data])
        # Inference only: pass reg=None so no L2 terms are created.
        y = mnist_forward.forward(x, None)

        # Restore the EMA shadow values into the variables, matching the
        # moving averages maintained during training.
        ema = tf.train.ExponentialMovingAverage(mnist_backward.moving_average_decay)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        # tf.arg_max is deprecated; tf.argmax is the supported spelling.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_backward.model_save_path)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Checkpoint paths end in "-<global_step>".
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
                    # Fixed typo in the original message ("accuary").
                    print("after %s training steps,test accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('no checkpoint found')
            time.sleep(test_interval_secs)
def main():
    """Load the MNIST dataset and start the evaluation loop."""
    dataset = input_data.read_data_sets("./data/", one_hot=True)
    test(dataset)


if __name__ == '__main__':
    main()
部分测试结果(反向传播与test函数同时执行):
代码是在pycharm中执行的:
数据下载到data文件中,模型保存到model文件夹中(并不会全部保存,而是会保存最后更新的几个模型)