'''
# 2018-06-25, Monday, week 26, day 176, SZ
Handwritten-digit recognition, program file 1:
defines the forward-propagation process and the parameters of the neural network;
both training and evaluation can call the inference() function directly.
Problem-code notes:
# regularizer: the regularization function; variable attributes: dimensions (shape).
tf.truncated_normal_initializer outputs random values drawn from a truncated normal distribution.
seed: a Python integer used to create a random seed; see the behavior of tf.set_random_seed.
tf.nn.relu() is the activation function that introduces nonlinearity.
The network output adds an activation function and a bias term: f(Wx + b), where f is the activation function and b is the bias.
Once each neuron's output passes through a nonlinear function, the whole model is no longer linear. That nonlinear function is the activation function.
Three common activation functions: ReLU, sigmoid, and tanh.
'''
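# A quick sketch (not part of the program) to see the nonlinearity of the three
# activation functions named above; it assumes nothing beyond standard
# TensorFlow 1.x ops.
'''
import tensorflow as tf

x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
with tf.Session() as sess:
    print(sess.run(tf.nn.relu(x)))  # [0.  0.  0.  0.5 2. ] -- max(0, x)
    print(sess.run(tf.sigmoid(x)))  # squashed into (0, 1)
    print(sess.run(tf.tanh(x)))     # squashed into (-1, 1)
'''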
import tensorflow as tf

# Node counts of the input layer, the output layer, and hidden layer 1.
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

def get_weight_variable(shape, regularizer):
    # Weights are initialized from a truncated normal distribution (stddev 0.1).
    weights = tf.get_variable(
        'weights', shape,
        initializer=tf.truncated_normal_initializer(stddev=0.1))
    # When a regularizer is given, add this tensor's regularization loss
    # to the 'losses' collection.
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights

# Define the forward-propagation process.
def inference(input_tensor, regularizer):
    # Declare the first layer's variables and compute its forward pass.
    with tf.variable_scope('layer1'):
        # Weights between the input layer and hidden layer 1.
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable(
            'biases', [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
    # Declare the second layer's variables and compute its forward pass.
    with tf.variable_scope('layer2'):
        # Weights between hidden layer 1 and the output layer.
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable(
            'biases', [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases
    # Return the forward-propagation result (the logits).
    return layer2
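# A minimal usage sketch for inference(): feed one random batch and check the
# logits shape. The random batch is a hypothetical stand-in for real MNIST
# images; evaluation passes regularizer=None, as the prediction code below does.
'''
import numpy as np
import tensorflow as tf
import mnist_inference

x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE])
logits = mnist_inference.inference(x, None)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(8, mnist_inference.INPUT_NODE).astype(np.float32)
    print(sess.run(logits, feed_dict={x: batch}).shape)  # (8, 10)
'''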
###########################################  Training part  ###########################################
import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = 'D:\\ST\\Python_work\\program\\手写识别'
MODEL_NAME = "mnist_model"

def train(mnist):
    # Define the input and output placeholders.
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Define the loss function, the learning rate, the moving-average
    # operation, and the training step.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Total loss = cross entropy + the L2 terms collected in 'losses'.
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY,
        staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    # Update the weights and their shadow (moving-average) copies in one run.
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # Initialize the TensorFlow persistence class.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main(argv=None):
    mnist = input_data.read_data_sets('D:\\ST\\Python_work\\program\\手写识别', one_hot=True)
    train(mnist)

if __name__ == '__main__':
    main()
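# How the exponential decay above plays out, assuming MNIST's 55000 training
# images: decay_steps = 55000 / 100 = 550, and with staircase=True the decayed
# rate is LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step // 550),
# e.g. 0.8 * 0.99 ** 2 ≈ 0.784 at step 1100. A sketch to verify:
'''
decay_steps = 55000 // 100  # 550
for step in (0, 550, 1100, 29700):
    print(step, 0.8 * 0.99 ** (step // decay_steps))
'''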
'''
Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Extracting D:\ST\Python_work\program\手写识别\train-images-idx3-ubyte.gz
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Extracting D:\ST\Python_work\program\手写识别\train-labels-idx1-ubyte.gz
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Extracting D:\ST\Python_work\program\手写识别\t10k-images-idx3-ubyte.gz
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting D:\ST\Python_work\program\手写识别\t10k-labels-idx1-ubyte.gz
2018-06-25 19:14:55.952000: I C:\tf_jenkins\home\workspace\rel-win\M\windows\PY\35\tensorflow\core\platform\cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX AVX2
After 1 training step(s), loss on training batch is 2.96337.
After 1001 training step(s), loss on training batch is 0.21122.
After 2001 training step(s), loss on training batch is 0.195296.
After 3001 training step(s), loss on training batch is 0.147966.
After 4001 training step(s), loss on training batch is 0.121113.
After 5001 training step(s), loss on training batch is 0.104925.
After 6001 training step(s), loss on training batch is 0.0969063.
After 7001 training step(s), loss on training batch is 0.0967676.
After 8001 training step(s), loss on training batch is 0.0805094.
After 9001 training step(s), loss on training batch is 0.0758026.
After 10001 training step(s), loss on training batch is 0.0662473.
After 11001 training step(s), loss on training batch is 0.0667674.
After 12001 training step(s), loss on training batch is 0.0615224.
After 13001 training step(s), loss on training batch is 0.0548805.
After 14001 training step(s), loss on training batch is 0.0576472.
After 15001 training step(s), loss on training batch is 0.0558432.
After 16001 training step(s), loss on training batch is 0.050817.
After 17001 training step(s), loss on training batch is 0.04974.
After 18001 training step(s), loss on training batch is 0.0424435.
After 19001 training step(s), loss on training batch is 0.0423194.
After 20001 training step(s), loss on training batch is 0.0413847.
After 21001 training step(s), loss on training batch is 0.0433296.
After 22001 training step(s), loss on training batch is 0.0370582.
After 23001 training step(s), loss on training batch is 0.0422068.
After 24001 training step(s), loss on training batch is 0.0377206.
After 25001 training step(s), loss on training batch is 0.0377879.
After 26001 training step(s), loss on training batch is 0.0397268.
After 27001 training step(s), loss on training batch is 0.035891.
After 28001 training step(s), loss on training batch is 0.0405907.
After 29001 training step(s), loss on training batch is 0.0337722.
[Finished in 479.9s]
'''
###########################################  My problem code: training part  ###########################################
'''
My problem code 2
Source code:
https://github.com/caicloud/tensorflow-tutorial/tree/master/Deep_Learning_with_TensorFlow
# 2018-06-25, Monday, week 26, day 176, SZ
Handwritten-digit recognition, program file 2:
defines the training process of the neural network.
'''
import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the constants and the forward-propagation function defined in mnist_inference.py.
import mnist_inference

# Configure the parameters of the neural network.
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001  # was misspelled REGULARAZTION_RATE
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
# Model save path and file name.
MODEL_SAVE_PATH = 'D:\\ST\\Python_work\\program\\手写识别'
MODEL_NAME = 'model.ckpt'

def train(mnist):
    # Define the input and output placeholders.
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # Use the forward-propagation process defined in mnist_inference.py directly.
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Define the loss function, the learning rate, the moving-average
    # operation, and the training step.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # Bug in my original code: the arguments were passed positionally as
    # (y, tf.argmax(y_, 1)); this API requires keyword arguments.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    # Note: unlike the teacher's version, staircase=True is not set here.
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # Initialize the TF persistence class.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Bug in my original code: the deprecated tf.initialize_all_variables()
        # was called in addition to the line below; one initializer is enough.
        tf.global_variables_initializer().run()
        # The model is not evaluated on the validation set during training;
        # validation and testing are done by a separate program.
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: xs, y_: ys})
            # Save the model every 1000 rounds.
            if i % 1000 == 0:
                # Print the current loss.
                print('After %d training step(s), loss on training batch is %g' % (step, loss_value))
                # Save the model, appending the training step count to the file name.
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main(argv=None):
    mnist = input_data.read_data_sets('D:\\ST\\Python_work\\program\\手写识别', one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()
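# Why the loss uses tf.argmax(y_, 1): the labels are one-hot, but the sparse op
# expects class indices. A sketch of the equivalence between the sparse and
# dense cross-entropy ops (both standard TF 1.x APIs), on made-up logits:
'''
import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1], [0.2, 3.0, 0.3]])
one_hot = tf.constant([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
sparse = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=tf.argmax(one_hot, 1), logits=logits)
dense = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot, logits=logits)
with tf.Session() as sess:
    print(sess.run(sparse))  # same per-example values...
    print(sess.run(dense))   # ...as the dense version
'''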
###########################################  Prediction part  ###########################################
'''
# Teacher's code 3
import time

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference
import mnist_train

# Interval between evaluations, in seconds.
EVAL_INTERVAL_SECS = 50

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        y = mnist_inference.inference(x, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        while True:
            with tf.Session() as sess:
                # MODEL_SAVE_PATH = 'D:\\ST\\Python_work\\program\\手写识别'
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                # print(ckpt)  # returned None: checkpoint not found, even though I can see the file there.
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(EVAL_INTERVAL_SECS)

def main(argv=None):
    mnist = input_data.read_data_sets('D:\\ST\\Python_work\\program\\手写识别', one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    main()

Output:
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Extracting D:\ST\Python_work\program\手写识别\train-labels-idx1-ubyte.gz
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Extracting D:\ST\Python_work\program\手写识别\t10k-images-idx3-ubyte.gz
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting D:\ST\Python_work\program\手写识别\t10k-labels-idx1-ubyte.gz
2018-06-25 19:35:27.932000: I C:\tf_jenkins\home\workspace\rel-win\M\windows\PY\35\tensorflow\core\platform\cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX AVX2
No checkpoint file found
[Finished in 19.6s]
'''
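# What variables_to_restore() does in the evaluation code: it returns a dict
# mapping each variable's shadow (moving-average) name to the variable itself,
# so the Saver loads the averaged weights in place of the raw ones. A sketch:
'''
import tensorflow as tf

v = tf.Variable(0.0, name='v')
ema = tf.train.ExponentialMovingAverage(0.99)
maintain_op = ema.apply([v])
print(ema.variables_to_restore())
# roughly: {'v/ExponentialMovingAverage': <tf.Variable 'v:0' shape=() ...>}
'''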
###########################################  My problem code: prediction part  ###########################################
# My own code
'''
# 2018-06-25, Monday, week 26, day 176, SZ
Handwritten-digit recognition, program file 3:
loads the trained model periodically and evaluates its accuracy on the validation set.
'''
import time

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the constants and functions defined in mnist_inference.py and mnist_train.py.
import mnist_inference
import mnist_train

# Load the newest model every 10 seconds and test its accuracy on the validation set.
EVAL_INTERVAL_SECS = 10

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Define the input and output format.
        # Bug in my original code: the names were 'x- input' and 'y- input',
        # each with a stray space.
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        # Call the function from the other file to compute the forward-propagation result.
        y = mnist_inference.inference(x, None)
        # Use the forward-propagation result to compute the accuracy.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Load the model through variable renaming, so the forward pass does not
        # have to call the moving-average function itself; the forward-propagation
        # process defined in mnist_inference.py can be reused as-is.
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Every EVAL_INTERVAL_SECS seconds, compute the accuracy to track how it
        # changes during training.
        while True:
            with tf.Session() as sess:
                # tf.train.get_checkpoint_state() finds the newest model file in
                # the directory via the checkpoint file.
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model.
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Get the number of training rounds from the file name.
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    # Bug in my original code: the feed dict was misspelled
                    # 'validation_feed', an undefined name.
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print('After %s training steps, validation accuracy = %g' % (global_step, accuracy_score))
                else:
                    print('no checkpoint file found')
                    return
            # Bug in my original code: 'return time.sleep(...)' ended the function
            # after one pass; the sleep belongs at the end of each loop iteration.
            time.sleep(EVAL_INTERVAL_SECS)

def main(argv=None):
    mnist = input_data.read_data_sets('D:\\ST\\Python_work\\program\\手写识别', one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    tf.app.run()
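# A possible way to debug the 'No checkpoint file found' result above:
# tf.train.get_checkpoint_state() reads the plain-text index file named
# 'checkpoint' that Saver.save() writes next to the model files; if training
# saved to a different directory, it returns None. A sketch to check the path:
'''
import tensorflow as tf
import mnist_train

print(tf.train.latest_checkpoint(mnist_train.MODEL_SAVE_PATH))
# prints the newest model file's path, or None if the 'checkpoint'
# index file is missing from that directory
'''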