#0 Forward-propagation definition
import tensorflow as tf
# Input: 28*28-pixel images; output: a digit between 0 and 9.
INPUT_NODE = 784   # 28*28 pixels flattened into one input vector
OUTPUT_NODE = 10   # one logit per digit class 0-9
LAYER1_NODE = 500  # hidden-layer width
# Weight helper
def get_weight(shape, regularizer):
    """Create a weight Variable of the given shape.

    Weights are initialized from a truncated normal distribution
    (stddev=0.1). If `regularizer` is not None, an L2 penalty on the
    weights is added to the 'losses' collection, which the training
    loss later sums via tf.add_n(tf.get_collection('losses')).

    Args:
        shape: list of ints, shape of the weight tensor.
        regularizer: float L2 regularization rate, or None to disable.

    Returns:
        tf.Variable holding the weights.
    """
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
    # PEP 8: compare with None by identity, not `!= None`.
    if regularizer is not None:
        tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w
# Bias helper
def get_bias(shape):
    """Return a bias Variable of the given shape, initialized to zeros."""
    return tf.Variable(tf.zeros(shape))
# Build the neural network
def forward(x, regularizer):
    """Build the two-layer fully-connected network and return its logits.

    Args:
        x: input batch, shape [None, INPUT_NODE].
        regularizer: L2 rate forwarded to get_weight, or None.

    Returns:
        Logits tensor of shape [None, OUTPUT_NODE] (no softmax applied;
        the loss function applies it).
    """
    # Hidden layer: 784 -> 500 with ReLU activation.
    w1 = get_weight([INPUT_NODE, LAYER1_NODE], regularizer)
    b1 = get_bias([LAYER1_NODE])
    hidden = tf.nn.relu(tf.matmul(x, w1) + b1)
    # Output layer: 500 -> 10, left as raw logits.
    w2 = get_weight([LAYER1_NODE, OUTPUT_NODE], regularizer)
    b2 = get_bias([OUTPUT_NODE])
    return tf.matmul(hidden, w2) + b2
#1 Dataset (TFRecord) generation
import tensorflow as tf
import numpy as np
from PIL import Image
import os
image_train_path='./mnist_data_jpg/mnist_train_jpg_60000/'    # directory holding the 60k training jpgs
label_train_path='./mnist_data_jpg/mnist_train_jpg_60000.txt' # "filename label" lines for the training set
tfRecord_train='./data/mnist_train.tfrecords'                 # output TFRecord file for training data
image_test_path='./mnist_data_jpg/mnist_test_jpg_10000/'      # directory holding the 10k test jpgs
label_test_path='./mnist_data_jpg/mnist_test_jpg_10000.txt'   # "filename label" lines for the test set
tfRecord_test='./data/mnist_test.tfrecords'                   # output TFRecord file for test data
data_path='./data'                                            # directory the TFRecord files are written to
resize_height = 28                                            # target image height in pixels
resize_width = 28                                             # target image width in pixels
# Generate a tfrecords file
def write_tfRecord(tfRecordName, image_path, label_path):
    """Serialize (image, label) pairs into a TFRecord file.

    Reads a label file where each line is "<jpg filename> <digit>",
    loads each image, one-hot encodes its label, and writes one
    tf.train.Example per picture into `tfRecordName`.

    Args:
        tfRecordName: output .tfrecords file path.
        image_path: directory containing the jpg files (with trailing '/').
        label_path: text file listing "filename label" per line.
    """
    writer = tf.python_io.TFRecordWriter(tfRecordName)
    num_pic = 0  # count of pictures written so far
    # `with` guarantees the label file is closed even if reading raises.
    with open(label_path, 'r') as f:
        contents = f.readlines()
    for content in contents:
        value = content.split()  # whitespace-split into ["filename", "label"]
        img_path = image_path + value[0]
        img = Image.open(img_path)
        img_raw = img.tobytes()  # raw image bytes
        labels = [0] * 10
        labels[int(value[1])] = 1  # one-hot encode, e.g. digit 2 -> 0010000000
        # Pack image bytes and one-hot label into a tf.train.Example.
        example = tf.train.Example(features=tf.train.Features(feature={
            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
            'label': tf.train.Feature(int64_list=tf.train.Int64List(value=labels))
        }))
        writer.write(example.SerializeToString())
        num_pic += 1
        print("the number of picture:", num_pic)
    writer.close()
    print("write tfrecord successful")
def generate_tfRecord():
    """Ensure the output directory exists, then build both TFRecord files."""
    if os.path.exists(data_path):
        print('directory already exists')
    else:
        os.makedirs(data_path)
        print('The directory was created successfully')
    write_tfRecord(tfRecord_train, image_train_path, label_train_path)
    write_tfRecord(tfRecord_test, image_test_path, label_test_path)
def read_tfRecord(tfRecord_path):
    """Read one (image, label) example from a TFRecord file.

    Builds a shuffled filename queue, parses a single serialized
    tf.train.Example, and returns the decoded tensors.

    Returns:
        img: float32 tensor of shape [784], pixel values scaled to [0, 1].
        label: float32 one-hot tensor of shape [10].
    """
    # First-in-first-out file queue; the reader pulls records from it.
    filename_queue = tf.train.string_input_producer([tfRecord_path], shuffle=True)
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    # Feature keys must match the ones used when the records were written.
    features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([10], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string)
        })
    # Decode raw bytes into uint8 pixels and fix the shape to a flat 784 vector.
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img.set_shape([784])
    # Normalize pixel values into [0, 1].
    img = tf.cast(img, tf.float32) * (1. / 255)
    label = tf.cast(features['label'], tf.float32)
    return img, label
def get_tfrecord(num, isTrain=True):
    """Return a shuffled batch of `num` (image, label) pairs.

    Args:
        num: batch size (examples read per call).
        isTrain: True -> read training records, False -> test records.

    Returns:
        (img_batch, label_batch) tensors of shapes [num, 784] and [num, 10].
    """
    tfRecord_path = tfRecord_train if isTrain else tfRecord_test
    img, label = read_tfRecord(tfRecord_path)
    # shuffle_batch keeps a reservoir of up to `capacity` examples and
    # refills it from the reader whenever it drops below `min_after_dequeue`.
    img_batch, label_batch = tf.train.shuffle_batch(
        [img, label],
        batch_size=num,
        num_threads=2,
        capacity=1000,
        min_after_dequeue=700)
    return img_batch, label_batch
def main():
    """Entry point: build the train and test TFRecord files."""
    generate_tfRecord()


if __name__ == '__main__':
    main()
#2 Back-propagation (training)
import tensorflow as tf
import mnist_forward
import os
from tensorflow.examples.tutorials.mnist import input_data
import mnist_generateds
BATCH_SIZE = 200              # examples per training step
LEARNING_RATE_BASE = 0.1      # initial learning rate
LEARNING_RATE_DECAY = 0.99    # decay factor applied once per epoch
REGULARIZER = 0.0001          # L2 regularization rate
STEPS = 50000                 # total number of training steps
MOVING_AVERAGE_DECAY = 0.99   # decay for the parameter moving averages
MODEL_SAVE_PATH = "./model/"  # checkpoint directory
MODEL_NAME = "mnist_model"    # checkpoint file-name prefix
train_num_examples = 60000    # training-set size; steps per epoch = this / BATCH_SIZE
def backward():
    """Train the network and periodically checkpoint it.

    Builds the forward graph, a softmax cross-entropy loss plus the
    collected L2 regularization terms, an exponentially decaying
    learning rate, and SGD updates coupled with an exponential moving
    average of all trainable variables. Then runs the training loop,
    resuming from the latest checkpoint if one exists.
    """
    x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
    y = mnist_forward.forward(x, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)

    # Cross-entropy against the true class index, plus the L2 penalties
    # that get_weight() accumulated in the 'losses' collection.
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    loss = cem + tf.add_n(tf.get_collection('losses'))

    # Decay the learning rate once per epoch (staircase=True).
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        train_num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Maintain shadow (moving-average) copies of all trainable variables;
    # run them together with the gradient step, then do nothing else.
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    img_batch, label_batch = mnist_generateds.get_tfrecord(BATCH_SIZE, isTrain=True)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # Resume from the latest checkpoint when one is available.
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        # Input-queue threads feed img_batch/label_batch in the background.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for i in range(STEPS):
            # Fetch one batch of images and labels, then run a train step.
            xs, ys = sess.run([img_batch, label_batch])
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training steps,loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

        # Shut down the input-queue threads cleanly.
        coord.request_stop()
        coord.join(threads)
def main():
    """Entry point: run the training loop."""
    backward()


if __name__ == '__main__':
    main()
#3 Testing / evaluation
# After the model has been trained, feed the test set into the network to
# verify its accuracy and generalization. Note that the test set and the
# training set are independent of each other.
# Note: generalization = the program's accuracy on previously unseen data.
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import mnist_backward
import mnist_generateds
TEST_INTERVAL_SECS = 5  # seconds to sleep between evaluation rounds
# Total number of test samples, given manually (10,000).
TEST_NUM = 10000
def test():
    """Periodically evaluate the latest checkpoint on the test set.

    Rebuilds the inference graph (no regularization), restores each
    variable from its moving-average shadow value for stability, and
    prints the accuracy over a batch of TEST_NUM test images every
    TEST_INTERVAL_SECS seconds. Returns when no checkpoint is found.
    """
    with tf.Graph().as_default() as g:
        # Placeholders for the test images and their one-hot labels.
        x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
        y = mnist_forward.forward(x, None)

        # Saver that maps each variable to its moving-average shadow value.
        ema = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        # Accuracy = fraction of examples whose arg-max prediction matches.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # One batch covering all TEST_NUM (10,000) test images.
        img_batch, label_batch = mnist_generateds.get_tfrecord(TEST_NUM, isTrain=False)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # The training step count is encoded in the checkpoint file name.
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]

                    # Queue-runner threads feed the image/label batch.
                    coord = tf.train.Coordinator()
                    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

                    xs, ys = sess.run([img_batch, label_batch])
                    accuracy_score = sess.run(accuracy, feed_dict={x: xs, y_: ys})
                    print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score))

                    coord.request_stop()
                    coord.join(threads)
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(TEST_INTERVAL_SECS)
#通过主函数 main(),加载指定路径下的测试数据集,并调用规定的 test 函数,进行模型在测试集上的准确率验证。
def main():
    """Entry point: repeatedly evaluate the model on the test set."""
    test()


if __name__ == '__main__':
    main()
#4 Handwritten-digit recognition application
import tensorflow as tf
import numpy as np
from PIL import Image
import mnist_forward
import mnist_backward


def restore_model(testPicArr):
    """Run inference on one preprocessed image array.

    Rebuilds the inference graph, restores the moving-average weights
    from the latest checkpoint, and returns the predicted digit.

    Args:
        testPicArr: float32 array of shape [1, 784] with values in [0, 1].

    Returns:
        The predicted digit (array from tf.argmax), or -1 when no
        checkpoint file is found.
    """
    with tf.Graph().as_default() as tg:
        x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
        y = mnist_forward.forward(x, None)
        # `preValue` is first the arg-max op; it is evaluated to a concrete
        # value inside the session below.
        preValue = tf.argmax(y, 1)

        # Restore every variable from its moving-average shadow value;
        # the shadow starts at the variable's value and tracks each update.
        variable_averages = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                preValue = sess.run(preValue, feed_dict={x: testPicArr})
                return preValue
            else:
                print("No checkpoint file found")
                return -1


def pre_pic(picName):
    """Load a picture file and convert it to MNIST-style model input.

    The picture is resized to 28x28, converted to grayscale, inverted
    (MNIST digits are light on dark) and binarized, then flattened and
    scaled into [0, 1].

    Args:
        picName: path to the image file.

    Returns:
        float32 numpy array of shape [1, 784] holding 0.0 / 1.0 values.
    """
    img = Image.open(picName)
    reIm = img.resize((28, 28), Image.ANTIALIAS)
    im_arr = np.array(reIm.convert('L'))  # 'L' = 8-bit grayscale
    threshold = 50  # use a larger threshold for darker source pictures
    # Vectorized invert + binarize (replaces the original per-pixel loop,
    # same result): invert first, then map below-threshold to 0, else 255.
    im_arr = 255 - im_arr
    im_arr = np.where(im_arr < threshold, 0, 255)
    nm_arr = im_arr.reshape([1, 784]).astype(np.float32)
    img_ready = np.multiply(nm_arr, 1.0 / 255.0)
    return img_ready


def application():
    """Interactively classify user-supplied picture files."""
    testNum = input("input the number of test pictures:")
    for i in range(int(testNum)):
        testPic = input("the path of test picture:")
        testPicArr = pre_pic(testPic)          # preprocess the picture
        preValue = restore_model(testPicArr)   # run the classifier
        print("The prediction number is:", preValue)


def main():
    application()


if __name__ == '__main__':
    main()