TensorFlow Notes (5): Handwritten Digit Recognition Accuracy on the MNIST Dataset

This post covers building a neural network, training it on the MNIST dataset, and outputting the handwritten digit recognition accuracy.

 

5.1
The MNIST dataset: it contains 70,000 images of handwritten digits, white on a black background. Of these, 55,000 form the training set, 5,000 the validation set, and 10,000 the test set. Each image is 28*28 pixels; a pure black pixel has value 0 and a pure white pixel has value 1. Each label is a one-dimensional array of length 10, in which the element at a given index gives the probability that the corresponding digit appears.

Before the MNIST dataset is fed into the neural network, each image must first be flattened into a one-dimensional array of length 784, and this array is then fed to the network as its input features.

For example:
A handwritten digit image becomes a one-dimensional array of length 784, such as [0. 0. 0. 0. 0.231 0.235 0.459 … 0.219 0. 0. 0.], which is fed into the network. The label of this image is [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]: the element at index 6 is 1, meaning the probability that the digit is 6 is 100%, so the recognition result for this image is 6.
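
For reference, the short sketch below (not one of the four course files) loads the dataset with the same input_data.read_data_sets call used later in this post and prints the shapes and a sample label described above:

# Minimal sketch: inspect the MNIST dataset (assumes TensorFlow 1.x, like the rest of this post)
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("./data", one_hot=True)
print(mnist.train.images.shape)       # (55000, 784): each image flattened to length 784
print(mnist.train.labels.shape)       # (55000, 10): one-hot labels
print(mnist.validation.num_examples)  # 5000
print(mnist.test.num_examples)        # 10000
print(mnist.train.labels[0])          # a length-10 array with a single 1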

 

Without further ado, here is the code. The handwritten digit recognition program consists of four files: mnist_forward.py, mnist_backward.py, mnist_test.py, and mnist_app.py, which implement forward propagation, backpropagation, accuracy testing, and handwritten digit recognition, respectively.

1. First, forward propagation: mnist_forward.py

#coding:utf-8
'''Forward propagation of the BP neural network'''
import tensorflow as tf

INPUT_NODE = 784   # number of input nodes: exactly 28*28*1
OUTPUT_NODE = 10   # number of output nodes
LAYER1_NODE = 500  # number of hidden-layer nodes
# returns a weight matrix of the given shape
def get_weight(shape,regularizer):
    w = tf.Variable(tf.truncated_normal(shape,stddev = 0.1))
    '''if regularization is enabled, add the L2 loss of this w to the 'losses' collection, which is summed into the total loss'''
    if regularizer is not None: tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w
# returns a bias vector of the given shape
def get_bias(shape):
    b = tf.Variable(tf.zeros(shape))
    return b
# defines the forward propagation
def forward(x,regularizer):
    w1 = get_weight([INPUT_NODE,LAYER1_NODE],regularizer)
    b1 = get_bias([LAYER1_NODE])
    y1 = tf.nn.relu(tf.matmul(x,w1)+b1)

    w2 = get_weight([LAYER1_NODE,OUTPUT_NODE],regularizer)
    b2 = get_bias([OUTPUT_NODE])
    y = tf.matmul(y1,w2)+b2
    return y
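
As a quick sanity check (a hypothetical snippet, not part of the four files), forward() can be fed a placeholder of shape [None, 784] and should return a tensor of shape [None, 10]:

# Hypothetical shape check for mnist_forward.forward
import tensorflow as tf
import mnist_forward

x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
y = mnist_forward.forward(x, None)  # no regularization needed for a shape check
print(y.shape)                      # expected to show a shape of (?, 10), i.e. [None, 10]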

2. Backpropagation: mnist_backward.py

#coding:utf-8
# backpropagation (training)
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data  # module that loads the MNIST data
import mnist_forward
import os

BATCH_SIZE = 200             # number of examples fed to the network per training step
LEARNING_RATE_BASE = 0.1     # initial learning rate
LEARNING_RATE_DECAY = 0.99   # learning-rate decay rate
REGULARIZER = 0.0001         # regularization coefficient
STEPS = 50000                # total number of training steps
MOVING_AVERAGE_DECAY = 0.99  # moving-average decay rate
MODEL_SAVE_PATH = "./MODEL/"
#MODEL_SAVE_PATH = "./home/lyw/eclipse-workspace/testpython1/test"
#MODEL_SAVE_PATH = "./model/"
MODEL_NAME = "mnist_model"#模型的保存的文件名
# backpropagation; takes the loaded mnist dataset as input
def backward(mnist):
    # placeholders for the input x and the labels y_
    x = tf.placeholder(tf.float32,[None,mnist_forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32,[None,mnist_forward.OUTPUT_NODE])
    # run forward propagation first
    y = mnist_forward.forward(x,REGULARIZER)
    '''step counter, initialized to 0 and marked as not trainable'''
    global_step = tf.Variable(0,trainable = False)
    '''define the loss: softmax combined with cross entropy'''
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels= tf.argmax(y_,1))
    cem = tf.reduce_mean(ce)  # average the cross entropy over the batch
    loss = cem+tf.add_n(tf.get_collection('losses'))  # add the regularization losses of the weights w to the total loss
    '''exponentially decaying learning rate'''
    learning_rate = tf.train.exponential_decay(
            LEARNING_RATE_BASE,                   # initial learning rate
            global_step,
            mnist.train.num_examples/BATCH_SIZE,  # the rate decays once every this many steps (about one pass over the training set)
            LEARNING_RATE_DECAY,                  # decay rate
            staircase=True)

    '''training op'''
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
    '''maintain exponential moving averages of the trainable parameters'''
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step,ema_op]):
        train_op = tf.no_op(name='train')
    
    
    '''save the model'''
    saver = tf.train.Saver()  # instantiate a Saver object
    

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        for i in range(STEPS):
            xs,ys = mnist.train.next_batch(BATCH_SIZE)
            _,loss_value,step = sess.run([train_op,loss,global_step],feed_dict={x:xs,y_:ys})
            if i%1000 ==0:
                # print the current loss every 1000 steps
                print("After %d training step(s),loss on training batch is %g" %(step,loss_value))
                # every 1000 steps, save the model from the current session
                saver.save(sess,os.path.join(MODEL_SAVE_PATH,MODEL_NAME),global_step = global_step)


def main():
    mnist = input_data.read_data_sets("./data",one_hot = True)
    backward(mnist)

if __name__ =='__main__':
    main()
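
With staircase=True, tf.train.exponential_decay makes the learning rate a step function of the training step. A small sketch of the equivalent arithmetic, using the constants above (decay_steps = 55000 / 200 = 275):

# Equivalent arithmetic for the staircase exponential decay used in backward()
LEARNING_RATE_BASE = 0.1
LEARNING_RATE_DECAY = 0.99
decay_steps = 55000 // 200  # 275 batches per decay period (one pass over the training set)

def decayed_lr(global_step):
    return LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step // decay_steps)

print(decayed_lr(0))      # 0.1
print(decayed_lr(275))    # 0.099
print(decayed_lr(27500))  # 0.1 * 0.99**100, about 0.0366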


3. The test program: mnist_test.py

# -*- coding: utf-8 -*-
"""
Created on Sat Sep 29 12:46:26 2018

@author: Ad
"""

import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import mnist_backward
TEST_INTERVAL_SECS=5  # run the test every 5 seconds

def test(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])  # placeholder; None means any number of examples, each with the INPUT_NODE input features defined in the forward module
        y_ = tf.placeholder(tf.float32, [None,mnist_forward.OUTPUT_NODE])
        y = mnist_forward.forward(x, None)
        # restore the moving-average shadow values of the parameters
        ema = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        # instantiate the Saver with the moving-average variable map
        saver = tf.train.Saver(ema_restore)
        # accuracy computation
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))  # argmax along axis 1 returns, for each row, the index of its largest element
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        
        while True:
            with tf.Session() as sess:
                # load the checkpoint state from the model save directory
                ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
                # check whether a saved model exists
                if ckpt and ckpt.model_checkpoint_path:  # if so, restore the model into the current session
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # recover the step count from the checkpoint file name
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    print(mnist.test.images.shape)
                    print(mnist.test.labels.shape)
                    accuracy_score = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
                    # print the accuracy
                    print("After %s training steps, test accuracy = %g" %(global_step, accuracy_score))
                    # if no model is found, report it
                else:
                    print("No checkpoint file found")
                    return
            time.sleep(TEST_INTERVAL_SECS)
def main():
    # main() reads the dataset and passes it to test()
    mnist = input_data.read_data_sets("./data/", one_hot = True)  # read_data_sets() loads the data
    test(mnist)

if __name__ =='__main__':
    main()
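
The accuracy above is simply the fraction of test images whose argmax over the 10 output scores matches the argmax of the one-hot label. A tiny illustration with hypothetical values:

# Hypothetical two-example batch illustrating the accuracy calculation used in test()
import tensorflow as tf

y  = tf.constant([[0.1, 0.9], [0.8, 0.2]])  # network outputs (scores)
y_ = tf.constant([[0.0, 1.0], [1.0, 0.0]])  # one-hot labels
correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
with tf.Session() as sess:
    print(sess.run(accuracy))  # 1.0: both predictions match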


4. The application: mnist_app.py

#coding:utf-8
# preprocess input pictures and predict the digit they contain
import tensorflow as tf
import numpy as np
from PIL import Image
import mnist_backward
import mnist_forward

def restore_model(testPicArr):
    with tf.Graph().as_default() as tg:
        x = tf.placeholder(tf.float32,[None,mnist_forward.INPUT_NODE])  # only x needs a placeholder
        y = mnist_forward.forward(x,None)  # compute the output y
        preValue = tf.argmax(y,1)  # the index of the largest element of y is the predicted value
        
        '''instantiate a Saver that restores the moving-average shadow values'''
        variable_averages = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        
        with tf.Session() as sess :
            '''load the trained model from the latest checkpoint'''
            ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess,ckpt.model_checkpoint_path)
                
                preValue = sess.run(preValue,feed_dict = {x:testPicArr})
                return preValue
            else:
                print("No checkponit file found")
                return -1
            
            
def pre_pic(picName):
    img = Image.open(picName)
    reIm = img.resize((28,28),Image.ANTIALIAS)  # resize the picture to 28*28 with anti-aliasing
    im_arr = np.array(reIm.convert('L'))  # convert reIm to grayscale and then to a NumPy array
    for i in range(28):
        for j in range(28):
            # invert the colors: the input photo has a dark digit on a white background,
            # while MNIST expects white strokes on a black background
            im_arr[i][j] = 255-im_arr[i][j]
            # binarize with a threshold of 50 to suppress noise
            if (im_arr[i][j]<50):
                im_arr[i][j] = 0
            else: im_arr[i][j] = 255
    nm_arr = im_arr.reshape([1,784])           # flatten to a 1x784 array
    nm_arr = nm_arr.astype(np.float32)
    img_ready = np.multiply(nm_arr,1.0/255.0)  # scale pixel values to [0, 1]
    return img_ready   


def application():
    testNum = input("input the number of test picture:")
    for i in range(testNum):
        testPic = raw_input("the path of test picture:")
        '''preprocess the picture into values the network can take as input'''
        testPicArr = pre_pic(testPic)
        '''feed the preprocessed picture to the network'''
        preValue = restore_model(testPicArr)
        print "The prediction number is:",preValue
        
        
        
        
def main():
    application()
    
if __name__ == '__main__':
    main()
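
To see what pre_pic does, the sketch below (with a hypothetical 2*2 grayscale patch) reproduces its invert-then-threshold step: pixel values are inverted so the background becomes black like MNIST, values below 50 are clamped to 0 and the rest to 255, and the result is scaled to [0, 1]:

# Hypothetical patch illustrating the preprocessing in pre_pic (vectorized with NumPy)
import numpy as np

im_arr = np.array([[250, 40], [200, 10]])    # grayscale values from a white-background photo
inverted = 255 - im_arr                      # black background, white strokes
binarized = np.where(inverted < 50, 0, 255)  # threshold at 50
img_ready = binarized.astype(np.float32) / 255.0
print(img_ready)                             # [[0. 1.] [1. 1.]]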

 
