Implementing a BP Neural Network for MNIST Handwritten Digit Recognition with TensorFlow

[Python / TensorFlow] Recognizing MNIST handwritten digits with a neural network

# First, import the packages needed for the recognition task (TensorFlow 1.x API)
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST data set; labels are returned one-hot encoded
mnist = input_data.read_data_sets(r"path", one_hot=True)

batch_size = 100
n_batch = mnist.train.num_examples // batch_size  # number of batches per epoch
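As a quick sanity check (my addition, not part of the original post), each call to mnist.train.next_batch returns a pair of NumPy arrays: flattened 28x28 images of shape (batch_size, 784) and one-hot labels of shape (batch_size, 10):

# Hypothetical sanity check: inspect one batch
images, labels = mnist.train.next_batch(batch_size)
print(images.shape, labels.shape)   # (100, 784) (100, 10)
print(labels[0])                    # a one-hot row: a single 1.0 among ten 0.0s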

def variable_summaries(var):
    """Attach TensorBoard summaries (mean, stddev, max, min, histogram) to a tensor."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)               # mean
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)           # standard deviation
        tf.summary.scalar('max', tf.reduce_max(var))  # maximum
        tf.summary.scalar('min', tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram', var)        # histogram
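Each call to variable_summaries registers five summary ops that tf.summary.merge_all() gathers later. A minimal standalone check (my sketch; run it in a fresh graph with the function above, not pasted into the training script itself):

# Sketch: one call to variable_summaries adds five summaries to the graph
demo = tf.Variable(tf.zeros([3]), name='demo')         # hypothetical throwaway variable
variable_summaries(demo)
print(len(tf.get_collection(tf.GraphKeys.SUMMARIES)))  # 5: mean, stddev, max, min, histogram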

# Placeholders for the data and the labels
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, shape=[None, 784], name='x-input')  # 28*28 flattened pixels
    y = tf.placeholder(tf.float32, shape=[None, 10], name='y-input')   # 10 classes, one-hot

# keep_prob = tf.placeholder(tf.float32)        # (unused) dropout keep probability
# lr = tf.Variable(0.001, dtype=tf.float32)     # (unused) adjustable learning rate

# Build a simple neural network
# First (hidden) layer: 784 -> 500, tanh activation
with tf.name_scope('layer1'):
    with tf.name_scope('weights'):
        W1 = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1), name='W1')
        variable_summaries(W1)
    with tf.name_scope('biases'):
        b1 = tf.Variable(tf.zeros([500]) + 0.1, name='b1')
        variable_summaries(b1)
    L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
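The commented-out keep_prob placeholder above is the usual hook for dropout. The original post does not use it, but if enabled, a minimal sketch of how it could be wired into the hidden layer (my assumption):

# Sketch, assuming keep_prob is uncommented in the input section:
keep_prob = tf.placeholder(tf.float32)
L1_drop = tf.nn.dropout(L1, keep_prob)   # randomly zero hidden activations while training
# Layer 2 would then consume L1_drop instead of L1, and keep_prob must be fed in every
# sess.run call, e.g. keep_prob: 0.7 during training and keep_prob: 1.0 at test time.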

# Second (output) layer: 500 -> 10, softmax activation
with tf.name_scope('layer2'):
    with tf.name_scope('weights2'):
        W2 = tf.Variable(tf.truncated_normal([500, 10], stddev=0.1), name='W2')
        variable_summaries(W2)
    with tf.name_scope('biases2'):
        b2 = tf.Variable(tf.zeros([10]) + 0.1, name='b2')
        variable_summaries(b2)
    with tf.name_scope('wx'):
        y_predict = tf.nn.softmax(tf.matmul(L1, W2) + b2)
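The softmax turns the 10 raw scores for each image into a probability distribution over the digit classes. A tiny NumPy illustration (my addition, with made-up values, not part of the training graph):

# Standalone illustration of softmax
import numpy as np
scores = np.array([2.0, 1.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])  # made-up scores for 10 classes
probs = np.exp(scores) / np.sum(np.exp(scores))
print(probs.sum())     # ~1.0: a valid probability distribution
print(probs.argmax())  # 0: the most likely class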

# Loss: mean squared error between the one-hot labels and the predictions
# (this and the cross-entropy version commented out below are interchangeable)
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.square(y - y_predict))
    tf.summary.scalar('loss', loss)

# Minimize the loss with gradient descent
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Alternative: cross-entropy loss with the Adam optimizer
# (softmax_cross_entropy_with_logits expects the pre-softmax scores, not y_predict)
# loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
# train_step = tf.train.AdamOptimizer(lr).minimize(loss)
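If you switch to the cross-entropy loss from the comments, note that softmax_cross_entropy_with_logits must be given the raw scores before the softmax. A minimal sketch of that variant (my wiring, not verbatim from the post; 0.001 matches the commented-out lr variable):

# Sketch of the cross-entropy + Adam variant, replacing the loss/train definitions above:
# inside layer2, keep the raw scores before the softmax...
logits = tf.matmul(L1, W2) + b2
y_predict = tf.nn.softmax(logits)   # prediction still used by the accuracy block below
# ...and build the loss from the logits, not from y_predict:
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)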

# Testing phase: compute the classification accuracy
with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y, 1))
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # mean accuracy over the fed examples
        tf.summary.scalar('accuracy', accuracy)
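The argmax / equal / cast-and-mean pattern above is easiest to see on a toy example (my addition, plain NumPy with 3 classes instead of 10):

# Standalone illustration of the accuracy computation
import numpy as np
pred    = np.array([[0.1, 0.8, 0.1], [0.7, 0.2, 0.1]])  # two made-up prediction rows
label   = np.array([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]])  # the corresponding one-hot labels
correct = np.argmax(pred, 1) == np.argmax(label, 1)     # [ True, False]
print(correct.astype(np.float32).mean())                # 0.5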

# Merge all the summaries
merged = tf.summary.merge_all()

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter(r"C:\Users\Administrator\Desktop\logs", sess.graph)
    for i in range(100):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # To run several ops at once, wrap them in a list (square brackets)
            summary, _ = sess.run([merged, train_step], feed_dict={x: batch_xs, y: batch_ys})
        writer.add_summary(summary, i)  # write the last batch's summaries once per epoch
        test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        train_acc = sess.run(accuracy, feed_dict={x: mnist.train.images, y: mnist.train.labels})
        print('Iter: ' + str(i) +
              ', testing accuracy: ' + str(test_acc) +
              ', training accuracy: ' + str(train_acc))
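Once training has run, the event files written by the FileWriter can be inspected in TensorBoard (assuming TensorBoard is installed alongside TensorFlow); point it at the same log directory and open the printed URL in a browser:

tensorboard --logdir C:\Users\Administrator\Desktop\logs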
