python3下tensorflow练习(二)
这篇文章是对上篇博文的补充和改进,上篇博文建立了单层神经网络对MNIST手写字进行了识别,准确率只有85%
本文建立多层神经网络,用改进的交叉熵函数计算损失梯度,用改进的优化器Adam,用55000个数据,分批进行训练,提高了识别率。
1.建立多层网络
h1= add_layer(xs, 784, 256,activation_function=tf.nn.relu)#隐藏层,256个激活单元,激活函数选用RELU
prediction = add_layer(h1, 256, 10, activation_function=None)#输出层,10个输出,不选用激活函数,因为在交叉熵计算时候会对prediction用softmax进行预测
2.改进的交叉熵
cross_entropy=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits
(logits=prediction,labels=ys) )
3.改进的优化器
train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cross_entropy)
4.对55000个样本分batch进行训练
for i in range(55000):
#读取批次数据,为了更快的收敛
#每次读取100项批次训练数据,读取的训练数据存放在 batch_xs, batch_ys ,准备训练使用
batch_xs, batch_ys = mnist.train.next_batch(100)#训练集共55000个样本
sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})#训练模型
全文代码:
# -*- coding: utf-8 -*-
"""
Multi-layer neural network for MNIST handwritten-digit recognition.

Created on Sat May 5 22:06:04 2018
@author: jiangcheng
"""
# Multi-layer neural network, MNIST recognition
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt  # plotting module (only used by visualization code that is commented out below)
# Digits 0-9; the MNIST data is expected to be pre-downloaded into the 'MNIST_data' folder.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)  # one_hot encoding, e.g. label 7 -> 0000000100
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Build one fully-connected layer and return its output tensor.

    inputs: 2-D tensor of shape [batch, in_size].
    in_size / out_size: input and output feature dimensions.
    activation_function: optional op applied to the linear output;
        when None the raw linear output is returned.

    Weights are drawn from a normal distribution and biases start at 0.1:
    a deliberate (not purely random) initialization helps the optimizer
    find a good solution faster.
    """
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    linear_output = tf.matmul(inputs, weights) + biases
    if activation_function is None:
        return linear_output
    return activation_function(linear_output)
def compute_accuracy(v_xs, v_ys):
    """Return classification accuracy of the global `prediction` on (v_xs, v_ys).

    v_xs: input images, shape [n, 784]; v_ys: one-hot labels, shape [n, 10].
    Returns the fraction of samples whose argmax prediction matches the
    label, e.g. 98/100 = 0.98.

    Fix: the original version created new tf.equal/tf.cast/tf.reduce_mean
    ops on every call, growing the graph without bound when evaluated
    repeatedly during training, and its second sess.run re-ran the whole
    forward pass.  The accuracy is now computed once on the fetched
    predictions with NumPy, leaving the graph untouched.
    """
    import numpy as np  # local import so the module-level import block stays unchanged
    global prediction  # output tensor of the network, defined at module level
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})  # logits, shape [n, 10]
    correct = np.argmax(y_pre, axis=1) == np.argmax(v_ys, axis=1)  # bool per sample
    return float(np.mean(correct))
# Placeholders: 28x28 images flattened to 784 features, one-hot labels for 10 digits
# (digit 1 is encoded as [0,1,0,0,0,0,0,0,0,0]).
xs = tf.placeholder(tf.float32, [None, 784])
ys = tf.placeholder(tf.float32, [None, 10])

# Two-layer network: a 256-unit ReLU hidden layer, then a 10-way linear output.
# The output layer deliberately has no activation:
# tf.nn.softmax_cross_entropy_with_logits applies softmax internally,
# so `prediction` must stay as raw logits.
h1 = add_layer(xs, 784, 256, activation_function=tf.nn.relu)
prediction = add_layer(h1, 256, 10, activation_function=None)

# Numerically stable cross-entropy computed from logits — better than a
# hand-rolled -sum(y * log(softmax)) formulation.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=ys))
# Adam converges much faster here than plain gradient descent.
train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cross_entropy)

sess = tf.Session()
# Fix: tf.initialize_all_variables() was deprecated in TF 0.12;
# tf.global_variables_initializer() is the supported replacement.
sess.run(tf.global_variables_initializer())

for i in range(55000):
    # Mini-batch training for faster convergence: 100 samples per step
    # drawn from the 55,000-image training set.
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
    if i % 5500 == 0:
        # Periodically report accuracy on the held-out test set.
        print(compute_accuracy(mnist.test.images, mnist.test.labels))
训练效果: