TensorFlow in Practice, Part 3: Implementing a Multi-Layer Perceptron (MLP)

# -*- coding: utf-8 -*-

# The MNIST loader ships with TensorFlow 1.x under tensorflow.examples.tutorials.mnist
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
sess = tf.InteractiveSession()

# Create Variables for the hidden-layer parameters and initialize them
in_units = 784    # number of input nodes (28*28 pixels)
h1_units = 300    # number of hidden-layer nodes (for this model, anything in the 200-1000 range gives similar results)
W1 = tf.Variable(tf.truncated_normal([in_units, h1_units], stddev=0.1))  # hidden-layer weights, drawn from a truncated normal with stddev 0.1
b1 = tf.Variable(tf.zeros([h1_units]))  # hidden-layer biases, initialized to zero
W2 = tf.Variable(tf.zeros([h1_units, 10]))  # output-layer weights
b2 = tf.Variable(tf.zeros([10]))  # output-layer biases
# Define the placeholder for the input x
x = tf.placeholder(tf.float32, [None, in_units])
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability (the probability of keeping a node), also fed in as a graph input
# Define a hidden layer hidden1 with ReLU activation
hidden1 = tf.nn.relu(tf.matmul(x, W1) + b1)
# Apply dropout: randomly set a fraction of the activations to 0; keep_prob is the fraction that is kept (not zeroed)
hidden1_drop = tf.nn.dropout(hidden1, keep_prob)  # hidden layer after dropout
y = tf.nn.softmax(tf.matmul(hidden1_drop, W2) + b2)
# Define the loss function
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))  # cross-entropy again (see the note after the listing for a numerically safer variant)
# Use the adaptive Adagrad optimizer with the learning rate set to 0.3
train_step = tf.train.AdagradOptimizer(0.3).minimize(cross_entropy)
tf.global_variables_initializer().run()
for i in range(3000):
    batch_xs, batch_ys = mnist.train.next_batch(100)  # 3000 batches of 100 samples each
    train_step.run({x: batch_xs, y_: batch_ys, keep_prob: 0.75})
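    # Optional sketch (not in the original code): to watch convergence you could
    # print the batch loss every few hundred steps inside the loop, e.g.
    #   if i % 500 == 0:
    #       print(i, cross_entropy.eval({x: batch_xs, y_: batch_ys, keep_prob: 1.0}))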
# Evaluate the model's accuracy
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # tf.cast converts correct_prediction to float32 so it can be averaged
print(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))  # keep_prob is set to 1 at prediction time (no dropout during inference)
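
One caveat about the loss above: tf.log(y) can blow up to -inf if the softmax output ever reaches exactly 0. A numerically safer formulation of the y / cross_entropy lines, sketched below on the assumption that the rest of the graph stays unchanged, lets TensorFlow combine the softmax and the cross-entropy via tf.nn.softmax_cross_entropy_with_logits, feeding it the raw logits instead of the softmax output:

logits = tf.matmul(hidden1_drop, W2) + b2
y = tf.nn.softmax(logits)  # still available for the accuracy calculation
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))

The training loop and the accuracy evaluation stay exactly the same.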
    
    

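The listing above targets the TensorFlow 1.x API (placeholders, InteractiveSession, tf.train.*), which matches the book. For readers on TensorFlow 2.x, a minimal sketch of the same architecture with the Keras API might look like the following; this is my own rough equivalent rather than the book's code, and the 0.25 dropout rate corresponds to keep_prob = 0.75 above:

import tensorflow as tf

# Load MNIST and flatten the 28x28 images to 784-dimensional vectors in [0, 1]
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0

# Same MLP: 784 -> 300 (ReLU) -> dropout -> 10 (softmax)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(300, activation="relu", input_shape=(784,)),
    tf.keras.layers.Dropout(0.25),  # Keras takes the drop rate, i.e. 1 - keep_prob
    tf.keras.layers.Dense(10, activation="softmax"),
])
model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.3),
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])

# Roughly the same training budget: 3000 steps of batch size 100 ~ 5 epochs over 60000 images
model.fit(x_train, y_train, batch_size=100, epochs=5)
print(model.evaluate(x_test, y_test))  # [test loss, test accuracy]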
Reference: 《TensorFlow实战》
