Neural Network: Code Implementation

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('data/', one_hot=True)
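# The loader yields one-hot labels: mnist.train.images is a (55000, 784)
# float array and mnist.train.labels is (55000, 10); mnist.test holds the
# 10,000-image test split.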
# Network architecture: input layer, two hidden layers, output layer
n_hidden_1 = 256 # number of neurons in the first hidden layer
n_hidden_2 = 128 # number of neurons in the second hidden layer
n_input    = 784 # input features (28*28 pixels per image)
n_classes  = 10  # number of output classes (digits 0-9)

# Placeholders for the inputs and labels
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
    
# Network parameters (weights and biases)
std = 0.1 # standard deviation for the Gaussian initializer
weights = {
    'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=std)),
    'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=std)),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], stddev=std))
} # w1 is a 784x256 matrix, w2 is 256x128; both Gaussian-initialized
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
} # b1 has 256 entries, b2 has 128, out has 10
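# Note (our aside, not in the original): tf.random_normal defaults to
# stddev=1.0, so the biases get a fairly wide initialization; initializing
# biases to zeros (tf.zeros) is the more common choice, though either
# works for a network this small.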



# Forward propagation (input X, weights W, biases b)
def multilayer_forward(_X, _weights, _biases):
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['w1']), _biases['b1'])) # compute X*W + b, then apply the sigmoid activation
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, _weights['w2']), _biases['b2']))
    return tf.matmul(layer_2, _weights['out']) + _biases['out'] # output layer: raw logits, no activation (softmax is applied inside the loss)


# Logits from one forward pass (the predictions)
pred = multilayer_forward(x, weights, biases)
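
# Quick sanity check (a sketch of our own, not part of the original
# script): push a dummy batch through the graph and confirm the logits
# come out with shape (batch, n_classes).
with tf.Session() as _s:
    _s.run(tf.global_variables_initializer())
    _dummy = np.zeros((4, n_input), dtype=np.float32)
    print(_s.run(pred, feed_dict={x: _dummy}).shape)  # -> (4, 10)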

# Loss: softmax cross-entropy between the logits and the one-hot labels
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=pred))
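# For reference, the fused op above is equivalent to applying softmax to
# the logits and taking the negative log-likelihood of the true class by
# hand (illustration only; the built-in op is more numerically stable):
#   tf.reduce_mean(-tf.reduce_sum(y * tf.log(tf.nn.softmax(pred)), axis=1))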

# Optimize with mini-batch gradient descent
optm = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)
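# A common alternative (our suggestion, not in the original): Adam usually
# converges much faster than plain SGD at this learning rate:
#   optm = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)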

# Accuracy: fraction of samples whose predicted class matches the label
corr = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accr = tf.reduce_mean(tf.cast(corr, "float"))
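# Toy illustration of the accuracy logic (our own example): with
# predictions [[0.1, 0.9], [0.8, 0.2]] and one-hot labels [[0, 1], [1, 0]],
# tf.argmax(..., 1) gives [1, 0] for both tensors, tf.equal yields
# [True, True], and the mean of the casts is an accuracy of 1.0.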



# Initialization and hyperparameters
init = tf.global_variables_initializer()
training_epochs = 30
batch_size      = 46
display_step    = 5
# Launch the graph
sess = tf.Session()
sess.run(init)
# Training loop
for epoch in range(training_epochs):
    avg_cost = 0.0
    total_batch = int(mnist.train.num_examples / batch_size)
    # Iterate over all mini-batches in this epoch
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feeds = {x: batch_xs, y: batch_ys} # feed the placeholders
        sess.run(optm, feed_dict=feeds)    # one optimization step
        avg_cost += sess.run(cost, feed_dict=feeds)
    avg_cost = avg_cost / total_batch
    # Report progress
    if epoch % display_step == 0:
        print("Epoch: %02d/%02d cost: %.6f" % (epoch, training_epochs, avg_cost))
        feeds = {x: batch_xs, y: batch_ys}
        train_acc = sess.run(accr, feed_dict=feeds) # accuracy on the last training batch
        print("TRAIN ACCURACY: %.3f" % (train_acc))
        feeds = {x: mnist.test.images, y: mnist.test.labels}
        test_acc = sess.run(accr, feed_dict=feeds)  # accuracy on the test set
        print("TEST ACCURACY: %.3f" % (test_acc))
print("Done")

Results:
[Figure: screenshot of the training log printed by the script]
As the log shows, the loss decreases steadily, and the accuracy on both the training and test sets gradually improves.
