TensorFlow Tutorial (1): Single-Layer Logistic Regression on the MNIST Dataset

Single-layer logistic regression code
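
The listing below implements plain softmax (multinomial logistic) regression: each 28x28 image is flattened into a 784-dimensional row vector, passed through a single affine transform to produce 10 class logits, and trained with cross-entropy against one-hot labels. In the notation of the code:

$$\hat{Y} = \mathrm{softmax}(XW + b), \qquad W \in \mathbb{R}^{784 \times 10},\; b \in \mathbb{R}^{1 \times 10}$$

$$\mathrm{loss} = -\frac{1}{N} \sum_{i=1}^{N} \sum_{c=1}^{10} Y_{ic} \log \hat{Y}_{ic}$$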

import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# Placeholders for the flattened 28x28 input images (784 features) and one-hot labels
X = tf.placeholder(tf.float32, [None, 784], name='image')
Y = tf.placeholder(tf.float32, [None, 10], name='label')

w = tf.get_variable(name="weights", shape=(784, 10), initializer=tf.random_normal_initializer())
b = tf.get_variable(name="bias", shape=(1, 10), initializer=tf.zeros_initializer())

# Define the model: logits = X * w + b
logits = tf.matmul(X, w) + b

# Define the loss: softmax cross-entropy, averaged over the batch
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y, name="loss")
loss = tf.reduce_mean(entropy)

# Define the optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

# Compute accuracy
preds = tf.nn.softmax(logits)
correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_preds, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(10000):
        batch = mnist.train.next_batch(50)
        _, batch_loss = sess.run([optimizer, loss], feed_dict={X: batch[0], Y: batch[1]})
        print("epochs:{0}:loss:{1}".format(i, batch_loss))
    test_accuracy = sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels})
    print(test_accuracy)
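
The listing above uses the TensorFlow 1.x graph/session API (tf.placeholder, tf.Session), which no longer exists in TensorFlow 2.x. As a rough, minimal sketch of the same single-layer softmax regression written against tf.keras (assuming TF 2.x; the batch size and learning rate are kept close to the original, and 10 epochs roughly matches the 10000 mini-batch steps above):

import tensorflow as tf

# Load MNIST and flatten each 28x28 image into a 784-dimensional vector
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0

# A single Dense layer producing raw logits, i.e. logits = xW + b
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, input_shape=(784,))
])

# Softmax cross-entropy on logits with integer labels, Adam at the same learning rate
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)

model.fit(x_train, y_train, batch_size=50, epochs=10)
print(model.evaluate(x_test, y_test))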

Output

(Image: console output of the training loop and the final test accuracy)
