Implementing Logistic Regression (LR) with TensorFlow

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load MNIST with one-hot labels; the training loop below reads
# mnist.train and mnist.test.
mnist = input_data.read_data_sets('data/', one_hot=True)

# Parameter initialization
x = tf.placeholder(tf.float32, [None, 784])  # None: the batch dimension is left unspecified
y = tf.placeholder(tf.float32, [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
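# Zero initialization is fine here: softmax regression with a cross-entropy
# loss is a convex problem, so there is no symmetry to break (unlike in
# multi-layer networks, which need random initialization).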

# Logistic regression model: softmax over a linear transform
actv = tf.nn.softmax(tf.matmul(x, W) + b)
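# tf.nn.softmax normalizes each row of logits into a probability distribution:
#     softmax(z)_k = exp(z_k) / sum_j exp(z_j)
# so every row of actv is non-negative and sums to 1.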

# Cost function: mean cross-entropy over the mini-batch
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(actv), axis=1))
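# The cost is the average cross-entropy:
#     J(W, b) = -(1/m) * sum_i sum_k y[i][k] * log(actv[i][k])
# where m is the batch size; with one-hot labels only the true class
# contributes to each inner sum. (In practice
# tf.nn.softmax_cross_entropy_with_logits is preferred, since computing
# log(softmax(...)) explicitly can underflow to log(0).)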

# Optimizer: plain (mini-batch) gradient descent
learning_rate = 0.01
optm = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
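# Each run of optm applies one gradient-descent update:
#     W <- W - learning_rate * dJ/dW,   b <- b - learning_rate * dJ/db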

# Prediction: does the predicted class index match the label?
pred = tf.equal(tf.argmax(actv, 1), tf.argmax(y, 1))

# Accuracy: fraction of correct predictions
accr = tf.reduce_mean(tf.cast(pred, 'float'))
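# Worked example: if pred evaluates to [True, False, True, True],
# tf.cast yields [1., 0., 1., 1.] and accr = 0.75.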

init = tf.global_variables_initializer()

training_epochs = 200
batch_size      = 100
display_step    = 10

sess = tf.Session()
sess.run(init)

# MINI-BATCH LEARNING
for epoch in range(training_epochs):
    avg_cost = 0.
    num_batch = int(mnist.train.num_examples / batch_size)
    for i in range(num_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feeds = {x: batch_xs, y: batch_ys}
        sess.run(optm, feed_dict=feeds)
        avg_cost += sess.run(cost, feed_dict=feeds) / num_batch
    if epoch % display_step == 0:
        feeds_train = {x: batch_xs, y: batch_ys}  # train accuracy on the last mini-batch only
        feeds_test = {x: mnist.test.images, y: mnist.test.labels}
        train_acc = sess.run(accr, feed_dict=feeds_train)
        test_acc = sess.run(accr, feed_dict=feeds_test)
        print ("Epoch: %03d/%03d cost: %.9f train_acc: %.3f test_acc: %.3f" 
               % (epoch, training_epochs, avg_cost, train_acc, test_acc))
print ("DONE")
