from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf


# Create an InteractiveSession

sess = tf.InteractiveSession()
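InteractiveSession registers itself as the default session, which is what lets the Operation.run() and Tensor.eval() calls further down work without passing a session explicitly. The two forms below are equivalent (shown as comments for illustration only; train_step is defined later):

# train_step.run(feed_dict=...)           # uses the default (interactive) session
# sess.run(train_step, feed_dict=...)     # explicit equivalent with any session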


# Define placeholders for the input images and the target labels

x = tf.placeholder("float", shape=[None, 784])

y_ = tf.placeholder("float", shape=[None, 10])


# Define the model parameters: the weight matrix W and the bias vector b

W = tf.Variable(tf.zeros([784,10]))

b = tf.Variable(tf.zeros([10]))


# Variables must be initialized in a session before they can be used

sess.run(tf.global_variables_initializer())


# Define the softmax regression model, its cross-entropy loss, and a gradient descent training step

y = tf.nn.softmax(tf.matmul(x,W) + b)

cross_entropy = -tf.reduce_sum(y_*tf.log(y))

train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
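One caveat: the hand-rolled -tf.reduce_sum(y_ * tf.log(y)) produces NaN if any predicted probability reaches exactly 0. A numerically safer sketch (TF 1.x keyword arguments assumed; not what this post actually runs) keeps the raw logits and lets TensorFlow fuse the softmax with the log:

logits = tf.matmul(x, W) + b

cross_entropy_stable = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))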

# Load the MNIST data (downloaded to MNIST/ on first use)

mnist = input_data.read_data_sets('MNIST/', one_hot=True)

# Train with 1000 mini-batches of 50 examples each

for i in range(1000):

  batch = mnist.train.next_batch(50)

  train_step.run(feed_dict={x: batch[0], y_: batch[1]})

# Evaluate: compare predicted and true labels, then average the boolean results

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))

accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))

0.9092
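Plain softmax regression tops out at around 91% on MNIST, which is what motivates the convolutional network built next.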


# Two helper functions for initializing weights and biases

def weight_variable(shape):

  initial = tf.truncated_normal(shape, stddev=0.1)

  return tf.Variable(initial)

def bias_variable(shape):

  initial = tf.constant(0.1, shape=shape)

  return tf.Variable(initial)
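The initializers are chosen deliberately: a truncated normal with a small standard deviation adds enough noise to break the symmetry between neurons, and the slightly positive bias of 0.1 helps avoid "dead" ReLU units that would always output zero.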


# Convolutions use a stride of 1 and zero (SAME) padding, so the output keeps the input's spatial size

# Max pooling is done over 2x2 blocks

def conv2d(x, W):

  return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):

  return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1], padding='SAME')
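A quick shape check clarifies the arithmetic used later: with a stride of 1 and SAME padding the convolution preserves the 28x28 spatial size, while each 2x2 max pool halves it, so two pooling layers leave 7x7 feature maps. The demo tensor below is hypothetical and only used to print static shapes:

demo = tf.zeros([1, 28, 28, 1])                            # one fake grayscale image
print(conv2d(demo, tf.zeros([5, 5, 1, 32])).get_shape())   # (1, 28, 28, 32)
print(max_pool_2x2(demo).get_shape())                      # (1, 14, 14, 1)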


# First convolutional layer: the weight tensor has shape [5, 5, 1, 32], i.e. 5x5 patches, 1 input channel, 32 output features

W_conv1 = weight_variable([5, 5, 1, 32])

b_conv1 = bias_variable([32])


# Reshape x into a 4-D tensor: the 2nd and 3rd dimensions are the image width and height, and the last dimension is the number of color channels (1 here because the images are grayscale; an RGB color image would have 3)

# Convolve x_image with the weight tensor, add the bias, apply the ReLU activation, and finally max-pool

x_image = tf.reshape(x, [-1, 28, 28, 1])

h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)   # shape: [-1, 28, 28, 32]

h_pool1 = max_pool_2x2(h_conv1)                            # shape: [-1, 14, 14, 32]


# Second convolutional layer: each 5x5 patch yields 64 features

W_conv2 = weight_variable([5, 5, 32, 64])

b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)   # shape: [-1, 14, 14, 64]

h_pool2 = max_pool_2x2(h_conv2)                            # shape: [-1, 7, 7, 64]


# Densely connected layer: the feature maps have been reduced to 7x7, so we flatten them and add a fully connected layer with 1024 neurons

W_fc1 = weight_variable([7 * 7 * 64, 1024])

b_fc1 = bias_variable([1024])

h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])

h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)


# Dropout layer to reduce overfitting; keep_prob is fed 0.5 during training and 1.0 at evaluation

keep_prob = tf.placeholder("float")

h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
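Note that tf.nn.dropout also scales the surviving activations by 1/keep_prob, so their expected sum is unchanged and no extra rescaling is needed at evaluation time.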


# Output layer: a softmax over the 10 digit classes

W_fc2 = weight_variable([1024, 10])

b_fc2 = bias_variable([10])


# Training and evaluation

y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))

train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))   # boolean: was each prediction right?

accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))         # fraction of correct predictions

sess.run(tf.global_variables_initializer())

# Train for 20,000 steps, logging the training accuracy every 100 steps

for i in range(20000):

  batch = mnist.train.next_batch(50)

  if i % 100 == 0:

    train_accuracy = accuracy.eval(feed_dict={
        x: batch[0], y_: batch[1], keep_prob: 1.0})

    print("step %d, training accuracy %g" % (i, train_accuracy))

  train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
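The listing stops before scoring the trained network on the test set; the matching final step from the same tutorial (note that keep_prob is fed 1.0 so nothing is dropped at evaluation time) would be:

print("test accuracy %g" % accuracy.eval(feed_dict={
    x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))

The original tutorial reports roughly 99.2% test accuracy for this model after the full 20,000 steps.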


The run below takes quite a long time; how long depends, of course, on the number and clock speed of your CPU cores.


step 0, training accuracy 0.1

step 100, training accuracy 0.8

step 200, training accuracy 0.94

step 300, training accuracy 0.88

step 400, training accuracy 0.96

step 500, training accuracy 0.88

step 600, training accuracy 0.98

step 700, training accuracy 0.92

step 800, training accuracy 0.96

step 900, training accuracy 0.94

step 1000, training accuracy 0.98

……