import tensorflow as tf
import random
from tensorflow.examples.tutorials.mnist import input_data

# Reproducibility: fix the graph-level random seed.
tf.set_random_seed(777)

# MNIST is downloaded/cached under 'MNIST_data'; labels come back one-hot.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

training_epoch = 15
batch_size = 100

# NOTE: `dropout` is a KEEP probability (fed 0.7 while training, 1.0 at eval).
dropout = tf.placeholder(tf.float32)
x = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 grayscale images
y = tf.placeholder(tf.float32, [None, 10])    # one-hot digit labels
x_img = tf.reshape(x, [-1, 28, 28, 1])

# Conv/pool stack. Every conv uses stride 2 and every pool halves again with
# 'same' padding, so spatial dims shrink 28 -> 14 -> 7 -> 4 -> 2 -> 1 -> 1.
con_1 = tf.layers.conv2d(x_img, 32, (3, 3), strides=[2, 2], padding='same',
                         activation=tf.nn.relu, name='con_1')
pool_1 = tf.layers.max_pooling2d(con_1, [2, 2], strides=[2, 2], padding='same')
pool_1 = tf.nn.dropout(pool_1, keep_prob=dropout)

con_2 = tf.layers.conv2d(pool_1, 64, (3, 3), strides=[2, 2], padding='same',
                         activation=tf.nn.relu)
pool_2 = tf.layers.max_pooling2d(con_2, [2, 2], strides=[2, 2], padding='same')
pool_2 = tf.nn.dropout(pool_2, keep_prob=dropout)

con_3 = tf.layers.conv2d(pool_2, 128, (3, 3), strides=[2, 2], padding='same',
                         activation=tf.nn.relu)
pool_3 = tf.layers.max_pooling2d(con_3, [2, 2], strides=[2, 2], padding='same')
pool_3 = tf.nn.dropout(pool_3, keep_prob=dropout)

flatten = tf.layers.flatten(pool_3)
a1 = tf.layers.dense(flatten, 625, activation=tf.nn.relu)
a1 = tf.nn.dropout(a1, keep_prob=dropout)

# Keep the classifier head as raw logits and let the loss apply softmax
# internally: tf.nn.softmax_cross_entropy_with_logits_v2 is numerically
# stable, whereas the original softmax activation + manual
# -reduce_sum(y * log(a2)) could hit log(0) and produce NaN gradients.
logits = tf.layers.dense(a1, 10)
a2 = tf.nn.softmax(logits)  # class probabilities, used for prediction/accuracy

cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))
accuracy = tf.reduce_mean(
    tf.cast(tf.equal(tf.argmax(a2, 1), tf.argmax(y, 1)), tf.float32))

optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Mini-batch training loop; prints the average batch loss for each epoch.
# (Uses training_epoch instead of the original hard-coded range(15).)
for i in range(training_epoch):
    avg_cost = 0
    m = int(mnist.train.num_examples / batch_size)
    for j in range(m):
        xdata, ydata = mnist.train.next_batch(batch_size)
        cost_var, _ = sess.run([cost, optimizer],
                               feed_dict={x: xdata, y: ydata, dropout: 0.7})
        avg_cost += cost_var / m
    print(i + 1, '----', avg_cost)

# Evaluate on the held-out test set with dropout disabled (keep prob = 1).
print(sess.run(accuracy,
               feed_dict={x: mnist.test.images,
                          y: mnist.test.labels,
                          dropout: 1}))