Handwritten Digit Recognition with a Convolutional Neural Network Using Library Calls

# Requires TensorFlow 1.x; the input_data helper ships with the TF 1.x tutorial examples.
import tensorflow as tf
import random
from tensorflow.examples.tutorials.mnist import input_data

Set the random seed for reproducibility

tf.set_random_seed(777)

Load the data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
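As a quick sanity check, the returned dataset object exposes the sizes and shapes below (a minimal sketch, using the standard attributes of the TF 1.x MNIST helper):

print(mnist.train.num_examples)   # 55000
print(mnist.test.num_examples)    # 10000
print(mnist.train.images.shape)   # (55000, 784): each row is a flattened 28x28 grayscale image
print(mnist.train.labels.shape)   # (55000, 10): one-hot labels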

training_epoch = 15  # passes over the training set
batch_size = 100     # examples per gradient step

dropout = tf.placeholder(tf.float32)  # keep probability: 0.7 during training, 1.0 at evaluation

x = tf.placeholder(tf.float32, [None, 784])  # flattened 28x28 images
y = tf.placeholder(tf.float32, [None, 10])   # one-hot digit labels

Reshape to image dimensions

x_img = tf.reshape(x, [-1, 28, 28, 1])
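The -1 lets TensorFlow infer the batch dimension from the data. The static shape can be checked at graph-construction time, without a session (a quick sketch):

print(x_img.shape)  # (?, 28, 28, 1): unknown batch size, 28x28 pixels, 1 channel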

Call the library on the image tensor x_img: 32 convolution kernels, a (3, 3) convolution with strides=[2, 2]; padding='same' pads the borders so no pixels are dropped (see the shape note below); activation=tf.nn.relu applies the activation function (others can be used); name='con_1' is an arbitrary layer name.

con_1 = tf.layers.conv2d(x_img, 32, (3, 3), strides=[2, 2], padding='same', activation=tf.nn.relu, name='con_1')
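With padding='same' the output spatial size is ceil(input / stride), so 28x28 becomes ceil(28 / 2) = 14x14 here; only with stride 1 would 'same' keep the size unchanged. A quick check (sketch):

print(con_1.shape)  # (?, 14, 14, 32)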

Pooling layer: apply [2, 2] max pooling to con_1, with strides=[2, 2] and padding='same' as above.

pool_1 = tf.layers.max_pooling2d(con_1, [2, 2], strides=[2, 2], padding='same')

pool_1 = tf.nn.dropout(pool_1, keep_prob=dropout)  # dropout after pooling

con_2 = tf.layers.conv2d(pool_1, 64, (3, 3), strides=[2, 2], padding='same',
                         activation=tf.nn.relu)
pool_2 = tf.layers.max_pooling2d(con_2, [2, 2], strides=[2, 2],
                                 padding='same')
pool_2 = tf.nn.dropout(pool_2, keep_prob=dropout)

con_3 = tf.layers.conv2d(pool_2, 128, (3, 3), strides=[2, 2], padding='same',
                         activation=tf.nn.relu)
pool_3 = tf.layers.max_pooling2d(con_3, [2, 2], strides=[2, 2],
                                 padding='same')
pool_3 = tf.nn.dropout(pool_3, keep_prob=dropout)

con_4 = tf.layers.conv2d(pool_3, 256, (3, 3), strides=[2, 2], padding='same',
                         activation=tf.nn.relu)
pool_4 = tf.layers.max_pooling2d(con_4, [2, 2], strides=[2, 2],
                                 padding='same')
pool_4 = tf.nn.dropout(pool_4, keep_prob=dropout)  # was keep_prob=True, which disables dropout

con_5 = tf.layers.conv2d(pool_4, 32, (3, 3), strides=[2, 2], padding='same',
                         activation=tf.nn.relu)
pool_5 = tf.layers.max_pooling2d(con_5, [2, 2], strides=[2, 2],
                                 padding='same')
pool_5 = tf.nn.dropout(pool_5, keep_prob=dropout)  # likewise keep_prob=dropout, not True
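Every conv and pooling layer above uses stride 2 with 'same' padding, so each halves the spatial size (rounding up) until it bottoms out at 1x1: 28 → 14 → 7 → 4 → 2 → 1 → 1 → ... A sketch that traces the static shapes at graph-construction time:

# Spatial trace: con_1 (?,14,14,32), pool_1 (?,7,7,32), con_2 (?,4,4,64),
# pool_2 (?,2,2,64), con_3 (?,1,1,128), pool_3 (?,1,1,128); layers 4-5 stay at 1x1.
for name, layer in [('con_1', con_1), ('pool_1', pool_1), ('con_2', con_2),
                    ('pool_2', pool_2), ('con_3', con_3), ('pool_3', pool_3),
                    ('con_4', con_4), ('pool_4', pool_4), ('con_5', con_5),
                    ('pool_5', pool_5)]:
    print(name, layer.shape)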

flatten = tf.layers.flatten(pool_5)  # was pool_3 in the original, which left con_4/con_5 disconnected from the loss

Almost everything below is plain library calls; consult the documentation for the individual library functions as needed.

a1 = tf.layers.dense(flatten, 625, activation=tf.nn.relu)  # fully connected layer
a1 = tf.nn.dropout(a1, keep_prob=dropout)
a2 = tf.layers.dense(a1, 10, activation=tf.nn.softmax)     # output probabilities over the 10 digits

cost = -tf.reduce_mean(tf.reduce_sum(y*tf.log(a2), axis=1))  # cross-entropy computed by hand from the softmax output
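Note that tf.log(a2) can produce -inf when a softmax probability underflows to 0. A numerically safer alternative (a sketch, not the original code) outputs raw logits from the last dense layer and uses TensorFlow's fused op; it would replace the softmax layer a2 and the cost line above:

# Alternative: keep the last layer linear and fuse softmax with the cross-entropy.
logits = tf.layers.dense(a1, 10)   # no activation: raw logits
a2 = tf.nn.softmax(logits)         # probabilities, still available for accuracy/prediction
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))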

accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(a2, 1), tf.argmax(y, 1)), tf.float32))  # fraction of samples where the predicted digit matches the label

sess = tf.Session()

optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
sess.run(tf.global_variables_initializer())
for i in range(training_epoch):
    avg_cost = 0
    m = int(mnist.train.num_examples / batch_size)
    for j in range(m):
        xdata, ydata = mnist.train.next_batch(batch_size)
        cost_var, _ = sess.run([cost, optimizer],
                               feed_dict={x: xdata, y: ydata, dropout: 0.7})
        avg_cost += cost_var / m
    print(i + 1, '----', avg_cost)

print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, dropout: 1}))  # evaluate on the test set with dropout disabled
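The random module imported at the top is never used above; a common follow-up is to sample one test image and compare the model's prediction with its label (a sketch):

# Predict a single random test example.
r = random.randint(0, mnist.test.num_examples - 1)
print('Label:     ', sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print('Prediction:', sess.run(tf.argmax(a2, 1),
                              feed_dict={x: mnist.test.images[r:r + 1], dropout: 1}))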
