TensorFlow Learning: A Simple Convolutional Neural Network (CNN) Implementation
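
This post implements a minimal convolutional neural network for MNIST handwritten-digit classification in TensorFlow: two convolution + max-pooling layers, a fully connected layer with dropout, and a softmax output layer, following 《Tensorflow实战》, pp. 74-83.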

# -*- coding: utf-8 -*-
"""
Created on Thu Aug 17 16:24:55 2017
Project: Convolutional Neural Network (two convolutional layers, one fully connected layer, and one softmax layer)
E-mail: [email protected]
Reference: 《Tensorflow实战》P74-P83
@author: DidiLv
"""

import tensorflow as tf
import numpy as np

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)


def weight_variable(shape):
    # shape is a list, e.g. [5, 5, 1, 32]; small truncated-normal noise breaks symmetry
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias(shape):
    # a small positive constant keeps ReLU units active at the start of training
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, W):
    # W is the filter; stride 1 and SAME padding preserve the spatial size
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # ksize/strides are [batch, height, width, channels]; 2x2 pooling with stride 2 halves height and width
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
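
# For example, conv2d on a [batch, 28, 28, 1] input with a [5, 5, 1, 32] filter yields
# [batch, 28, 28, 32] (SAME padding), and max_pool_2x2 then gives [batch, 14, 14, 32].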

# placeholder for the flattened 28x28 input images
x = tf.placeholder(tf.float32, [None, 784])
# placeholder for the one-hot ground-truth labels
y_ = tf.placeholder(tf.float32, [None, 10])
# reshape the flat MNIST vectors back into 2-D images; -1 infers the batch size
x_image = tf.reshape(x, [-1, 28, 28, 1])

# first convolutional layer: 5x5 kernels, 1 input channel -> 32 output channels
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias([32])
# convolution + ReLU, then 2x2 max pooling
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
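# h_pool1 shape: [batch, 14, 14, 32]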

# second convolutional layer: 5x5 kernels, 32 -> 64 channels
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias([64])
# convolution + ReLU, then 2x2 max pooling
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
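# h_pool2 shape: [batch, 7, 7, 64]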

# fully connected layer: flatten the 7x7x64 feature maps and map them to 1024 hidden units
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])

W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias([1024])

h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# dropout: keep each activation with probability keep_prob to reduce overfitting
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)


# softmax output layer: 1024 hidden units -> 10 class probabilities
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

# define the loss function
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
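# note: taking log(softmax) explicitly can underflow; tf.nn.softmax_cross_entropy_with_logits
# on the pre-softmax logits is the numerically safer alternative, kept explicit here to follow the book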

# training: Adam optimizer with a learning rate of 1e-4
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# prediction: a sample is correct when the predicted and true argmax classes match
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# create a session and run the graph
sess = tf.InteractiveSession()

tf.global_variables_initializer().run()
for i in range(20000):
    batch = mnist.train.next_batch(50)
    # report training accuracy every 100 steps, with dropout disabled (keep_prob = 1.0)
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

print("test accuracy %g" %(accuracy.eval(feed_dict = {x: mnist.test.images,y_: mnist.test.labels,keep_prob:1.0})))
