TensorFlow Code Implementation (1) [MNIST Handwritten Digit Recognition]

The simplest network structure:

  1. Data preparation: the dataset was analyzed in an earlier article.
  2. Here we build a one-layer network:
    • Setup:
      • Parameters:
        • train images: each image is 28*28 pixels, i.e. 784 values as a flat vector, so we define x = tf.placeholder(tf.float32, [None, 784])
        • train labels: each image maps to one of 10 classes, so we define y_ = tf.placeholder(tf.float32, [None, 10])
        • weights: we map the 784 inputs to the 10 outputs, so we define W = tf.Variable(tf.zeros([784, 10]))
        • biases: one bias per output class, so we define b = tf.Variable(tf.zeros([10]))
        • prediction: the raw scores (logits) are turned into probabilities with softmax: y = tf.nn.softmax(tf.matmul(x, W) + b)
        • cost function: cross-entropy, computed directly from the logits (see the note after this list): cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=tf.matmul(x, W) + b))
    • Flow: (flow diagram omitted)
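A note on the loss: tf.nn.softmax_cross_entropy_with_logits applies softmax internally, so it expects raw scores. Passing tf.log(y) (the log of an already-softmaxed output, as some versions of this example do) cancels out mathematically but is numerically fragile when y contains zeros. A minimal sketch of the stable formulation, which the listing below uses:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

logits = tf.matmul(x, W) + b  # raw, unnormalized scores
y = tf.nn.softmax(logits)     # probabilities, used only for prediction
# the op applies softmax to the logits itself:
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))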

Implementation:

import tensorflow as tf
# standard TF 1.x MNIST loader (replaces the local data.input_data copy)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
logits = tf.matmul(x, W) + b
y = tf.nn.softmax(logits)

# cross-entropy on the raw logits (numerically stable)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
init = tf.global_variables_initializer()
sess = tf.InteractiveSession()
sess.run(init)
for i in range(1000):
    batch_x,batch_y = mnist.train.next_batch(100)
    sess.run(train_step,feed_dict={x:batch_x,y_:batch_y})
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,"float"))
print(sess.run(accuracy,feed_dict = {x:mnist.test.images,y_:mnist.test.labels}))
sess.close()
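As a quick sanity check, one could inspect a single prediction (a sketch; it assumes it is placed before sess.close() so the session is still open):

# predicted digit vs. true label for the first test image
import numpy as np
img = mnist.test.images[0:1]             # shape (1, 784)
probs = sess.run(y, feed_dict={x: img})  # softmax probabilities
print(np.argmax(probs), np.argmax(mnist.test.labels[0]))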

CNN

  1. Flow:

    • Layer 1:
      input [28x28x1] -> conv [5x5, (1:32)] -> relu [28x28x32] -> max_pool [2x2, 14x14x32] -> dropout [0.75]
    • Layer 2:
      [14x14x32] -> conv [5x5, (32:64)] -> relu [14x14x64] -> max_pool [2x2, 7x7x64] -> dropout [0.75]
    • Layer 3:
      [7x7x64] -> FC [1024] -> relu -> dropout -> y = wx+b [10]


  2. Code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST/", one_hot=True)
#Parameters
learning_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10
#Network Parameters
n_input = 784   # MNIST data input (image shape: 28*28)
n_classes = 10  # MNIST total classes (digits 0-9)
dropout = 0.75  # probability to keep units

#tf Graph input
x = tf.placeholder(tf.float32,[None,n_input])
y = tf.placeholder(tf.float32,[None,n_classes])
keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)

#Create model
def conv2d(image,w,b):
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(image,w,strides=[1,1,1,1],padding='SAME'),b))
def max_pooling(image,k):
    return tf.nn.max_pool(image, ksize=[1,k,k,1], strides=[1,k,k,1], padding='SAME')

weights = {
    'wc1':tf.Variable(tf.random_normal([5,5,1,32])),
    'wc2':tf.Variable(tf.random_normal([5,5,32,64])),
    'wd1':tf.Variable(tf.random_normal([7*7*64,1024])),
    'out':tf.Variable(tf.random_normal([1024,n_classes]))
}
biases = {
    'bc1':tf.Variable(tf.random_normal([32])),
    'bc2':tf.Variable(tf.random_normal([64])),
    'bd1':tf.Variable(tf.random_normal([1024])),
    'out':tf.Variable(tf.random_normal([n_classes]))
}
def conv_net(_X,_weights,_biases,_dropout):
    #Layer 1
    _X = tf.reshape(_X,[-1,28,28,1])
    conv1 = conv2d(_X,_weights['wc1'],_biases['bc1'])
    conv1 = max_pooling(conv1, k = 2)
    conv1 = tf.nn.dropout(conv1, keep_prob=_dropout)
    #Layer 2
    conv2 = conv2d(conv1,_weights['wc2'],_biases['bc2'])
    conv2 = max_pooling(conv2, k=2)
    conv2 = tf.nn.dropout(conv2,keep_prob=_dropout)
    #Fully Connected
    dense1 = tf.reshape(conv2,[-1,_weights['wd1'].get_shape().as_list()[0]])
    dense1 = tf.nn.relu(tf.add(tf.matmul(dense1,_weights['wd1']),_biases['bd1']))
    dense1 = tf.nn.dropout(dense1,_dropout)
    out = tf.add(tf.matmul(dense1, _weights['out']), _biases['out'])
    return out
#Construct model
pred = conv_net(x, weights, biases, keep_prob)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

#Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred,tf.float32))
init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated

with tf.Session() as sess:
    sess.run(init)
    step = 1
    while step * batch_size < training_iters:
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
        if step % display_step == 0:
            acc = sess.run(accuracy,feed_dict={x:batch_xs,y:batch_ys,keep_prob:1.})
            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc))

        step += 1
    print("Optimization Finished!")
    print("Testing Accuracy:",sess.run(accuracy,feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.}))

Modified CNN

  1. Flow:
    • Per-layer pipeline: conv -> max_pool -> norm -> dropout
    • Overall flow: conv layer 1 -> conv layer 2 -> conv layer 3 -> conv layer 4 -> fully connected -> fully connected -> output: softmax
    • Shape trace: [28x28x1] -> [14x14x64] -> [7x7x128] -> [4x4x256] -> [2x2x512] -> [1024] -> [1024] -> [10]
  2. Code:
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST/", one_hot=True)

import tensorflow as tf

# Parameters
learning_rate = 0.001
training_iters = 200000
batch_size = 64
display_step = 20

# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.8 # Dropout, probability to keep units

# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32) # dropout (keep probability)

# Create custom model
def conv2d(name, l_input, w, b):
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input,w,strides=[1,1,1,1],padding='SAME'),b),name = name)
def max_pool(name, l_input, k):
    return tf.nn.max_pool(l_input,ksize=[1,k,k,1],strides=[1,k,k,1],padding='SAME',name=name)

def norm(name, l_input, lsize=4):
    return tf.nn.lrn(l_input,lsize,bias=1.0,alpha=0.001/9.0,beta=0.75,name=name)
def customnet(_X, _weights, _biases, _dropout):
    # Reshape input picture
    _X = tf.reshape(_X,shape=[-1,28,28,1])
    # Convolution Layer
    conv1 = conv2d('conv1',_X,_weights['wc1'],_biases['bc1'])
    # Max Pooling (down-sampling)
    pool1 = max_pool('pool1', conv1, k=2)
    # Apply Normalization
    norm1 = norm('norm1', pool1, lsize=4)
    # Apply Dropout (do not assign to "norm", which would shadow the function above)
    norm1 = tf.nn.dropout(norm1, _dropout)
    # Convolution Layer
    conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])
    pool2 = max_pool('pool2', conv2, k=2)
    norm2 = norm('norm2', pool2, lsize=4)
    norm2 = tf.nn.dropout(norm2, _dropout)
    # Convolution Layer
    conv3 = conv2d('conv3',norm2,_weights['wc3'],_biases['bc3'])
    # Max Pooling (down-sampling)
    pool3 = max_pool('pool3', conv3, k=2)
    # Apply Normalization
    norm3 = norm('norm3',pool3,lsize = 4)
    # Apply Dropout
    norm3 = tf.nn.dropout(norm3, _dropout)
    #conv4
    conv4 = conv2d('conv4',norm3,_weights['wc4'],_biases['bc4'])
    # Max Pooling (down-sampling)
    pool4 = max_pool('pool4', conv4, k=2)
    # Apply Normalization
    norm4 = norm('norm4',pool4,lsize=4)
    # Apply Dropout
    norm4 = tf.nn.dropout(norm4,_dropout)
    # Fully connected layer
    dense1 = tf.reshape(norm4,[-1,_weights['wd1'].get_shape().as_list()[0]])
    dense1 = tf.nn.relu(tf.matmul(dense1,_weights['wd1'])+_biases['bd1'],name='fc1')
    dense2 = tf.nn.relu(tf.matmul(dense1,_weights['wd2'])+_biases['bd2'],name='fc2')
    # Output, class prediction
    out = tf.matmul(dense2, _weights['out']) + _biases['out']
    return out
# Store layers weight & bias
weights = {
    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 64])),
    'wc2': tf.Variable(tf.random_normal([5, 5, 64, 128])),
    'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),
    'wc4': tf.Variable(tf.random_normal([2, 2, 256, 512])),
    'wd1': tf.Variable(tf.random_normal([2*2*512, 1024])), 
    'wd2': tf.Variable(tf.random_normal([1024, 1024])),
    'out': tf.Variable(tf.random_normal([1024, 10]))
}
biases = {
    'bc1': tf.Variable(tf.random_normal([64])),
    'bc2': tf.Variable(tf.random_normal([128])),
    'bc3': tf.Variable(tf.random_normal([256])),
    'bc4': tf.Variable(tf.random_normal([512])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'bd2': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Construct model
pred = customnet(x, weights, biases, keep_prob)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        # Fit training using batch data
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            print ("Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc))
        step += 1
    print ("Optimization Finished!")
    # Calculate accuracy for 256 mnist test images
    print ("Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.}))

DNN

A DNN is essentially the same idea as a CNN, except that a CNN adds convolutional layers and pooling layers; the convolutions' weight sharing greatly reduces the number of connection parameters.

A DNN drops the convolutional layers and instead stacks fully connected layers with ReLU activations and dropout: a layer-by-layer Wx+b network.

  1. Flow:
    Two fully connected hidden layers (y = wx+b), then a linear output layer.
  2. Code:

'''
@author: smile
'''
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST/", one_hot=True)
# Parameters
learning_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10

#Network parameters
n_input = 784
n_classes = 10
dropout = 0.8
#tf Graph input
x = tf.placeholder(tf.float32,[None,n_input])
y = tf.placeholder(tf.float32,[None,n_classes])
keep_prob = tf.placeholder(tf.float32)
#Create model

def dnn(_X,_weights,_biases,_dropout):
    _X = tf.nn.dropout(_X,_dropout)
    d1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(_X, _weights['wd1']),_biases['bd1']),name='d1')
    d2x = tf.nn.dropout(d1,_dropout)
    d2 = tf.nn.relu(tf.nn.bias_add(tf.matmul(d2x,_weights['wd2']),_biases['bd2']),name='d2')
    dout = tf.nn.dropout(d2,_dropout)
    out = tf.matmul(dout, _weights['out']) + _biases['out']
    return out
weights = {
    'wd1':tf.Variable(tf.random_normal([784,600],stddev=0.01)),
    'wd2':tf.Variable(tf.random_normal([600,480],stddev=0.01)),
    'out':tf.Variable(tf.random_normal([480,10]))
}
biases = {
    'bd1':tf.Variable(tf.random_normal([600])),
    'bd2':tf.Variable(tf.random_normal([480])),
    'out':tf.Variable(tf.random_normal([10])),
}
#Construct model
pred = dnn(x, weights, biases, keep_prob)

#Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
#Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred,tf.float32))

init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated

with tf.Session() as sess:
    sess.run(init)
    step = 1
    while step * batch_size < training_iters:
        batch_xs,batch_ys = mnist.train.next_batch(batch_size)
        sess.run(optimizer,feed_dict={x:batch_xs,y:batch_ys,keep_prob:dropout})
        if step % display_step == 0:
            acc = sess.run(accuracy,feed_dict={x:batch_xs,y:batch_ys,keep_prob:1.})
            loss = sess.run(cost,feed_dict = {x:batch_xs,y:batch_ys,keep_prob:1.})
            print("Iter "+str(step*batch_size)+",Minibatch Loss = "+"{:.6f}".format(loss)+", Training Accuracy = "+"{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")
    print("Testing Accuarcy : ",sess.run(accuracy,feed_dict={x:mnist.test.images[:256],y:mnist.test.labels[:256]}))

ANN

  1. Explanation:
    For a detailed explanation, see the earlier post 深度学习笔记——深度学习框架TensorFlow之MLP(十四).
  2. Code:
'''
@author: smile
'''
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
# Network parameters
n_hidden_1 = 256  # 1st layer num features
n_hidden_2 = 256  # 2nd layer num features
n_input = 784
n_classes = 10

x = tf.placeholder("float", [None,n_input])
y = tf.placeholder("float",[None,n_classses])

def multilayer_perceptron(_X,_weights,_biases):
    layer1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
    layer2 = tf.nn.relu(tf.add(tf.matmul(layer1,_weights['h2']),_biases['b2']))
    return tf.matmul(layer2,_weights['out'])+_biases['out']

weights = {
    'h1':tf.Variable(tf.random_normal([n_input,n_hidden_1])),
    'h2':tf.Variable(tf.random_normal([n_hidden_1,n_hidden_2])),
    'out':tf.Variable(tf.random_normal([n_hidden_2,n_classes]))
}
biases = {
    'b1':tf.Variable(tf.random_normal([n_hidden_1])),
    'b2':tf.Variable(tf.random_normal([n_hidden_2])),
    'out':tf.Variable(tf.random_normal([n_classes]))
}
pred = multilayer_perceptron(x, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))

optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated

with tf.Session() as sess:
    sess.run(init)
    #Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        for i in range(total_batch):
            batch_xs,batch_ys = mnist.train.next_batch(batch_size)
            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
    print("Optimization Finished!")
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

RNN

  1. Flow:

    For a walkthrough of LSTMs, see: http://www.jianshu.com/p/9dc9f41f0b29

  2. Code:

import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

"""
To classify images using an RNN, we treat every image row as a sequence of pixels.
Because the MNIST image shape is 28*28 px, we handle 28 sequence steps of 28 pixels per sample.
"""
#Parameters
learning_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10

#Network parameters
n_input = 28
n_steps = 28
n_hidden = 128  # hidden layer size
n_classes = 10

#tf Graph input
x = tf.placeholder("float",[None,n_steps,n_input])
# with state_is_tuple=False, the LSTM packs cell & hidden state into one 2*n_hidden vector
istate = tf.placeholder("float",[None,2*n_hidden])
#output
y = tf.placeholder("float",[None,n_classes])
#random initialize biases and weights
weights = {
    "hidden":tf.Variable(tf.random_normal([n_input,n_hidden])),
    "out":tf.Variable(tf.random_normal([n_hidden,n_classes]))
}
biases = {
    "hidden":tf.Variable(tf.random_normal([n_hidden])),
    "out":tf.Variable(tf.random_normal([n_classes]))
}
#RNN
def RNN(_X,_istate,_weights,_biases):
    _X = tf.transpose(_X,[1,0,2])
    _X = tf.reshape(_X,[-1,n_input])
    #input Layer to hidden Layer
    _X = tf.matmul(_X,_weights['hidden'])+_biases['hidden']
    #LSTM cell
    lstm_cell = rnn.BasicLSTMCell(n_hidden,state_is_tuple=False)
    # split into a list of n_steps tensors, one per time step
    _X = tf.split(_X, n_steps, 0)
    # run the unrolled RNN
    outputs, states = rnn.static_rnn(lstm_cell, _X, initial_state=_istate)
    return tf.matmul(outputs[-1], _weights['out']) + _biases['out']

pred = RNN(x,istate,weights,biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels = y))
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)

correct_pred = tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred,tf.float32))

init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated
with tf.Session() as sess:
    sess.run(init)
    step = 1
    while step * batch_size < training_iters:
        batch_xs,batch_ys = mnist.train.next_batch(batch_size)
        batch_xs = batch_xs.reshape((batch_size,n_steps,n_input))
        sess.run(optimizer,feed_dict={x:batch_xs,y:batch_ys,
                                      istate:np.zeros((batch_size,2*n_hidden))})
        if step % display_step == 0:
            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys,  
                                                istate: np.zeros((batch_size, 2 * n_hidden))})  
            # Calculate batch loss  
            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys,  
                                             istate: np.zeros((batch_size, 2 * n_hidden))})  
            print("Iter " + str(step * batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc))
        step += 1  
    print ("Optimization Finished!")  
    test_len = 256
    test_data = mnist.test.images[:test_len].reshape((-1,n_steps,n_input))
    test_label = mnist.test.labels[:test_len]
    print ("Testing Accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_label,  
                                                             istate: np.zeros((test_len, 2 * n_hidden))}))          
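How the flat 784-vector becomes a sequence: each batch is reshaped to (batch, n_steps, n_input) = (batch, 28, 28), i.e. 28 time steps of 28 pixels, before being fed in. A sketch with a dummy batch:

import numpy as np
batch = np.zeros((128, 784), dtype=np.float32)  # dummy batch of flat images
seq = batch.reshape((128, 28, 28))              # 28 time steps x 28 pixels
print(seq.shape)                                # (128, 28, 28)
# RNN() then transposes to (n_steps, batch, n_input) and splits into a
# length-28 list for rnn.static_rnn.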
