TensorFlow in Practice: Day 5 (CNN)

The following network is based on the LeNet-5 architecture and classifies the MNIST dataset.


Input: m*28*28*1

Convolution kernel 1: 5*5*1*32, stride 1, SAME (zero) padding

Pooling kernel 1: 2*2, stride 2

Convolution kernel 2: 5*5*32*64, stride 1, SAME (zero) padding

Pooling kernel 2: 2*2, stride 2

After pooling layer 2 the tensor has shape m*7*7*64, which is flattened into an m*3136 matrix.

Next come two fully connected layers, with 512 and 10 nodes respectively (a quick shape check follows below).
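As a quick sanity check of these shapes, here is a small sketch (assuming TensorFlow 1.x and an arbitrary batch size of 8, both illustrative choices, not part of the original post) that pushes a dummy batch through the same conv/pool settings and prints the resulting shapes:

import tensorflow as tf

# Dummy batch: 8 grayscale 28*28 images (the batch size 8 is an arbitrary example)
x=tf.zeros([8,28,28,1])

# 5*5 conv, 1->32 channels, stride 1; SAME padding keeps the spatial size: 28*28 -> 28*28
c1=tf.nn.conv2d(x,tf.zeros([5,5,1,32]),strides=[1,1,1,1],padding="SAME")
# 2*2 max pooling with stride 2 halves the spatial size: 28*28 -> 14*14
p1=tf.nn.max_pool(c1,ksize=[1,2,2,1],strides=[1,2,2,1],padding="VALID")

# 5*5 conv, 32->64 channels, stride 1, SAME padding: 14*14 -> 14*14
c2=tf.nn.conv2d(p1,tf.zeros([5,5,32,64]),strides=[1,1,1,1],padding="SAME")
# 2*2 max pooling with stride 2: 14*14 -> 7*7
p2=tf.nn.max_pool(c2,ksize=[1,2,2,1],strides=[1,2,2,1],padding="VALID")

print(c1.shape,p1.shape,c2.shape,p2.shape)
# Expected: (8, 28, 28, 32) (8, 14, 14, 32) (8, 14, 14, 64) (8, 7, 7, 64); 7*7*64 = 3136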

import tensorflow as tf
# Network parameters
# Input layer size (28*28*1), MNIST dataset
INPUT_NODE=784
OUTPUT_NODE=10
IMAGE_SIZE=28
NUM_CHANNELS=1
NUM_LABELS=10
# Convolution kernel sizes and depths
CONV1_SIZE=5
CONV1_DEEP=32
CONV2_SIZE=5
CONV2_DEEP=64
# Number of nodes in the fully connected layer
FC_SIZE=512


# Forward propagation; the train flag distinguishes training from inference
def FP(input_tensor,train,regularizer):
    # Use variable scopes to namespace the layer variables
    # Layer 1, convolution (input m*28*28*1, output m*28*28*32)
    with tf.variable_scope("layer1-conv1"):
        # Convolution kernel of layer 1
        conv1_weights=tf.get_variable("weight",shape=[CONV1_SIZE,CONV1_SIZE,NUM_CHANNELS,CONV1_DEEP]
                                      ,initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Biases of layer 1
        conv1_biases=tf.get_variable("bias",shape=[CONV1_DEEP],initializer=tf.constant_initializer(0.1))

        conv1=tf.nn.conv2d(input_tensor,conv1_weights,strides=[1,1,1,1],padding="SAME")
        relu1=tf.nn.relu(tf.nn.bias_add(conv1,conv1_biases))

    # Layer 2, max pooling (input m*28*28*32, output m*14*14*32)
    with tf.variable_scope("layer2-pool1"):
        pool1=tf.nn.max_pool(relu1,ksize=[1,2,2,1],strides=[1,2,2,1],padding="VALID")

    # Layer 3, convolution (input m*14*14*32, output m*14*14*64)
    with tf.variable_scope("layer3-conv2"):
        # Convolution kernel of layer 3
        conv2_weights = tf.get_variable("weight", shape=[CONV2_SIZE, CONV2_SIZE, CONV1_DEEP,CONV2_DEEP]
                                        , initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Biases of layer 3
        conv2_biases = tf.get_variable("bias", shape=[CONV2_DEEP], initializer=tf.constant_initializer(0.1))

        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding="SAME")
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    # Layer 4, max pooling (input m*14*14*64, output m*7*7*64)
    with tf.variable_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")

    # Layer 5 is a fully connected layer; it expects vector inputs, so each 7*7*64 tensor is flattened into a 3136-dimensional vector

    # Get the shape of the pooling layer 2 output as a list
    # pool2_shape[0] is the batch size; pool2_shape[1]~pool2_shape[3] are 7, 7, 64 respectively
    pool2_shape=pool2.get_shape().as_list()
    nodes= pool2_shape[1]* pool2_shape[2]* pool2_shape[3]
    # Reshape the 4-D pooling output into a 2-D matrix; -1 lets TensorFlow infer the
    # batch dimension, so this also works when the batch size is not fixed in the graph
    reshaped=tf.reshape(pool2,[-1,nodes])

    # Layer 5, fully connected
    with tf.variable_scope("layer5-fc1"):
        fc1_weights=tf.get_variable("weight",shape=[nodes,FC_SIZE],initializer=tf.truncated_normal_initializer(stddev=0.1))
        fc1_biases=tf.get_variable("bias",shape=[FC_SIZE],initializer=tf.constant_initializer(0.1))
        fc1=tf.nn.relu(tf.matmul(reshaped,fc1_weights)+fc1_biases)

        # Apply dropout only during training
        if train:
            fc1=tf.nn.dropout(fc1,keep_prob=0.5)
        # Only the fully connected weights are regularized
        if regularizer is not None:
            tf.add_to_collection("losses",regularizer(fc1_weights))

    # Layer 6, fully connected
    with tf.variable_scope("layer6-fc2"):
        fc2_weights = tf.get_variable("weight", shape=[FC_SIZE,NUM_LABELS],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        fc2_biases = tf.get_variable("bias", shape=[NUM_LABELS], initializer=tf.constant_initializer(0.1))
        # No activation on the last layer; softmax is applied later, together with the loss
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases

        # Only the fully connected weights are regularized
        if regularizer is not None:
            tf.add_to_collection("losses", regularizer(fc2_weights))
    # Return the logits of the last layer
    return logit

With the forward pass defined, training and testing can proceed; a rough sketch of what that looks like follows.
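The sketch below continues from the code above (it reuses FP and the constants defined there) and shows one way to wire up the loss, optimizer, and training loop. The batch size, learning rate, regularization rate, step count, and data path are illustrative assumptions, not values from the original post; it also assumes the old tensorflow.examples.tutorials.mnist reader that ships with TensorFlow 1.x.

from tensorflow.examples.tutorials.mnist import input_data

# Illustrative hyperparameters (not from the original post)
BATCH_SIZE=100
LEARNING_RATE=0.01
REGULARIZATION_RATE=0.0001
TRAINING_STEPS=3000

mnist=input_data.read_data_sets("/tmp/mnist_data",one_hot=True)

x=tf.placeholder(tf.float32,[BATCH_SIZE,IMAGE_SIZE,IMAGE_SIZE,NUM_CHANNELS],name="x-input")
y_=tf.placeholder(tf.float32,[None,OUTPUT_NODE],name="y-input")

# L2 regularizer passed into FP; FP adds the FC weight penalties to the "losses" collection
regularizer=tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
y=FP(x,True,regularizer)

# Softmax and cross entropy are applied here, on the raw logits returned by FP
cross_entropy=tf.nn.softmax_cross_entropy_with_logits(logits=y,labels=y_)
loss=tf.reduce_mean(cross_entropy)+tf.add_n(tf.get_collection("losses"))

train_step=tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(loss)

correct=tf.equal(tf.argmax(y,1),tf.argmax(y_,1))
accuracy=tf.reduce_mean(tf.cast(correct,tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(TRAINING_STEPS):
        xs,ys=mnist.train.next_batch(BATCH_SIZE)
        # The reader returns flat 784-dim vectors; reshape them into 4-D image tensors
        xs=xs.reshape([BATCH_SIZE,IMAGE_SIZE,IMAGE_SIZE,NUM_CHANNELS])
        _,loss_value=sess.run([train_step,loss],feed_dict={x:xs,y_:ys})
        if i%500==0:
            print("step %d, loss %g"%(i,loss_value))

    # Quick check on one batch of test images (dropout is still active here;
    # a proper evaluation would rebuild the graph with train=False)
    test_xs=mnist.test.images[:BATCH_SIZE].reshape([BATCH_SIZE,IMAGE_SIZE,IMAGE_SIZE,NUM_CHANNELS])
    test_ys=mnist.test.labels[:BATCH_SIZE]
    print("test-batch accuracy: %g"%sess.run(accuracy,feed_dict={x:test_xs,y_:test_ys}))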
