Convolutional Neural Network 1: A Complete Walkthrough

#coding:utf-8

import tensorflow as tf

IMAGE_SIZE = 28        # image resolution 28*28

NUM_CHANNELS = 1       # number of image channels

CONV1_SIZE = 5         # kernel size of the first convolutional layer

CONV1_KERNEL_NUM = 32  # number of kernels in the first convolutional layer

CONV2_SIZE = 5         # kernel size of the second convolutional layer

CONV2_KERNEL_NUM = 64  # number of kernels in the second convolutional layer

FC_SIZE = 512          # 512 neurons in the first fully connected layer

OUTPUT_NODE = 10       # 10-class output


def get_weight(shape, regularizer):  # generate weights
    # shape is the dimensions of the tensor; the mean defaults to 0 and stddev is the standard deviation
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
    # tf.add_to_collection: put the variable's L2 penalty into the 'losses' collection (a list of tensors)
    if regularizer is not None:
        tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w
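The L2 penalty of every weight is stashed in the 'losses' collection; during training these terms are usually added to the cross-entropy loss. A minimal sketch of that step, assuming TensorFlow 1.x (the names y, y_, ce and loss are illustrative and not part of this file):

# Sketch: combining the collected regularization terms with the cross-entropy loss.
ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=y, labels=tf.argmax(y_, 1))
cem = tf.reduce_mean(ce)
loss = cem + tf.add_n(tf.get_collection('losses'))  # total loss = cross entropy + L2 penalties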


def get_bias(shape):  # generate the bias b
    # e.g. tf.zeros([3, 4], tf.int32) gives [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
    b = tf.Variable(tf.zeros(shape))
    return b

def conv2d(x, w):
    # tf.nn.conv2d(input tensor x, kernel tensor w, strides, padding mode)
    # e.g. tf.nn.conv2d([100, 28, 28, 1], [5, 5, 1, 6], [1, 1, 1, 1], 'SAME' zero padding)
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # tf.nn.max_pool(input tensor, pooling kernel, strides, zero padding)
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
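With SAME padding and stride 1 the convolution keeps the spatial size, while each 2x2 max pooling with stride 2 halves it. A small sketch tracing the shapes for one MNIST-sized input (the batch size of 1 is illustrative):

# Sketch: tracing shapes through conv2d and max_pool_2x2.
x = tf.placeholder(tf.float32, [1, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS])  # (1, 28, 28, 1)
w = get_weight([CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_KERNEL_NUM], None)
c = conv2d(x, w)      # SAME padding, stride 1 -> (1, 28, 28, 32)
p = max_pool_2x2(c)   # 2x2 pooling, stride 2  -> (1, 14, 14, 32)
print(c.get_shape(), p.get_shape())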


def forward(x, train, regularizer):  # forward propagation
    conv1_w = get_weight([CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_KERNEL_NUM], regularizer)  # first-layer kernels: 5*5*1, 32 of them
    conv1_b = get_bias([CONV1_KERNEL_NUM])  # generate the bias
    conv1 = conv2d(x, conv1_w)  # compute the convolution
    relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_b))  # add the bias b, then ReLU activation
    pool1 = max_pool_2x2(relu1)  # max pooling


    conv2_w = get_weight([CONV2_SIZE, CONV2_SIZE, CONV1_KERNEL_NUM, CONV2_KERNEL_NUM], regularizer)  # second-layer kernels: 5*5*32, 64 of them
    conv2_b = get_bias([CONV2_KERNEL_NUM])
    conv2 = conv2d(pool1, conv2_w)  # compute the convolution
    relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_b))  # add the bias b, then ReLU activation
    pool2 = max_pool_2x2(relu2)  # max pooling


    pool_shape = pool2.get_shape().as_list()  # get the tensor shape as a list [batch, height, width, channels]
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]  # height * width * depth; for a 28*28 input this is 7*7*64 = 3136
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])  # flatten pool2 into a 2-D tensor [batch, nodes] for the fully connected layer


    fc1_w = get_weight([nodes, FC_SIZE], regularizer)  # initialize fully connected weights with regularization
    fc1_b = get_bias([FC_SIZE])  # initialize the bias
    fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_w) + fc1_b)  # matrix multiply, add the bias, then ReLU activation
    if train: fc1 = tf.nn.dropout(fc1, 0.5)  # apply dropout only during training to reduce overfitting


    fc2_w = get_weight([FC_SIZE, OUTPUT_NODE], regularizer)  # initialize fully connected weights with regularization
    fc2_b = get_bias([OUTPUT_NODE])  # initialize the bias
    y = tf.matmul(fc1, fc2_w) + fc2_b  # matrix multiply and add the bias (logits, no activation here)
    return y  # return y
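To see the whole pipeline end to end, the forward pass can be wired into a graph as below. This is a minimal sketch assuming TensorFlow 1.x; BATCH_SIZE and the random input are illustrative and not part of this file:

# Sketch: building the graph and running one forward pass.
import numpy as np

BATCH_SIZE = 100
x = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS])
y = forward(x, train=False, regularizer=0.0001)  # logits of shape (100, 10)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    logits = sess.run(y, feed_dict={x: np.random.rand(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)})
    print(logits.shape)  # (100, 10)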
