As requested, here is the inference module for reference.
The network structure inside can be defined however you like.
Separating the inference module into its own file makes the program more readable and easier to maintain. High cohesion, low coupling, so to speak.
import tensorflow as tf
# Parameters describing the network structure
INPUT_NODE = 128*128
OUTPUT_NODE = 62
IMAGE_SIZE = 128
NUM_CHANNELS = 1
NUM_LABELS = OUTPUT_NODE
# Size and depth of the first convolutional layer
CONV1_SIZE = 5
CONV1_DEEP = 32
# Size and depth of the second convolutional layer
CONV2_SIZE = 5
CONV2_DEEP = 64
# Number of nodes in the fully connected layer
FC_SIZE = 512
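# With 128x128x1 inputs, 'SAME' padding, and 2x2 stride-2 pooling, the shapes flow as:
# conv1 128*128*32 -> pool1 64*64*32 -> conv2 64*64*64 -> pool2 32*32*64,
# so the flattened input to the first fully connected layer has 32*32*64 = 65536 nodes.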
# Forward propagation of the network
def inference(input_tensor, train, regularizer):
    # Layer 1: convolution, outputs a 128*128*32 tensor
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable("weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        # 5x5 filter of depth 32, stride 1, zero padding ('SAME')
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
    # Layer 2: max pooling, 2x2 filter with stride 2, outputs a 64*64*32 tensor
    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Layer 3: convolution, outputs a 64*64*64 tensor
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable("weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        # 5x5 filter of depth 64, stride 1, zero padding ('SAME')
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
    # Layer 4: max pooling, 2x2 filter with stride 2, outputs a 32*32*64 tensor
    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Flatten the 32*32*64 output of pool2 into a long vector for the fully connected layers
    # (equivalently: reshaped = tf.reshape(pool2, [-1, 32*32*64]))
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [-1, nodes])
    # Layer 5: fully connected, input vector of length 32*32*64 = 65536, output vector of length 512.
    # Dropout is added here; dropout is normally used only on fully connected layers.
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable('weight', [nodes, FC_SIZE], initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected weights are regularized
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable('bias', [FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)
    # Layer 6: fully connected, input 512, output NUM_LABELS (62)
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable('weight', [FC_SIZE, NUM_LABELS], initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable('bias', [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases
    return logit
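For reference, here is a minimal sketch of how this module could be wired into a training script. The module name inference, the placeholder names, and the regularization rate are assumptions for illustration, not part of the original code:

import tensorflow as tf
import inference   # assuming the code above is saved as inference.py

REGULARIZATION_RATE = 0.0001   # assumed value, tune as needed

x = tf.placeholder(tf.float32,
                   [None, inference.IMAGE_SIZE, inference.IMAGE_SIZE, inference.NUM_CHANNELS],
                   name='x-input')
y_ = tf.placeholder(tf.int64, [None], name='y-input')   # sparse class labels in [0, 62)

regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
logits = inference.inference(x, train=True, regularizer=regularizer)

# Cross-entropy plus the L2 terms that inference() added to the 'losses' collection
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_)
loss = tf.reduce_mean(cross_entropy) + tf.add_n(tf.get_collection('losses'))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

At evaluation time, pass train=False so that dropout is disabled; regularizer can then be None.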