VGGNet is a deep convolutional neural network developed jointly by the Visual Geometry Group at the University of Oxford and researchers at Google DeepMind. VGGNet explored the relationship between the depth of a convolutional network and its performance: by repeatedly stacking small 3×3 convolution kernels and 2×2 max-pooling layers, it successfully built convolutional networks 16 to 19 layers deep. VGGNet lowered the error rate substantially, taking 2nd place in the classification task and 1st place in the localization task at ILSVRC 2014. It also transfers well, generalizing very effectively to other image datasets. The structure of VGG is very clean: the whole network uses the same convolution kernel size (3×3) and the same max-pooling size (2×2). To this day, VGGNet is still frequently used to extract image features.
VGGNet has five convolutional stages, each containing 2 to 3 convolutional layers, with a max-pooling layer at the end of each stage to shrink the feature map. All convolutional layers within a stage have the same number of kernels, and later stages have more: 64-128-256-512-512. Several identical convolutional layers are often stacked together. Two 3×3 convolutional layers in series are equivalent to one 5×5 convolutional layer, i.e. each output pixel is related to a 5×5 neighborhood of the input, so the receptive field is 5×5. Three 3×3 convolutional layers in series have the same effect as one 7×7 convolutional layer, but with fewer parameters (3×(3×3) = 27 weights per channel pair versus 7×7 = 49) and more non-linear transformations than a single 7×7 layer (three ReLUs instead of one), which strengthens the CNN's ability to learn features.
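To make the parameter comparison concrete, here is a minimal standalone sketch (separate from the benchmark script below; the helper conv_weights is our own illustration) that counts the weights of both designs:

# Standalone sketch: weights of three stacked 3x3 conv layers vs. one 7x7 layer,
# both mapping C input channels to C output channels (biases ignored).
def conv_weights(k, c_in, c_out):
    return k * k * c_in * c_out

C = 256
stacked = 3 * conv_weights(3, C, C)  # 27 * C^2 = 1769472
single = conv_weights(7, C, C)       # 49 * C^2 = 3211264
print(stacked / single)              # ~0.55, i.e. about 45% fewer parameters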
To train VGGNet, we can first train a network with fewer layers, then use it to initialize a deeper network, repeatedly adding layers and retraining (a sketch of this warm start follows the list below). Training VGGNet led to the following observations:
1. LRN layers do not help much.
2. The deeper the network, the better the results.
3. 1×1 convolutions are also effective, but not as good as 3×3 convolutions; larger kernels can learn larger spatial features.
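The warm start can be realized in TF 1.x by restoring only the variables the deep and shallow models share before training continues. This is a minimal sketch, not the authors' exact procedure; the checkpoint path and scope prefixes are assumed names for illustration only:

# Hypothetical warm start of a deeper net from a shallower one (TF 1.x).
# 'shallow_model.ckpt' and the scope prefixes are illustrative assumptions.
import tensorflow as tf

# ... first build the deeper graph, reusing the variable names of the shared layers ...
shared_vars = [v for v in tf.global_variables()
               if v.op.name.startswith(('conv1_', 'conv2_'))]  # layers both nets share
saver = tf.train.Saver(var_list=shared_vars)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # new layers start from random init
    saver.restore(sess, 'shallow_model.ckpt')    # shared layers reuse trained weights
    # ... continue training the deeper network here ...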
We implement VGGNet-16, i.e. 16 layers in total: 13 convolutional layers and 3 fully connected layers. As before, we only benchmark the forward and backward passes; no real training or prediction is performed.
# Imports
from datetime import datetime
import math
import time
import tensorflow as tf
# Create a convolutional layer and append its parameters to a list
def conv_op(input_op, name, kh, kw, n_out, dh, dw, p):
    """
    input_op: input tensor
    name: name of this layer
    kh: kernel height
    kw: kernel width
    n_out: number of kernels, i.e. output channels
    dh: stride height
    dw: stride width
    p: parameter list
    """
    n_in = input_op.get_shape()[-1].value  # number of input channels
    with tf.name_scope(name) as scope:
        # Convolution kernel
        kernel = tf.get_variable(scope + 'w', shape=[kh, kw, n_in, n_out], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer_conv2d())
        # Convolution
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding='SAME')
        # Bias
        bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name='b')
        # Add the bias to the convolution output
        z = tf.nn.bias_add(conv, biases)
        # Activation
        activation = tf.nn.relu(z, name=scope)
        # Store the parameters
        p += [kernel, biases]
        return activation
# Create a fully connected layer and append its parameters to the list
def fc_op(input_op, name, n_out, p):
    """
    input_op: input tensor
    name: name of this layer
    n_out: number of output channels
    p: parameter list
    """
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + 'w', shape=[n_in, n_out], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())  # weights
        biases = tf.Variable(tf.constant(0.1, shape=[n_out], dtype=tf.float32), name='b')  # biases
        # relu_layer computes relu(matmul(input_op, kernel) + biases) in one call
        activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        p += [kernel, biases]
        return activation
# Create a max-pooling layer
def mpool_op(input_op, name, kh, kw, dh, dw):
    """
    input_op: input tensor
    name: name of this layer
    kh: pooling kernel height
    kw: pooling kernel width
    dh: stride height
    dw: stride width
    """
    return tf.nn.max_pool(input_op, ksize=[1, kh, kw, 1], strides=[1, dh, dw, 1], padding='SAME', name=name)
# Build the VGGNet-16 network
def inference_op(input_op, keep_prob):
    p = []
    # First convolutional stage
    conv1_1 = conv_op(input_op, name='conv1_1', kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    conv1_2 = conv_op(conv1_1, name='conv1_2', kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    pool1 = mpool_op(conv1_2, name='pool1', kh=2, kw=2, dw=2, dh=2)
    # Second convolutional stage
    conv2_1 = conv_op(pool1, name='conv2_1', kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    conv2_2 = conv_op(conv2_1, name='conv2_2', kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    pool2 = mpool_op(conv2_2, name='pool2', kh=2, kw=2, dh=2, dw=2)
    # Third convolutional stage
    conv3_1 = conv_op(pool2, name='conv3_1', kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_2 = conv_op(conv3_1, name='conv3_2', kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_3 = conv_op(conv3_2, name='conv3_3', kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    pool3 = mpool_op(conv3_3, name='pool3', kh=2, kw=2, dh=2, dw=2)
    # Fourth convolutional stage
    conv4_1 = conv_op(pool3, name='conv4_1', kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_2 = conv_op(conv4_1, name='conv4_2', kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_3 = conv_op(conv4_2, name='conv4_3', kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool4 = mpool_op(conv4_3, name='pool4', kh=2, kw=2, dh=2, dw=2)
    # Fifth convolutional stage
    conv5_1 = conv_op(pool4, name='conv5_1', kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_2 = conv_op(conv5_1, name='conv5_2', kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_3 = conv_op(conv5_2, name='conv5_3', kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool5 = mpool_op(conv5_3, name='pool5', kh=2, kw=2, dw=2, dh=2)
    # Flatten: reshape pool5 into a 1-D vector per sample
    shp = pool5.get_shape()
    flattened_shape = shp[1].value * shp[2].value * shp[3].value
    resh1 = tf.reshape(pool5, [-1, flattened_shape], name='resh1')
    # First fully connected layer
    fc6 = fc_op(resh1, name='fc6', n_out=4096, p=p)
    fc6_drop = tf.nn.dropout(fc6, keep_prob, name='fc6_drop')
    # Second fully connected layer
    fc7 = fc_op(fc6_drop, name='fc7', n_out=4096, p=p)
    fc7_drop = tf.nn.dropout(fc7, keep_prob, name='fc7_drop')
    # Third fully connected layer, fed into softmax to output the most probable class
    fc8 = fc_op(fc7_drop, name='fc8', n_out=1000, p=p)
    softmax = tf.nn.softmax(fc8)
    predictions = tf.argmax(softmax, 1)
    return predictions, softmax, fc8, p
# Benchmarking function
def time_tensorflow_run(session, target, feed, info_string):
    num_steps_burn_in = 10  # warm-up iterations, excluded from the statistics
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target, feed_dict=feed)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' % (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration
    mn = total_duration / num_batches
    # Variance = E[x^2] - (E[x])^2; its square root is the standard deviation
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' % (datetime.now(), info_string, num_batches, mn, sd))
# Main function
def run_benchmark():
    with tf.Graph().as_default():
        image_size = 224
        # Random images stand in for real data; we only measure speed
        images = tf.Variable(tf.random_normal([batch_size, image_size, image_size, 3], dtype=tf.float32, stddev=1e-1))
        # Placeholder for the dropout keep probability
        keep_prob = tf.placeholder(tf.float32)
        predictions, softmax, fc8, p = inference_op(images, keep_prob)
        # Create the session
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        # Run the benchmarks
        time_tensorflow_run(sess, predictions, {keep_prob: 1.0}, "Forward")
        objective = tf.nn.l2_loss(fc8)
        grad = tf.gradients(objective, p)
        time_tensorflow_run(sess, grad, {keep_prob: 0.5}, "Forward-backward")

batch_size = 32
num_batches = 100
run_benchmark()
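As a quick sanity check of the flattening step in inference_op: each of the five 2×2, stride-2 pooling layers halves the spatial size, so a 224×224 input reaches pool5 at 7×7×512 and fc6 receives 25088 inputs:

# Standalone check of the feature-map sizes feeding fc6.
size = 224
for _ in range(5):
    size //= 2               # each stride-2 pooling halves the (even) size
print(size)                  # 7
print(size * size * 512)     # 25088 = flattened_shape passed to fc6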
From the output we can see that the forward pass takes about 8.64 seconds per ten steps, while the forward-backward pass takes about 28.30 seconds. Clearly VGGNet is far more computationally expensive than AlexNet, but its error rate at ILSVRC 2014 ultimately reached 7.3%, a big improvement over AlexNet.