ResNet
import tensorflow as tf
import collections
import time
from datetime import datetime
import math
slim = tf.contrib.slim
'''Use collections.namedtuple to define the named tuple that describes a ResNet Block
(a group of residual units), and use it to create the Block class.'''
'''A typical Block
takes three parameters: scope, unit_fn and args.
Take Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]) as an example
of a typical Block, where:
1. 'block1' is the name (scope) of this Block;
2. bottleneck is the residual unit of ResNet V2;
3. [(256, 64, 1)] * 2 + [(256, 64, 2)] is the Block's args. args is a list in which
   every element corresponds to one bottleneck residual unit:
   the first two are (256, 64, 1) and the last one is (256, 64, 2).
   Each element is a 3-tuple (depth, depth_bottleneck, stride).
   For example, (256, 64, 3) means that in the bottleneck residual unit being built
   (each residual unit contains three convolutional layers),
   the third convolution outputs 256 channels,
   the first two convolutions output depth_bottleneck = 64 channels,
   and the middle layer uses stride = 3.
'''
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
    'A named tuple describing a ResNet block.'
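'''A quick illustration: Block is just a named tuple, so its fields can be read directly.
unit_fn is left as None here only because bottleneck is defined further below.'''
example_block = Block('block1', None, [(256, 64, 1)] * 2 + [(256, 64, 2)])
print(example_block.scope)      # block1
print(len(example_block.args))  # 3 -> this block stacks three residual units
print(example_block.args[-1])   # (256, 64, 2) -> the last unit downsamples with stride 2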
'''Downsampling: when factor is 1, return the input unchanged; otherwise downsample
with a 1x1 max pool using the given stride.'''
def subsample(inputs, factor, scope=None):
    if factor == 1:
        return inputs
    else:
        return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
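'''A small shape sketch (sizes chosen for illustration): with factor 2, subsample halves
the spatial dimensions via the 1x1 max pool, while factor 1 is a no-op.'''
with tf.Graph().as_default():
    print(subsample(tf.random_uniform((1, 56, 56, 64)), 2).get_shape())   # (1, 28, 28, 64)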
'''Create a convolutional layer whose output size matches 'SAME' padding for any stride:
for stride 1 use padding='SAME' directly, otherwise pad explicitly and use padding='VALID'.'''
def conv2d_same(inputs, num_outputs, kernel_size, stride, scope=None):
    if stride == 1:
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=1,
                           padding='SAME', scope=scope)
    else:
        # Pad zeros explicitly so the VALID convolution with stride > 1 gives the
        # same spatial size that 'SAME' padding would produce.
        pad_total = kernel_size - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
                           padding='VALID', scope=scope)
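'''A worked sketch of the padding arithmetic above, for the root convolution
conv2d_same(net, 64, 7, stride=2) on a 224x224 input (sizes chosen for illustration):'''
kernel_size, stride, in_size = 7, 2, 224
pad_total = kernel_size - 1                                              # 6
pad_beg, pad_end = pad_total // 2, pad_total - pad_total // 2            # 3, 3
out_valid = (in_size + pad_beg + pad_end - kernel_size) // stride + 1    # (230 - 7) // 2 + 1 = 112
out_same = -(-in_size // stride)                                         # ceil(224 / 2) = 112, matches 'SAME'
print(out_valid, out_same)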
@slim.add_arg_scope
def stack_blocks_dense(net, blocks, outputs_collections=None):
    '''Stack the Blocks: net is the input, blocks is a list of Block instances,
    and outputs_collections is used to collect the end_points of each block.
    '''
    for block in blocks:
        with tf.variable_scope(block.scope, 'block', [net]) as sc:
            for i, unit in enumerate(block.args):
                with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
                    unit_depth, unit_depth_bottleneck, unit_stride = unit
                    net = block.unit_fn(net,
                                        depth=unit_depth,
                                        depth_bottleneck=unit_depth_bottleneck,
                                        stride=unit_stride)
            net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
    return net
'''Create the arg_scope shared by ResNet; an arg_scope defines default parameter
values for certain functions.'''
def resnet_arg_scope(is_training=True,
                     weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    batch_norm_params = {
        'is_training': is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': tf.GraphKeys.UPDATE_OPS
    }
    with slim.arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=slim.variance_scaling_initializer(),
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc
'''The bottleneck residual unit.'''
'''
Key point:
Not every function can have its default parameters set through arg_scope; only
functions decorated with @slim.add_arg_scope can be used with arg_scope.
conv2d, for example, is already decorated in the slim source code.
So two steps are needed for slim.arg_scope to work:
1. decorate the target function with @add_arg_scope;
2. set the default parameters with "with arg_scope(...)".
'''
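'''A minimal sketch of those two steps with a toy function (toy_op and gain are
illustrative names, not part of the ResNet code):'''
@slim.add_arg_scope
def toy_op(x, gain=1.0):
    return x * gain

with slim.arg_scope([toy_op], gain=2.0):   # step 2: gain now defaults to 2.0
    y = toy_op(tf.constant(3.0))           # a tensor that evaluates to 6.0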
@slim.add_arg_scope
def bottleneck(inputs,
               depth, depth_bottleneck, stride,
               outputs_collections=None,
               scope=None):
    with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        # Pre-activation (ResNet V2): batch norm + ReLU before the convolutions.
        preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
        '''Define the shortcut, i.e. the curved bypass branch.'''
        if depth == depth_in:
            shortcut = subsample(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride, normalizer_fn=None,
                                   activation_fn=None, scope='shortcut')
        '''The residual branch: three convolutional layers.'''
        residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
        residual = conv2d_same(residual, depth_bottleneck, 3, stride, scope='conv2')
        residual = slim.conv2d(residual, depth, [1, 1], stride=1, normalizer_fn=None,
                               activation_fn=None, scope='conv3')
        output = shortcut + residual
        net = slim.utils.collect_named_outputs(outputs_collections, sc.name, output)
        return net
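'''A quick shape sketch (sizes chosen for illustration): with stride 2 and
depth 256 != depth_in 64, a single bottleneck unit halves the spatial size and
expands the channels to depth.'''
with tf.Graph().as_default():
    x = tf.random_uniform((1, 56, 56, 64))
    print(bottleneck(x, depth=256, depth_bottleneck=64, stride=2).get_shape())   # (1, 28, 28, 256)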
'''The main function that generates a ResNet: given a predefined list of residual
block groups (blocks), it builds the corresponding complete ResNet.'''
def resnet_v2(inputs,
              blocks,
              num_classes=None,
              global_pool=True,
              include_root_block=True,
              reuse=None,
              scope=None):
    with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope([slim.conv2d, bottleneck, stack_blocks_dense],
                            outputs_collections=end_points_collection):
            net = inputs
            if include_root_block:
                # Root block: a 7x7 convolution with stride 2 followed by a 3x3 max pool
                # with stride 2, reducing the spatial size by a factor of 4.
                with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None):
                    net = conv2d_same(net, 64, 7, stride=2, scope='conv1')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
            net = stack_blocks_dense(net, blocks)
            net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
            if global_pool:
                # Global average pooling over the spatial dimensions.
                net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
            if num_classes is not None:
                net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                                  normalizer_fn=None, scope='logits')
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if num_classes is not None:
                end_points['predictions'] = slim.softmax(net, scope='predictions')
            return net, end_points
'''Build the 152-layer ResNet.
ResNet repeatedly uses stride-2 layers to shrink the spatial size while the number of
output channels keeps increasing.
'''
def resnet_v2_152(inputs,
                  num_classes=None,
                  global_pool=True,
                  reuse=None,
                  scope='resnet_v2_152'):
    blocks = [
        Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
        Block('block2', bottleneck, [(512, 128, 1)] * 7 + [(512, 128, 2)]),
        Block('block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),
        Block('block4', bottleneck, [(2048, 512, 1)] * 3)]
    return resnet_v2(inputs, blocks, num_classes, global_pool,
                     include_root_block=True, reuse=reuse, scope=scope)
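'''A quick check of the depth arithmetic for this configuration: 3 + 8 + 36 + 3 = 50
bottleneck units, each containing 3 convolutions, plus the root conv1 and the final
logits layer give the 152 layers in the name.'''
print((3 + 8 + 36 + 3) * 3 + 2)   # 152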
'''Evaluate the time ResNet V2 needs per batch.'''
def time_tensorflow_run(session, target, info_string):
    num_steps_burn_in = 10   # warm-up steps excluded from the statistics
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' %
                      (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration
    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
          (datetime.now(), info_string, num_batches, mn, sd))
batch_size = 8
height, width = 224, 224
num_batches = 20
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(resnet_arg_scope(is_training=False)):
    net, end_points = resnet_v2_152(inputs, 1000)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
time_tensorflow_run(sess, net, "Forward")
2019-08-23 11:38:27.401126: step 0, duration = 0.984
2019-08-23 11:38:37.404926: step 10, duration = 1.016
2019-08-23 11:38:46.296727: Forward across 20 steps, 0.994 +/- 0.012 sec / batch