Notes on reading the TensorFlow slim ResNet v2 source code

Mainly covers:
https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v2.py
https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_utils.py

First of all, I recommend reading the official TensorFlow and slim documentation to pick up the basics of arg_scope, variable_scope, outputs_collections, and so on.

This post mainly records the rough logic of the code, as a memo to myself.

resnet_utils.py

''' You need to know collections.namedtuple: it is essentially a tuple with a name, whose field values can also be accessed by field name. '''
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
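For illustration, a minimal sketch of instantiating such a Block by hand. The unit kwargs below are made up for the example; the real ones are produced by resnet_v2_block, covered later, and unit_fn would be the bottleneck function from resnet_v2.py:

# Hypothetical example: one block made of two bottleneck units.
# bottleneck is assumed to be the unit function defined in resnet_v2.py.
block = Block('block1', bottleneck, [
    {'depth': 256, 'depth_bottleneck': 64, 'stride': 1},
    {'depth': 256, 'depth_bottleneck': 64, 'stride': 2},
])
print(block.scope)    # 'block1' -- fields are readable by name
print(block.args[1])  # kwargs for the second unit: {'depth': 256, ...}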

''' Simple: downsamples with max pooling (in fact purely via the stride, since the pooling window is 1x1). '''
def subsample(inputs, factor, scope=None):
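The body is tiny; condensed from the source, it returns the input unchanged for factor 1 and otherwise applies a 1x1 max pool with the given stride:

def subsample(inputs, factor, scope=None):
  if factor == 1:
    return inputs
  else:
    # A 1x1 pooling window: only the stride does any work here.
    return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)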

''' Consider three ways of writing a strided 3x3 convolution:

1. net = conv2d_same(inputs, num_outputs, 3, stride=stride)
2. net = slim.conv2d(inputs, num_outputs, 3, stride=1, padding='SAME')
   net = subsample(net, factor=stride)
3. net = slim.conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME')

1 and 2 are equivalent, while 3 differs from 1 and 2 whenever the input height or width is even. As the TensorFlow docs put it: "VALID" only ever drops the right-most columns (or bottom-most rows), whereas the default "SAME" tries to pad evenly left and right, but if the amount of columns to be added is odd, it will add the extra column to the right (the same logic applies vertically: there may be an extra row of zeros at the bottom). The test testConv2DSameEven shows clearly that padding an even-sized edge leads to different final results; I will skip the details. That is why this function first pads manually and only then calls conv2d. '''
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
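Condensed from the source: the stride > 1 branch pads explicitly and then convolves with 'VALID' padding, so the result no longer depends on the parity of the input size:

def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
  if stride == 1:
    return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate,
                       padding='SAME', scope=scope)
  else:
    # Manual padding with a fixed, parity-independent layout.
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    inputs = tf.pad(inputs,
                    [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
    return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
                       rate=rate, padding='VALID', scope=scope)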

''' A fairly central function: it assembles the different blocks (normally four in total), and inside each block it also chains together that block's bottleneck units. block.unit_fn is the bottleneck function of that block; see class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])) above for why. '''
@slim.add_arg_scope
def stack_blocks_dense(net, blocks, output_stride=None, outputs_collections=None):
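A simplified sketch of the control flow, ignoring the output_stride / atrous-rate bookkeeping that the real source also carries:

def stack_blocks_dense(net, blocks, output_stride=None,
                       outputs_collections=None):
  for block in blocks:
    with tf.variable_scope(block.scope, 'block', [net]) as sc:
      for i, unit in enumerate(block.args):
        with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
          # unit is a dict of kwargs for this block's bottleneck.
          net = block.unit_fn(net, **unit)
      # Record the block output in the collection, keyed by scope name.
      net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
  return net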

''' Defines an arg_scope for ResNet, so that less boilerplate has to be written when the various layers are defined. '''
def resnet_arg_scope(weight_decay=0.0001, batch_norm_decay=0.997, batch_norm_epsilon=1e-5, batch_norm_scale=True, activation_fn=tf.nn.relu, use_batch_norm=True):
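Its body, condensed from the source, is essentially three nested arg_scopes; by default slim.conv2d gets L2 weight decay, a variance-scaling initializer, and BN + relu:

  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': tf.GraphKeys.UPDATE_OPS,
  }
  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=activation_fn,
      normalizer_fn=slim.batch_norm if use_batch_norm else None,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      # The root max pool uses 'SAME' padding by default.
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc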
resnet_v2.py

''' Bottleneck residual unit variant with BN before convolutions. It is just three consecutive conv layers (BN and relu come from the arg_scope). In addition, when the number of channels changes, there is an extra shortcut conv layer: a 1x1 convolution that adjusts the channel count so that the shortcut input can be added directly to the output of the three-conv branch. '''
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
               outputs_collections=None, scope=None)
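Condensed from the source (preactivation BN + relu first, then the 1x1 - 3x3 - 1x1 stack; note that when the channel count already matches, the shortcut is just the subsampled input):

@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
               outputs_collections=None, scope=None):
  with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
    depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
    # v2: batch norm + relu BEFORE the convolutions (preactivation).
    preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
    if depth == depth_in:
      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      # 1x1 conv to fix up the channel count (no BN/relu on the shortcut).
      shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride,
                             normalizer_fn=None, activation_fn=None,
                             scope='shortcut')
    residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1,
                           scope='conv1')
    residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
                                        rate=rate, scope='conv2')
    residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                           normalizer_fn=None, activation_fn=None,
                           scope='conv3')
    output = shortcut + residual
    return slim.utils.collect_named_outputs(outputs_collections, sc.name,
                                            output)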

''' The function that builds the entire ResNet. Once you understand it, you know why end_points can fetch the output of every layer: with slim.arg_scope([slim.conv2d, bottleneck, resnet_utils.stack_blocks_dense], outputs_collections=end_points_collection). This function calls resnet_utils.stack_blocks_dense, introduced above, to assemble the four big blocks. '''
def resnet_v2(inputs,
              blocks,
              num_classes=None,
              is_training=True,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              spatial_squeeze=True,
              reuse=None,
              scope=None)
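An abridged skeleton of it (the BN is_training scope and the global-pool/logits head are elided here by me, not absent from the source), showing where end_points_collection comes from:

def resnet_v2(inputs, blocks, num_classes=None, is_training=True,
              global_pool=True, output_stride=None, include_root_block=True,
              spatial_squeeze=True, reuse=None, scope=None):
  with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Every conv / bottleneck / block writes its output into this collection,
    # which is why end_points can later expose all intermediate layers.
    with slim.arg_scope([slim.conv2d, bottleneck,
                         resnet_utils.stack_blocks_dense],
                        outputs_collections=end_points_collection):
      net = inputs
      if include_root_block:
        # 7x7/2 root conv + 3x3/2 max pool: an initial stride of 4.
        with slim.arg_scope([slim.conv2d],
                            activation_fn=None, normalizer_fn=None):
          net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
        net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
      net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
      net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
      # ... global pooling, logits, and spatial squeeze elided ...
      end_points = slim.utils.convert_collection_to_dict(
          end_points_collection)
      return net, end_points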

''' This function just wraps the code that creates Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])) objects. '''
def resnet_v2_block(scope, base_depth, num_units, stride)
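Its body (essentially verbatim from the source) shows where bottleneck's kwargs come from: num_units - 1 units with stride 1, plus one final unit carrying the block's stride; the output depth is always 4x base_depth:

def resnet_v2_block(scope, base_depth, num_units, stride):
  return resnet_utils.Block(scope, bottleneck, [{
      'depth': base_depth * 4,
      'depth_bottleneck': base_depth,
      'stride': 1
  }] * (num_units - 1) + [{
      'depth': base_depth * 4,
      'depth_bottleneck': base_depth,
      'stride': stride
  }])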

''' This function is copied here in full. It is the public-facing interface, and mainly calls the two functions resnet_v2_block and resnet_v2. '''
def resnet_v2_152(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v2_152'):
  blocks = [
      resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
      resnet_v2_block('block2', base_depth=128, num_units=8, stride=2),
      resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
      resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
  ]
  return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
resnet_v2_152.default_image_size = resnet_v2.default_image_size
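Finally, a minimal usage sketch (assuming you run from models/research/slim so that the nets package is importable; resnet_v2.py re-exports resnet_arg_scope from resnet_utils):

import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import resnet_v2

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
  net, end_points = resnet_v2.resnet_v2_152(inputs, num_classes=1000,
                                            is_training=False)
# end_points maps scope names to intermediate outputs, e.g. block outputs:
for name, tensor in end_points.items():
  print(name, tensor.get_shape())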
