DeepLab v3 TensorFlow source code

Tags (space separated): tensorflow, source code

import tensorflow as tf

from config import *
import resnet_utils
from resnet_v1 import bottleneck, resnet_arg_scope

slim = tf.contrib.slim

@slim.add_arg_scope
def bottleneck_hdc(inputs, depth, depth_bottleneck, stride, rate=1,
                   multi_grid=(1,2,4), outputs_collections=None, scope=None,
                   use_bounded_activations=False):

""" 该结构是参照ResNet-v1的残差单元,修改的,姑且叫做多尺度空洞卷积残差单元吧 关键参数说明: depth: 该结构的输出通道; depth_bottleneck: 残差部分的通道数; stride: 该结构的输出相对于输入的stride rate: 实现空洞卷积,空洞卷积的采样率 multi_grid: 空洞空间池化金字塔(Atrous Spatial Pyramid Pooling) """
  """Hybrid Dilated Convolution Bottleneck. Multi_Grid = (1,2,4) See Understanding Convolution for Semantic Segmentation. When putting together two consecutive ResNet blocks that use this unit, one should use stride = 2 in the last unit of the first block. Args: inputs: A tensor of size [batch, height, width, channels]. depth: The depth of the ResNet unit output. depth_bottleneck: The depth of the bottleneck layers. stride: The ResNet unit's stride. Determines the amount of downsampling of the units output compared to its input. rate: An integer, rate for atrous convolution. multi_grid: multi_grid sturcture. outputs_collections: Collection to add the ResNet unit output. scope: Optional variable_scope. use_bounded_activations: Whether or not to use bounded activations. Bounded activations better lend themselves to quantized inference. Returns: The ResNet unit's output. """
  with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
    depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)

    ##-------------- Shortcut branch --------------##
    if depth == depth_in:
      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      shortcut = slim.conv2d(
          inputs,
          depth, [1, 1],
          stride=stride,
          activation_fn=tf.nn.relu6 if use_bounded_activations else None,
          scope='shortcut')


    ##-------------- Residual branch --------------##
    residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1, 
      rate=rate*multi_grid[0], scope='conv1')
    residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
      rate=rate*multi_grid[1], scope='conv2')
    residual = slim.conv2d(residual, depth, [1, 1], stride=1, 
      rate=rate*multi_grid[2], activation_fn=None, scope='conv3')
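    # Note: conv1 and conv3 are 1x1 convolutions, so their rate argument has no
    # spatial effect; the hybrid dilation mainly acts on the 3x3 conv2. For
    # example, with rate=2 and multi_grid=(1,2,4) (the block4 setting below),
    # conv2 runs with an effective atrous rate of 4.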

    if use_bounded_activations:
      # Use clip_by_value to simulate bandpass activation.
      residual = tf.clip_by_value(residual, -6.0, 6.0)
      output = tf.nn.relu6(shortcut + residual)
    else:
      output = tf.nn.relu(shortcut + residual)

    return slim.utils.collect_named_outputs(outputs_collections,
                                            sc.name,
                                            output)

def deeplabv3(inputs, num_classes, depth=50,  # depth of the ResNet backbone
              aspp=True, reuse=None, is_training=True):
  """DeepLabV3 Args: inputs: A tensor of size [batch, height, width, channels]. depth: The number of layers of the ResNet. aspp: Whether to use ASPP module, if True, will use 4 blocks with multi_grid=(1,2,4), if False, will use 7 blocks with multi_grid=(1,2,1). reuse: Whether or not the network and its variables should be reused. To be able to reuse 'scope' must be given. Returns: net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. end_points: A dictionary from components of the network to the corresponding activation. """
  # Whether to use the ASPP structure
  if aspp:
    multi_grid = (1,2,4)
  else:
    multi_grid = (1,2,1)
  scope = 'resnet{}'.format(depth)
  with tf.variable_scope(scope, [inputs], reuse=reuse) as sc:
    end_points_collection = sc.name + '_end_points'
    with slim.arg_scope(resnet_arg_scope(weight_decay=args.weight_decay, 
      batch_norm_decay=args.bn_weight_decay)):
      with slim.arg_scope([slim.conv2d, bottleneck, bottleneck_hdc],
                          outputs_collections=end_points_collection):
        with slim.arg_scope([slim.batch_norm], is_training=is_training):
          net = inputs
          net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')

#---------- Block 1, rate=1. Input: [b, h/4, w/4, 64]; output: [b, h/8, w/8, 256] ----------#
          with tf.variable_scope('block1', [net]) as sc:
            base_depth = 64
            for i in range(2):
              with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
                net = bottleneck(net, depth=base_depth * 4, 
                  depth_bottleneck=base_depth, stride=1)
            with tf.variable_scope('unit_3', values=[net]):
              net = bottleneck(net, depth=base_depth * 4, 
                depth_bottleneck=base_depth, stride=2)
            net = slim.utils.collect_named_outputs(end_points_collection, 
              sc.name, net)

#---------- Block 2, rate=1. Input: [b, h/8, w/8, 256]; output: [b, h/16, w/16, 512] ----------#
          with tf.variable_scope('block2', [net]) as sc:
            base_depth = 128
            for i in range(3):
              with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
                net = bottleneck(net, depth=base_depth * 4, 
                  depth_bottleneck=base_depth, stride=1)
            with tf.variable_scope('unit_4', values=[net]):
              net = bottleneck(net, depth=base_depth * 4, 
                depth_bottleneck=base_depth, stride=2)
            net = slim.utils.collect_named_outputs(end_points_collection, 
              sc.name, net)
#---------- Block 3, rate=1. Input: [b, h/16, w/16, 512]; output: [b, h/16, w/16, 1024] ----------#
          with tf.variable_scope('block3', [net]) as sc:
            base_depth = 256
            # The number of residual units in this block depends on the
            # chosen ResNet depth
            num_units = 6
            if depth == 101:
              num_units = 23
            elif depth == 152:
              num_units = 36

            for i in range(num_units):
              with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
                net = bottleneck(net, depth=base_depth * 4, 
                  depth_bottleneck=base_depth, stride=1)
            net = slim.utils.collect_named_outputs(end_points_collection, 
              sc.name, net)
#---------- Block 4, rate=2. Input: [b, h/16, w/16, 1024]; output: [b, h/16, w/16, 2048] ----------#
          with tf.variable_scope('block4', [net]) as sc:
            base_depth = 512
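            # This block keeps stride 1 and relies on atrous convolution
            # (rate=2) instead of further downsampling, so the overall output
            # stride of the backbone stays at 16.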

            for i in range(3):
              with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
                net = bottleneck_hdc(net, depth=base_depth * 4, 
                  depth_bottleneck=base_depth, stride=1, rate=2, 
                  multi_grid=multi_grid)
            net = slim.utils.collect_named_outputs(end_points_collection, 
              sc.name, net)
#---------- ASPP: one 1x1 conv branch plus 3x3 atrous branches with rates 6, 12, 18. Input: [b, h/16, w/16, 2048]; output: [b, h/16, w/16, 256] ----------#
          if aspp:
            with tf.variable_scope('aspp', [net]) as sc:
              aspp_list = []
              branch_1 = slim.conv2d(net, 256, [1,1], stride=1, 
                scope='1x1conv')
              branch_1 = slim.utils.collect_named_outputs(
                end_points_collection, sc.name, branch_1)
              aspp_list.append(branch_1)

              for i in range(3):
                branch_2 = slim.conv2d(net, 256, [3,3], stride=1, rate=6*(i+1), scope='rate{}'.format(6*(i+1)))
                branch_2 = slim.utils.collect_named_outputs(end_points_collection, sc.name, branch_2)
                aspp_list.append(branch_2)

              aspp = tf.add_n(aspp_list)
              aspp = slim.utils.collect_named_outputs(end_points_collection, sc.name, aspp)
              net = aspp
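              # Note: the four ASPP branches are summed with tf.add_n here; the
              # DeepLab v3 paper instead concatenates them (together with the
              # image-level features) before the final 1x1 projection.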
# See the ParseNet paper
            with tf.variable_scope('img_pool', [net]) as sc:
              """Image Pooling See ParseNet: Looking Wider to See Better """
              pooled = tf.reduce_mean(net, [1, 2], name='avg_pool', 
                keep_dims=True) #b,1,1,256
              pooled = slim.utils.collect_named_outputs(end_points_collection, 
                sc.name, pooled)

              pooled = slim.conv2d(pooled, 256, [1,1], stride=1, scope='1x1conv')
              pooled = slim.utils.collect_named_outputs(end_points_collection, 
                sc.name, pooled)# b,1,1,256

              pooled = tf.image.resize_bilinear(pooled, tf.shape(net)[1:3])
              pooled = slim.utils.collect_named_outputs(end_points_collection, 
                sc.name, pooled)#b,w/16,h/16,256

# Fuse the main ASPP branch with the image-pooling branch
            with tf.variable_scope('fusion', [aspp, pooled]) as sc:
              net = tf.concat([aspp, pooled], 3)
              net = slim.utils.collect_named_outputs(end_points_collection, 
                sc.name, net)

              net = slim.conv2d(net, 256, [1,1], stride=1, scope='1x1conv')
              net = slim.utils.collect_named_outputs(end_points_collection, 
                sc.name, net)
          else:
            # Without ASPP: fairly simple, similar to the blocks above, but
            # with increasing atrous rates in blocks 5-7
            with tf.variable_scope('block5', [net]) as sc:
              base_depth = 512

              for i in range(3):
                with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
                  net = bottleneck_hdc(net, depth=base_depth * 4, 
                    depth_bottleneck=base_depth, stride=1, rate=4)
              net = slim.utils.collect_named_outputs(end_points_collection, 
                sc.name, net)

            with tf.variable_scope('block6', [net]) as sc:
              base_depth = 512

              for i in range(3):
                with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
                  net = bottleneck_hdc(net, depth=base_depth * 4, 
                    depth_bottleneck=base_depth, stride=1, rate=8)
              net = slim.utils.collect_named_outputs(end_points_collection, 
                sc.name, net)

            with tf.variable_scope('block7', [net]) as sc:
              base_depth = 512

              for i in range(3):
                with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
                  net = bottleneck_hdc(net, depth=base_depth * 4, 
                    depth_bottleneck=base_depth, stride=1, rate=16)
              net = slim.utils.collect_named_outputs(end_points_collection, 
                sc.name, net)
          # Per-pixel classification: produce the segmentation logits
          net = slim.conv2d(net, num_classes, [1,1], stride=1, 
            activation_fn=None, normalizer_fn=None, scope='logits')
          net = slim.utils.collect_named_outputs(end_points_collection, 
            sc.name, net)

          end_points = slim.utils.convert_collection_to_dict(
              end_points_collection)

          return net, end_points

if __name__ == "__main__":
  x = tf.placeholder(tf.float32, [None, 512, 512, 3])

  net, end_points = deeplabv3(x, 21)
  for i in end_points:
    print(i, end_points[i])
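
Continuing the __main__ example above: with a 512x512 input and output stride 16, the returned net (the 'logits' end point) should come out at [batch, 32, 32, 21]. Below is a minimal training sketch on top of it; it is not part of the original code, and the labels placeholder and the Adam learning rate are illustrative assumptions (TF 1.x APIs, matching the rest of the script):

  # Minimal training sketch (assumption: labels is a hypothetical
  # [batch, 512, 512] int32 placeholder of class indices; not in the original).
  labels = tf.placeholder(tf.int32, [None, 512, 512])

  # Upsample the 1/16-resolution logits back to the input size before the loss.
  logits = tf.image.resize_bilinear(net, tf.shape(x)[1:3])

  loss = tf.reduce_mean(
      tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                     logits=logits))
  # For real training, the batch-norm update ops collected by slim should also
  # be run together with the optimizer step.
  train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)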
