U-Net: Paper Interpretation and Code Walkthrough

Paper Interpretation

Network Architecture:

a. U-Net is built on the FCN architecture; the authors modified and extended that framework so that very precise segmentation results can be obtained from very few training images.

b. An upsampling (expansive) stage is added with a large number of feature channels, which allows more texture and context information from the original image to be propagated to the high-resolution layers.

c. U-Net has no fully connected layers and uses valid convolutions throughout, which guarantees that every pixel of the segmentation map is computed from complete (non-padded) context; as a consequence, the output is smaller than the input (many Keras implementations, however, use same convolutions instead). For images that are too large to process in one pass, the overlap-tile strategy can be used to produce a seamless segmentation of arbitrarily large inputs. A small size-bookkeeping sketch follows the figure below.

tile strategy
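To make the valid-convolution bookkeeping concrete, here is a purely illustrative calculation (not from the repository) that reproduces the 572 → 388 input/output sizes of the original paper, assuming 3x3 valid convolutions, 2x2 pooling/up-convolutions, and four resolution steps:

def down(s):            # two 3x3 VALID convs (each removes 2 pixels), then 2x2 pooling
    return (s - 4) // 2

def up(s):              # 2x2 up-conv doubles the size, then two 3x3 VALID convs
    return s * 2 - 4

s = 572
for _ in range(4):      # contracting path
    s = down(s)
s -= 4                  # two more convolutions at the bottom of the "U"
for _ in range(4):      # expansive path
    s = up(s)
print(s)                # 388 -- the output map is 388x388 for a 572x572 input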

d. To predict pixels in the border region of the input image, the missing context is extrapolated by mirroring the input image. Feeding in a larger image would also work, but this strategy was proposed for the case where GPU memory is insufficient, so large images have to be processed tile by tile. A minimal padding sketch is shown below.
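As a rough illustration of this mirroring (not code from the repository; the 92-pixel pad width is simply the border that a 572 → 388 network loses, i.e. (572 - 388) / 2):

import numpy as np

tile = np.random.rand(388, 388)                       # hypothetical image tile
padded = np.pad(tile, pad_width=92, mode='reflect')   # extrapolate context by mirroring
print(padded.shape)                                   # (572, 572)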

e. Another difficulty in cell segmentation is separating touching cells of the same class. The authors therefore introduce a weighted loss that assigns a much higher weight to the background pixels lying between two touching cells. A rough sketch of the weight map follows the figure below.

weighted loss
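The paper defines the weight map as w(x) = w_c(x) + w0 * exp(-(d1(x) + d2(x))^2 / (2 * sigma^2)), where d1 and d2 are the distances to the borders of the nearest and second-nearest cell, with w0 = 10 and sigma ≈ 5 pixels. The following is only a rough scipy sketch of how such a map could be computed (not the repository's code; the class-balancing term w_c is simplified to a constant 1 here):

import numpy as np
from scipy.ndimage import distance_transform_edt

def unet_weight_map(instance_mask, w0=10.0, sigma=5.0):
    # instance_mask: one integer label per cell, 0 for background (hypothetical input format)
    ids = [i for i in np.unique(instance_mask) if i != 0]
    if len(ids) < 2:
        return np.ones(instance_mask.shape, dtype=float)
    # distance of every pixel to each individual cell
    dists = np.stack([distance_transform_edt(instance_mask != i) for i in ids])
    dists.sort(axis=0)
    d1, d2 = dists[0], dists[1]            # distances to the two nearest cells
    border_term = w0 * np.exp(-((d1 + d2) ** 2) / (2 * sigma ** 2))
    return 1.0 + border_term * (instance_mask == 0)   # boost background between touching cells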

Code Walkthrough

layers

Initializing weights and biases

# imports used throughout the snippets below (the original post omits them)
from collections import OrderedDict
import logging

import numpy as np
import tensorflow as tf

def weight_variable(shape, stddev=0.1, name="weight"):
    initial = tf.truncated_normal(shape, stddev=stddev)
    return tf.Variable(initial, name=name)

def weight_variable_devonc(shape, stddev=0.1, name="weight_devonc"):
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev), name=name)

def bias_variable(shape, name="bias"):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)
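For example, the weights of a 3x3 convolution with 1 input channel and 16 output feature maps would be created as follows (the stddev value mirrors the He-style initialization used later in create_conv_net):

stddev = np.sqrt(2 / (3 ** 2 * 16))                    # ~0.118
w1 = weight_variable([3, 3, 1, 16], stddev, name="w1")
b1 = bias_variable([16], name="b1")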

Creating the convolution and pooling layers

The padding here is VALID, matching the paper. deconv2d is the transposed convolution, i.e. the upsampling step: taking the first upsampling as an example, if the input x has shape [None, 28, 28, 1024], the output has shape [None, 56, 56, 512] (the spatial dimensions are doubled and the channel count is halved). For the details of how transposed convolution is computed, see https://blog.csdn.net/nijiayan123/article/details/79416764. A small output-size check follows the code below.

def conv2d(x, W, b, keep_prob_):
    with tf.name_scope("conv2d"):
        conv_2d = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
        conv_2d_b = tf.nn.bias_add(conv_2d, b)
        return tf.nn.dropout(conv_2d_b, keep_prob_)

def deconv2d(x, W, stride):
    with tf.name_scope("deconv2d"):
        x_shape = tf.shape(x)
        # double the spatial dimensions and halve the number of channels
        output_shape = tf.stack([x_shape[0], x_shape[1]*2, x_shape[2]*2, x_shape[3]//2])
        return tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1],
                                      padding='VALID', name="conv2d_transpose")

def max_pool(x, n):
    return tf.nn.max_pool(x, ksize=[1, n, n, 1], strides=[1, n, n, 1], padding='VALID')
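A quick sanity check of the deconvolution output size: a transposed convolution with VALID padding produces (H_in - 1) * stride + kernel pixels, which for kernel = stride = 2 is exactly 2 * H_in, matching the output_shape requested above.

def deconv_out_size(h, kernel=2, stride=2):
    # output size of a VALID transposed convolution
    return (h - 1) * stride + kernel

print(deconv_out_size(28))   # 56: [None, 28, 28, 1024] -> [None, 56, 56, 512]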

Connecting the feature maps of the contracting path with the corresponding deconvolution outputs of the expansive path

def crop_and_concat(x1, x2):
    with tf.name_scope("crop_and_concat"):
        x1_shape = tf.shape(x1)
        x2_shape = tf.shape(x2)
        # offsets for the top left corner of the crop
        offsets = [0, (x1_shape[1] - x2_shape[1]) // 2, (x1_shape[2] - x2_shape[2]) // 2, 0]
        size = [-1, x2_shape[1], x2_shape[2], -1]
        x1_crop = tf.slice(x1, offsets, size)
        return tf.concat([x1_crop, x2], 3)
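For example (hypothetical shapes, continuing the trace used later in this post): a 44x44x32 feature map from the contracting path is center-cropped to 36x36 and concatenated with a 36x36x32 up-convolution output along the channel axis.

x1 = tf.placeholder(tf.float32, [None, 44, 44, 32])   # skip connection from the contracting path
x2 = tf.placeholder(tf.float32, [None, 36, 36, 32])   # output of deconv2d
merged = crop_and_concat(x1, x2)                       # runtime shape [None, 36, 36, 64]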

Computing the pixel-wise softmax and cross entropy

Note that here every pixel is its own prediction target. In an ordinary classification task, the final output is a one-dimensional vector of shape [1, class_nums], and the class with the highest softmax score is taken as the label. Here, the final output is a three-dimensional tensor of shape [width, height, class_nums], and a label is predicted independently for every pixel, hence the name pixel-wise softmax. A usage sketch with a per-pixel weight map follows the code below.

def pixel_wise_softmax(output_map):
    with tf.name_scope("pixel_wise_softmax"):
        # subtract the per-pixel maximum for numerical stability
        max_axis = tf.reduce_max(output_map, axis=3, keepdims=True)
        exponential_map = tf.exp(output_map - max_axis)
        normalize = tf.reduce_sum(exponential_map, axis=3, keepdims=True)
        return exponential_map / normalize

def cross_entropy(y_, output_map):
    return -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(output_map, 1e-10, 1.0)), name="cross_entropy")
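A hypothetical usage sketch (not the repository's training code) that combines pixel_wise_softmax with a per-pixel weight map, in the spirit of the weighted loss from the paper:

logits = tf.placeholder(tf.float32, [None, None, None, 2])   # raw network output map
labels = tf.placeholder(tf.float32, [None, None, None, 2])   # one-hot ground truth
weight_map = tf.placeholder(tf.float32, [None, None, None])  # e.g. from a weight-map sketch like the one above

probs = pixel_wise_softmax(logits)
per_pixel_ce = -tf.reduce_sum(labels * tf.log(tf.clip_by_value(probs, 1e-10, 1.0)), axis=3)
weighted_loss = tf.reduce_mean(weight_map * per_pixel_ce)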

unet

The network consists of four main parts: preprocessing, down convolution, up convolution, and the output map.

preprocessing

def create_conv_net(x, keep_prob, channels, n_class, layers=3, features_root=16, filter_size=3, pool_size=2,
                    summaries=True):
    """
    Creates a new convolutional unet for the given parametrization.

    :param x: input tensor, shape [?,nx,ny,channels]
    :param keep_prob: dropout probability tensor
    :param channels: number of channels in the input image
    :param n_class: number of output labels
    :param layers: number of layers in the net
    :param features_root: number of features in the first layer
    :param filter_size: size of the convolution filter
    :param pool_size: size of the max pooling operation
    :param summaries: Flag if summaries should be created
    """
    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}".format(
            layers=layers,
            features=features_root,
            filter_size=filter_size,
            pool_size=pool_size))

    # Placeholder for the input image
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    # bookkeeping of a reference spatial size, used to compute the total border loss (offset)
    in_size = 1000
    size = in_size

down convolution

With layers=3 there are three down-convolution blocks; each block consists of two convolutions followed by one max-pooling step (the last block is not followed by pooling). A hypothetical shape trace is sketched below.
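A purely illustrative trace of the spatial size and channel count on the contracting path, assuming a hypothetical 100x100 single-channel input with the default parameters (layers=3, features_root=16):

size, channels = 100, 1
for layer in range(3):
    features = 2 ** layer * 16      # 16, 32, 64
    size -= 4                       # two VALID 3x3 convolutions
    print("down_conv_%d: %d x %d x %d" % (layer, size, size, features))
    if layer < 2:
        size //= 2                  # 2x2 max pooling
# down_conv_0: 96 x 96 x 16
# down_conv_1: 44 x 44 x 32
# down_conv_2: 18 x 18 x 64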

    # down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2 ** layer * features_root       # feature channels double at every level
            stddev = np.sqrt(2 / (filter_size ** 2 * features))  # He-style initialization
            if layer == 0:
                w1 = weight_variable([filter_size, filter_size, channels, features], stddev, name="w1")
            else:
                w1 = weight_variable([filter_size, filter_size, features // 2, features], stddev, name="w1")

            w2 = weight_variable([filter_size, filter_size, features, features], stddev, name="w2")
            b1 = bias_variable([features], name="b1")
            b2 = bias_variable([features], name="b2")

            conv1 = conv2d(in_node, w1, b1, keep_prob)
            tmp_h_conv = tf.nn.relu(conv1)
            conv2 = conv2d(tmp_h_conv, w2, b2, keep_prob)
            dw_h_convs[layer] = tf.nn.relu(conv2)

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size -= 4                                   # two VALID 3x3 convs shrink each dimension by 4
            if layer < layers - 1:                      # no pooling after the bottom layer
                pools[layer] = max_pool(dw_h_convs[layer], pool_size)
                in_node = pools[layer]
                size /= 2

    in_node = dw_h_convs[layers - 1]

up convolution

With layers=3 there are two up-convolution blocks; each block consists of one transposed convolution, one crop-and-concat operation, and two convolutions. The hypothetical shape trace continues below.
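Continuing the same hypothetical trace on the expansive path, starting from the 18x18x64 bottom feature map:

size, features = 18, 64
for layer in (1, 0):
    size = size * 2                 # 2x2 transposed convolution doubles the size
    size -= 4                       # two VALID 3x3 convolutions after crop_and_concat
    features //= 2
    print("up_conv_%d: %d x %d x %d" % (layer, size, size, features))
# up_conv_1: 32 x 32 x 32
# up_conv_0: 60 x 60 x 16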

    # up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2 ** (layer + 1) * features_root
            stddev = np.sqrt(2 / (filter_size ** 2 * features))

            # transposed convolution halves the channels, then concat with the cropped skip connection
            wd = weight_variable_devonc([pool_size, pool_size, features // 2, features], stddev, name="wd")
            bd = bias_variable([features // 2], name="bd")
            h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)
            h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
            deconv[layer] = h_deconv_concat

            w1 = weight_variable([filter_size, filter_size, features, features // 2], stddev, name="w1")
            w2 = weight_variable([filter_size, filter_size, features // 2, features // 2], stddev, name="w2")
            b1 = bias_variable([features // 2], name="b1")
            b2 = bias_variable([features // 2], name="b2")

            conv1 = conv2d(h_deconv_concat, w1, b1, keep_prob)
            h_conv = tf.nn.relu(conv1)
            conv2 = conv2d(h_conv, w2, b2, keep_prob)
            in_node = tf.nn.relu(conv2)
            up_h_convs[layer] = in_node

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size *= 2
            size -= 4

Output Map

    # Output Map
    with tf.name_scope("output_map"):
        weight = weight_variable([1, 1, features_root, n_class], stddev)
        bias = bias_variable([n_class], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_map = tf.nn.relu(conv)
        up_h_convs["out"] = output_map

    if summaries:
        with tf.name_scope("summaries"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image('summary_conv_%02d_01' % i, get_image_summary(c1))
                tf.summary.image('summary_conv_%02d_02' % i, get_image_summary(c2))

            for k in pools.keys():
                tf.summary.image('summary_pool_%02d' % k, get_image_summary(pools[k]))

            for k in deconv.keys():
                tf.summary.image('summary_deconv_concat_%02d' % k, get_image_summary(deconv[k]))

            for k in dw_h_convs.keys():
                tf.summary.histogram("dw_convolution_%02d" % k + '/activations', dw_h_convs[k])

            for k in up_h_convs.keys():
                tf.summary.histogram("up_convolution_%s" % k + '/activations', up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, variables, int(in_size - size)
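A hypothetical usage sketch (parameter values are only an example): the third return value, int(in_size - size), is the total number of pixels lost to the VALID convolutions in each spatial dimension, so the output map is that much smaller than the input (40 pixels for layers=3).

x = tf.placeholder(tf.float32, [None, None, None, 1])
keep_prob = tf.placeholder(tf.float32)
output_map, variables, offset = create_conv_net(x, keep_prob, channels=1, n_class=2,
                                                layers=3, features_root=16)
# offset == 40 for layers=3: a 100x100 input yields a 60x60 output map,
# consistent with the shape trace above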
