Paper: http://www.arxiv.org/pdf/1505.04597.pdf
Network architecture:
a. U-Net builds on the FCN architecture; the authors modified and extended that framework so it achieves very precise segmentation results from very few training images.
b. An upsampling path is added, together with a large number of feature channels, which allows more texture information from the original image to propagate through the high-resolution layers.
c. U-Net has no fully connected layers and uses valid convolutions throughout, so every pixel of the segmentation result is predicted from complete, un-padded context; as a consequence the output is smaller than the input (the Keras ports, by contrast, mostly use same-padded convolutions). For inputs too large to process at once, the overlap-tile strategy produces a seamless segmentation output.
(Figure: overlap-tile strategy)
d. To predict the pixels in the border region of an image, the missing context is extrapolated by mirroring the input image. Feeding the network a bigger image would also work; the tiling-plus-mirroring strategy exists for the case where GPU memory cannot hold the whole image.
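The mirror extrapolation can be reproduced with a simple reflect pad; a minimal sketch (my illustration, not the authors' code), assuming numpy:

    import numpy as np

    img = np.arange(16, dtype=np.float32).reshape(4, 4)
    # extrapolate 2 pixels of context on every border by mirroring
    padded = np.pad(img, pad_width=2, mode='reflect')
    print(padded.shape)  # (8, 8); the border rows/columns mirror the interior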
e. Another difficulty in cell segmentation is separating touching objects of the same class. The authors therefore introduce a weighted loss, in which the background labels between two touching cells are given a large weight, as sketched below.
(Figure: weighted loss)
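In the paper this weight map is w(x) = wc(x) + w0 · exp(−(d1(x) + d2(x))² / (2σ²)), where d1 and d2 are the distances to the nearest and second-nearest cell, with w0 = 10 and σ ≈ 5 pixels. A minimal sketch of the separation term (omitting the class-balance term wc; assumes scipy, and is not part of tf_unet):

    import numpy as np
    from scipy.ndimage import distance_transform_edt

    def separation_weight_map(masks, w0=10.0, sigma=5.0):
        # masks: array [num_cells, H, W] of binary instance masks (num_cells >= 2)
        dists = np.stack([distance_transform_edt(m == 0) for m in masks])
        dists.sort(axis=0)
        d1, d2 = dists[0], dists[1]               # nearest / second-nearest cell
        background = np.all(masks == 0, axis=0)   # weight only the background
        return background * w0 * np.exp(-((d1 + d2) ** 2) / (2 * sigma ** 2))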
Code source: https://github.com/jakeret/tf_unet
Initializing the weights and biases
# Imports used by all the snippets below (TensorFlow 1.x API)
import logging
from collections import OrderedDict
import numpy as np
import tensorflow as tf

def weight_variable(shape, stddev=0.1, name="weight"):
    initial = tf.truncated_normal(shape, stddev=stddev)
    return tf.Variable(initial, name=name)

def weight_variable_devonc(shape, stddev=0.1, name="weight_devonc"):
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev), name=name)

def bias_variable(shape, name="bias"):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)
Creating the convolution and pooling layers
The padding used here is VALID, exactly as the paper specifies. deconv2d is the transposed convolution, i.e. the upsampling step: taking the first upsampling as an example, an input x of shape [None, 28, 28, 1024] yields an output of shape [None, 56, 56, 512]. For the computational details of transposed convolution, see https://blog.csdn.net/nijiayan123/article/details/79416764.
def conv2d(x, W, b, keep_prob_):
    with tf.name_scope("conv2d"):
        conv_2d = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
        conv_2d_b = tf.nn.bias_add(conv_2d, b)
        return tf.nn.dropout(conv_2d_b, keep_prob_)

def deconv2d(x, W, stride):
    with tf.name_scope("deconv2d"):
        x_shape = tf.shape(x)
        # doubles the spatial size and halves the number of channels
        output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] // 2])
        return tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1],
                                      padding='VALID', name="conv2d_transpose")

def max_pool(x, n):
    return tf.nn.max_pool(x, ksize=[1, n, n, 1], strides=[1, n, n, 1], padding='VALID')
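A quick shape check for the upsampling described above; a sketch assuming the TF 1.x session API (note that conv2d_transpose expects its filter as [k, k, out_channels, in_channels]):

    x = tf.placeholder(tf.float32, [None, 28, 28, 1024])
    wd = weight_variable_devonc([2, 2, 512, 1024])  # [k, k, out_ch, in_ch]
    up = deconv2d(x, wd, 2)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(up, {x: np.zeros((1, 28, 28, 1024), np.float32)})
        print(out.shape)  # (1, 56, 56, 512)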
Connecting the feature maps from the contracting path with the corresponding deconvolution outputs
def crop_and_concat(x1, x2):
    with tf.name_scope("crop_and_concat"):
        x1_shape = tf.shape(x1)
        x2_shape = tf.shape(x2)
        # offsets for the top left corner of the crop
        offsets = [0, (x1_shape[1] - x2_shape[1]) // 2, (x1_shape[2] - x2_shape[2]) // 2, 0]
        size = [-1, x2_shape[1], x2_shape[2], -1]
        x1_crop = tf.slice(x1, offsets, size)
        return tf.concat([x1_crop, x2], 3)
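For example, pairing a 64×64 skip connection with a 56×56 upsampled map (a sketch):

    x1 = tf.zeros([1, 64, 64, 512])  # feature map saved from the contracting path
    x2 = tf.zeros([1, 56, 56, 512])  # upsampled feature map
    y = crop_and_concat(x1, x2)      # x1 is center-cropped to 56x56, then stacked on channels
    with tf.Session() as sess:
        print(sess.run(y).shape)     # (1, 56, 56, 1024)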
Computing the pixel-wise softmax and cross entropy
Note that here each pixel is its own prediction target. In an ordinary classification task the final output is a one-dimensional vector [1, class_nums], and the class with the highest softmax score is taken as the label. Here the final output is a three-dimensional tensor [width, height, class_nums], and the label is predicted separately for every pixel, hence the name pixel-wise softmax.
def pixel_wise_softmax(output_map):
    with tf.name_scope("pixel_wise_softmax"):
        # subtract the per-pixel max for numerical stability
        max_axis = tf.reduce_max(output_map, axis=3, keepdims=True)
        exponential_map = tf.exp(output_map - max_axis)
        normalize = tf.reduce_sum(exponential_map, axis=3, keepdims=True)
        return exponential_map / normalize

def cross_entropy(y_, output_map):
    return -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(output_map, 1e-10, 1.0)), name="cross_entropy")
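On dummy data, each pixel's class probabilities sum to 1 and the loss is a scalar; a small check (a sketch):

    logits = tf.random_normal([1, 56, 56, 2])                      # [batch, h, w, n_class]
    probs = pixel_wise_softmax(logits)
    labels = tf.one_hot(tf.zeros([1, 56, 56], tf.int32), depth=2)  # dummy one-hot ground truth
    loss = cross_entropy(labels, probs)
    with tf.Session() as sess:
        p, l = sess.run([probs, loss])
        print(p[0, 0, 0].sum(), l)  # 1.0 and a scalar loss value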
Preprocessing
def create_conv_net(x, keep_prob, channels, n_class, layers=3, features_root=16, filter_size=3, pool_size=2,
                    summaries=True):
    """
    Creates a new convolutional unet for the given parametrization.

    :param x: input tensor, shape [?,nx,ny,channels]
    :param keep_prob: dropout probability tensor
    :param channels: number of channels in the input image
    :param n_class: number of output labels
    :param layers: number of layers in the net
    :param features_root: number of features in the first layer
    :param filter_size: size of the convolution filter
    :param pool_size: size of the max pooling operation
    :param summaries: Flag if summaries should be created
    """
    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}".format(
            layers=layers,
            features=features_root,
            filter_size=filter_size,
            pool_size=pool_size))

    # Placeholder for the input image
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image

    batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 1000
    size = in_size
Down convolution
With layers = 3 there are three down-convolution blocks; each block consists of two 3×3 convolutions followed by one max pooling, except the last (bottom) block, which skips the pooling. Weights are drawn from a truncated normal with stddev = sqrt(2 / (filter_size² · features)), i.e. He-style initialization.
    # down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2 ** layer * features_root
            stddev = np.sqrt(2 / (filter_size ** 2 * features))
            if layer == 0:
                w1 = weight_variable([filter_size, filter_size, channels, features], stddev, name="w1")
            else:
                w1 = weight_variable([filter_size, filter_size, features // 2, features], stddev, name="w1")

            w2 = weight_variable([filter_size, filter_size, features, features], stddev, name="w2")
            b1 = bias_variable([features], name="b1")
            b2 = bias_variable([features], name="b2")

            conv1 = conv2d(in_node, w1, b1, keep_prob)
            tmp_h_conv = tf.nn.relu(conv1)
            conv2 = conv2d(tmp_h_conv, w2, b2, keep_prob)
            dw_h_convs[layer] = tf.nn.relu(conv2)

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size -= 4  # two 3x3 valid convs each trim 2 pixels
            if layer < layers - 1:
                pools[layer] = max_pool(dw_h_convs[layer], pool_size)
                in_node = pools[layer]
                size /= 2

    in_node = dw_h_convs[layers - 1]
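Tracking the bookkeeping for the defaults (in_size = 1000, features_root = 16): two valid 3×3 convolutions trim 4 pixels, pooling halves the size, and the channel count doubles per block:

    layer 0: 16 channels, 1000 → 996, pool → 498
    layer 1: 32 channels,  498 → 494, pool → 247
    layer 2: 64 channels,  247 → 243 (bottom of the U, no pooling)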
Up convolution
With layers = 3 there are two up-convolution blocks (layers − 1); each block consists of one transposed convolution, one crop-and-concatenate, and two 3×3 convolutions.
    # up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2 ** (layer + 1) * features_root
            stddev = np.sqrt(2 / (filter_size ** 2 * features))

            wd = weight_variable_devonc([pool_size, pool_size, features // 2, features], stddev, name="wd")
            bd = bias_variable([features // 2], name="bd")
            h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)
            h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
            deconv[layer] = h_deconv_concat

            w1 = weight_variable([filter_size, filter_size, features, features // 2], stddev, name="w1")
            w2 = weight_variable([filter_size, filter_size, features // 2, features // 2], stddev, name="w2")
            b1 = bias_variable([features // 2], name="b1")
            b2 = bias_variable([features // 2], name="b2")

            conv1 = conv2d(h_deconv_concat, w1, b1, keep_prob)
            h_conv = tf.nn.relu(conv1)
            conv2 = conv2d(h_conv, w2, b2, keep_prob)
            in_node = tf.nn.relu(conv2)
            up_h_convs[layer] = in_node

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size *= 2  # the deconvolution doubles the spatial size
            size -= 4  # two 3x3 valid convs trim it back
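The same trace continued up the expansive path: each deconvolution doubles the size and halves the channels, the concatenation restores them, and two valid convolutions trim 4 pixels:

    layer 1: deconv 243 → 486, convs → 482 at 32 channels
    layer 0: deconv 482 → 964, convs → 960 at 16 channels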
Output Map
    # Output Map
    with tf.name_scope("output_map"):
        # 1x1 convolution to n_class channels, no dropout (keep_prob = 1.0)
        weight = weight_variable([1, 1, features_root, n_class], stddev)
        bias = bias_variable([n_class], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_map = tf.nn.relu(conv)
        up_h_convs["out"] = output_map

    if summaries:
        with tf.name_scope("summaries"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image('summary_conv_%02d_01' % i, get_image_summary(c1))
                tf.summary.image('summary_conv_%02d_02' % i, get_image_summary(c2))

            for k in pools.keys():
                tf.summary.image('summary_pool_%02d' % k, get_image_summary(pools[k]))

            for k in deconv.keys():
                tf.summary.image('summary_deconv_concat_%02d' % k, get_image_summary(deconv[k]))

            for k in dw_h_convs.keys():
                tf.summary.histogram("dw_convolution_%02d" % k + '/activations', dw_h_convs[k])

            for k in up_h_convs.keys():
                tf.summary.histogram("up_convolution_%s" % k + '/activations', up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, variables, int(in_size - size)
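To instantiate the graph end to end; a sketch (summaries=False avoids get_image_summary, a helper defined elsewhere in tf_unet):

    x = tf.placeholder(tf.float32, [None, None, None, 1], name="x")
    keep_prob = tf.placeholder(tf.float32, name="dropout_probability")
    output_map, variables, offset = create_conv_net(x, keep_prob, channels=1,
                                                    n_class=2, summaries=False)
    print(offset)  # 40 for the defaults: the output is 40 px smaller than the input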
Author: golfgang
Link: https://www.jianshu.com/p/f9b0c2c74488
Source: Jianshu. Copyright belongs to the author; for any form of reprint, contact the author for authorization and cite the source.