Convolution layer
# convolution
# 1x1 convolution (GoogLeNet-style auxiliary-classifier head):
# projects loss1/ave_pool down to 128 channels without changing spatial size.
layer {
  name: "loss1/conv"
  type: "Convolution"
  bottom: "loss1/ave_pool"
  top: "loss1/conv"
  param {
    lr_mult: 1    # weight learning-rate multiplier
    decay_mult: 1 # weights are weight-decayed
  }
  param {
    lr_mult: 2    # biases learn twice as fast (common Caffe convention)
    decay_mult: 0 # biases are not weight-decayed
  }
  convolution_param {
    num_output: 128
    kernel_size: 1
    stride: 1 # default: stride = 1
    # FIX: removed "pad: 1" — with a 1x1 kernel it would grow each spatial
    # dimension by 2; a 1x1 projection uses the default pad of 0.
    weight_filler {
      # xavier type
      type: "xavier"
      # gaussian type
      #type: "gaussian"
      #std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.2
    }
  }
}
Deconvolution
# 2x learnable upsampling of the 21-channel score map via Deconvolution,
# initialized to bilinear interpolation weights.
layer {
  name: "score2"
  type: "Deconvolution"
  bottom: "score"
  top: "score2"
  param {
    # NOTE(review): lr_mult 1 lets the bilinear-initialized kernel be trained;
    # fixed bilinear interpolation would use lr_mult: 0 — confirm intent.
    lr_mult: 1
  }
  convolution_param {
    num_output: 21 # presumably PASCAL VOC: 20 classes + background — verify
    kernel_size: 4
    stride: 2
    weight_filler {
      type: "bilinear"
    }
  }
}
Dilation Convolution
# 3x3 dilated (atrous) convolution: enlarges the receptive field
# without downsampling (DeepLab-style).
layer {
name: "conv5_3"
type: "Convolution"
bottom: "conv5_2"
top: "conv5_3"
param {
lr_mult: 1 # weight learning-rate multiplier
decay_mult: 1 # weights are weight-decayed
}
param {
lr_mult: 2 # biases learn twice as fast
decay_mult: 0 # biases are not weight-decayed
}
convolution_param {
num_output: 512
kernel_size: 3
pad: 2
dilation: 2 # with kernel_size 3, choosing pad = dilation keeps the output the same spatial size as the input
}
}
Pooling
max pool
# 3x3 max pooling with stride 2: halves the spatial resolution.
layer {
name: "pool1_3x3_s2"
type: "Pooling"
bottom: "conv1_3_3x3"
top: "pool1_3x3_s2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
pad: 1 # pad by 1 so border pixels are covered; Caffe pooling rounds the output size up (ceil)
}
}
ave pool
# Average pooling over a 60x60 window with stride 60.
# NOTE(review): presumably a pyramid-pooling branch where conv5_3 is 60x60,
# making this a global average pool to 1x1 — confirm the input size.
layer {
name: "conv5_3_pool1"
type: "Pooling"
bottom: "conv5_3"
top: "conv5_3_pool1"
pooling_param {
pool: AVE
kernel_size: 60
stride: 60
}
}
Upsample
# SegNet-style unpooling: places conv5_1_D activations back at the locations
# recorded in pool4_mask by the corresponding max-pooling layer.
layer {
  name: "upsample4"
  type: "Upsample"
  # FIX: both bottoms are listed together before the top (file convention);
  # their relative order (data blob, then mask blob) is unchanged, so the
  # parsed message is identical.
  bottom: "conv5_1_D"
  bottom: "pool4_mask"
  top: "pool4_D"
  upsample_param {
    scale: 2
    # NOTE(review): explicit output size alongside scale — presumably pins the
    # exact W x H when the pooled size was odd; verify against the SegNet
    # Upsample layer's parameter semantics.
    upsample_w: 60
    upsample_h: 45
  }
}
Eltwise
# Element-wise sum fusing the conv4_3 and res_conv4 feature maps
# (both bottoms must have identical shape).
layer {
  name: "fusion_res_cov4"
  type: "Eltwise"
  bottom: "conv4_3"
  bottom: "res_conv4"
  top: "fusion_res_cov4"
  eltwise_param {
    operation: SUM # available operations: PROD, SUM, MAX
  }
}
Concat
# Concatenates the four Inception branch outputs along the channel axis
# (Concat's default axis is 1 = channels); all bottoms must match in N, H, W.
layer {
name: "inception_4a/output"
type: "Concat"
bottom: "inception_4a/1x1"
bottom: "inception_4a/3x3"
bottom: "inception_4a/5x5"
bottom: "inception_4a/pool_proj"
top: "inception_4a/output"
}
InnerProduct
# Fully-connected classification layer on top of fc7.
layer {
name: "imagenet_fc"
type: "InnerProduct"
bottom: "fc7"
top: "imagenet_fc"
param {
lr_mult: 1
decay_mult: 250 # NOTE(review): unusually large weight-decay multiplier — confirm this is intentional and matched to the solver's base weight_decay
}
param {
lr_mult: 2 # biases learn twice as fast
decay_mult: 0 # biases are not weight-decayed
}
inner_product_param {
num_output: ${NUM_LABELS} # template placeholder — not valid prototxt; must be substituted with the class count before parsing
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.7
}
}
}
Dropout
# Dropout applied in-place on fc6 (bottom == top), active only at TRAIN time.
layer {
name: "drop6"
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5 # probability of zeroing each activation during training
}
}
Batch Normalization
# BatchNorm2
# Batch normalization of Concat1; this instance is included only in the
# TRAIN phase and normalizes with the current mini-batch statistics.
layer {
  name: "BatchNorm2"
  type: "BatchNorm"
  include {
    phase: TRAIN
  }
  bottom: "Concat1"
  top: "BatchNorm2"
  # BatchNorm's three internal blobs (mean, variance, moving-average factor)
  # are maintained by the layer itself, so learning rate and decay are zeroed.
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  batch_norm_param {
    use_global_stats: false # TRAIN: use mini-batch statistics, not the stored averages
  }
}
# BatchNorm
# Minimal BatchNorm: the three internal statistics blobs get lr_mult 0
# because the layer updates them itself. With no batch_norm_param,
# use_global_stats defaults by phase (batch stats at TRAIN, stored at TEST).
layer {
name: "bn3"
type: "BatchNorm"
bottom: "conv3"
top: "bn3"
param {
lr_mult: 0
}
param {
lr_mult: 0
}
param {
lr_mult: 0
}
}
# NOTE(review): "BN" is a custom layer (not upstream Caffe BatchNorm) —
# presumably the fused batch-norm + scale/shift variant used by some
# segmentation forks; verify against the repo that defines it.
layer {
name: "spp3_bn"
type: "BN"
bottom: "conv_spp_3_ave_pool"
top: "spp3_bn"
# First two params: learnable scale (slope) and shift (bias).
param {
lr_mult: 1
decay_mult: 0
}
param {
lr_mult: 1
decay_mult: 0
}
# Last two params: running statistics, not learned by the solver.
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
bn_param {
slope_filler {
type: "constant"
value: 1 # identity scale at initialization
}
bias_filler {
type: "constant"
value: 0 # zero shift at initialization
}
frozen: true # use stored statistics; do not update them during training
momentum: 0.95
}
}
LRN
# AlexNet-style local response normalization (default: across channels).
layer {
name: "norm1"
type: "LRN"
bottom: "pool1"
top: "norm1"
lrn_param {
local_size: 5 # number of channels in the normalization window
alpha: 0.0001 # scaling parameter
beta: 0.75 # exponent
}
}
Scale
# Learnable per-channel scale and shift; the usual companion of a BatchNorm
# layer (Caffe's BatchNorm does not include the affine transform itself).
layer {
bottom: "conv1/7x7_s2/bn"
top: "conv1/7x7_s2/bn/sc"
name: "conv1/7x7_s2/bn/sc"
type: "Scale"
scale_param {
bias_term: true # also learn an additive per-channel bias
}
}
Threshold
# Threshold: outputs 1 where the input exceeds the threshold, else 0.
layer {
name: "threshold"
type: "Threshold"
bottom: "soft_prob_s1"
top: "threshold"
threshold_param {
threshold: 1e-36 # tiny threshold: effectively tests "strictly positive"
}
}
SigmoidGateLayer
# NOTE(review): "SigmoidGate" is a custom layer (not upstream Caffe) —
# presumably gates soft_prob_s1 around the 0.5 threshold; verify its
# semantics against the repo that defines it.
layer {
name: "gate"
type: "SigmoidGate"
bottom: "soft_prob_s1"
top: "gate"
gate_param {
threshold: 0.5
}
}
#
ReLU
# ReLU applied in-place (bottom == top) to save memory.
layer {
name: "relu1_1"
type: "ReLU"
bottom: "conv1_1"
top: "conv1_1"
}
PReLU
# PReLU: ReLU with a learnable negative slope. Unlike the in-place ReLU
# example above, this writes to a separate top blob ("relu6").
layer {
name: "relu6"
bottom: "fc6"
top: "relu6"
type: "PReLU"
prelu_param {
filler {
type: "constant"
value: 0.3 # initial negative slope
}
channel_shared: false # one learnable slope per channel, not a single shared one
}
}
label interpolation
# DeepLab-style label interpolation: shrinks the label map by 8x so it
# matches the downsampled prediction resolution.
layer {
bottom: "label"
top: "label_shrink"
name: "label_shrink"
type: "Interp"
interp_param {
shrink_factor: 8
pad_beg: 0
pad_end: 0
}
}
data interpolation
# Data interpolation: upsamples the fc8_voc12 score map by 8x, e.g. back to
# the input image resolution for dense prediction.
layer {
name: "fc8_interp"
type: "Interp"
bottom: "fc8_voc12"
top: "fc8_interp"
interp_param {
zoom_factor: 8
}
}