This post is original; when reposting, please credit the source: https://blog.csdn.net/dan_teng/article/details/81532013
One distinctive feature of the SSD network is that, to improve detection accuracy, it makes predictions on feature maps of several different scales, and these predictions rely on the prior box layer.
So what is a prior box? It is essentially the same idea as the anchors in Faster R-CNN: a candidate box, except that these candidates do not need the elaborate processing that R-CNN uses to produce them. In SSD, the PriorBox layer only needs the size of its bottom feature map to produce the candidates. Suppose the input feature map has size W×H; then W×H prior box centers are generated, evenly distributed over the whole image. At each center, several prior boxes with different aspect ratios can be generated, e.g. [1/3, 1/2, 1, 2, 3], so each location yields length_of_aspect_ratio boxes, and the total number of prior boxes that can be generated on one feature map is W×H×length_of_aspect_ratio.
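For example (illustrative numbers), a 38×38 feature map combined with the five aspect ratios above yields 38 × 38 × 5 = 7220 candidate boxes. In the actual code below, the per-location count also depends on how many min_size/max_size values are configured (see how num_priors_ is computed in LayerSetUp).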
As shown in figure (b) above, four candidate boxes are generated at each center point of the 8x8 feature map. (c) is a 4x4 feature map; suppose (c) is a layer that comes after (b) and uses the same prior box parameters. Then clearly the candidate boxes taken from (b) are better suited to detecting small objects, while those taken from (c) are better suited to detecting large objects (the 4x4 feature map is only half the width and height of the 8x8 one; prior boxes with the same parameters cover the same area on the feature map, but both feature maps correspond to the same original image, so the same feature-map area on the 4x4 map, mapped back to the original image, covers a larger region: 4 times the area of the 8x8 case).
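To put numbers on this (assuming, purely for illustration, a 300×300 input image): the 8x8 map has a step of 300 / 8 = 37.5 pixels per cell, while the 4x4 map has a step of 300 / 4 = 75 pixels, so a box spanning the same number of cells maps back to twice the side length and therefore 2² = 4 times the area.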
As described above, SSD runs detection on feature maps of several scales: large feature maps (earlier layers) are used to detect small objects, while small feature maps (later layers) are used to detect large objects. As the figure below shows, the SSD network extracts features from 6 feature map layers, and a PriorBox layer is attached after each of these 6 layers.
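(For reference, in the standard SSD300 model these six layers are conv4_3_norm (38×38), fc7 (19×19), conv6_2 (10×10), conv7_2 (5×5), conv8_2 (3×3) and conv9_2 (1×1); the names and sizes here are quoted from the public SSD300 prototxt and may differ in other variants.)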
Computation approach: take the midpoint of each feature map cell as the center (offset = 0.5) and generate a series of concentric prior boxes (the center coordinates are then multiplied by step, which maps the feature-map position back to a position on the original image); finally the coordinates are normalized.
The square prior boxes have a minimum side length of min_size and a maximum side length of sqrt(min_size × max_size) (the latter can be read off the Forward_cpu code below).
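Putting the rules together, the boxes generated at each cell (w, h) are as follows (these formulas are read directly from the Forward_cpu code below):

center_x = (w + offset) * step_w, center_y = (h + offset) * step_h
square box 1: width = height = min_size (or min_size / 2 when reduce_boxes is set)
square box 2 (only if max_size is given): width = height = sqrt(min_size * max_size)
for each aspect ratio ar ≠ 1: width = min_size * sqrt(ar), height = min_size / sqrt(ar)

Every coordinate is finally divided by img_width or img_height, so the output is in coordinates relative to the image size (and clamped to [0, 1] only when clip = true).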
#include <algorithm>
#include <functional>
#include <utility>
#include <vector>
#include "caffe/layers/prior_box_layer.hpp"
namespace caffe {
template <typename Dtype>
void PriorBoxLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const PriorBoxParameter& prior_box_param =
this->layer_param_.prior_box_param();
CHECK_GT(prior_box_param.min_size_size(), 0) << "must provide min_size.";
// Read the min_size values
for (int i = 0; i < prior_box_param.min_size_size(); ++i) {
min_sizes_.push_back(prior_box_param.min_size(i));
CHECK_GT(min_sizes_.back(), 0) << "min_size must be positive.";
}
aspect_ratios_.clear();
aspect_ratios_.push_back(1.);  // ratio 1 is always included by default
flip_ = prior_box_param.flip();
// Store the aspect_ratio values (skipping duplicates)
for (int i = 0; i < prior_box_param.aspect_ratio_size(); ++i) {
float ar = prior_box_param.aspect_ratio(i);
bool already_exist = false;
for (int j = 0; j < aspect_ratios_.size(); ++j) {
if (fabs(ar - aspect_ratios_[j]) < 1e-6) {
already_exist = true;
break;
}
}
if (!already_exist) {
aspect_ratios_.push_back(ar);
if (flip_) {  // flip: also add the reciprocal of each ratio
aspect_ratios_.push_back(1./ar);
}
}
}
// Number of prior boxes per location. Note: the max_size count still has to be added below to get the final number
num_priors_ = aspect_ratios_.size() * min_sizes_.size();
if (prior_box_param.max_size_size() > 0) {
CHECK_EQ(prior_box_param.min_size_size(), prior_box_param.max_size_size());
for (int i = 0; i < prior_box_param.max_size_size(); ++i) {
max_sizes_.push_back(prior_box_param.max_size(i));
CHECK_GT(max_sizes_[i], min_sizes_[i])
<< "max_size must be greater than min_size.";
num_priors_ += 1;  // one extra prior per max_size
}
}
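// Example (illustrative values): with min_size: 30, max_size: 60, aspect_ratio: 2,
// aspect_ratio: 3 and flip: true, aspect_ratios_ becomes {1, 2, 1/2, 3, 1/3},
// so num_priors_ = 5 * 1 + 1 = 6 boxes per feature map location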
clip_ = prior_box_param.clip();
// variance is used later when encoding/decoding the ground-truth boxes; either 1 value or 4 values must be provided
if (prior_box_param.variance_size() > 1) {
// Must and only provide 4 variance.
CHECK_EQ(prior_box_param.variance_size(), 4);
for (int i = 0; i < prior_box_param.variance_size(); ++i) {
CHECK_GT(prior_box_param.variance(i), 0);
variance_.push_back(prior_box_param.variance(i));
}
} else if (prior_box_param.variance_size() == 1) {
CHECK_GT(prior_box_param.variance(0), 0);
variance_.push_back(prior_box_param.variance(0));
} else {
// Set default to 0.1.
variance_.push_back(0.1);
}
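// For reference, the public SSD models typically use variance values of
// 0.1, 0.1, 0.2, 0.2 (0.1 for the center offsets, 0.2 for the sizes)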
if (prior_box_param.has_img_h() || prior_box_param.has_img_w()) {
CHECK(!prior_box_param.has_img_size())
<< "Either img_size or img_h/img_w should be specified; not both.";
img_h_ = prior_box_param.img_h();
CHECK_GT(img_h_, 0) << "img_h should be larger than 0.";
img_w_ = prior_box_param.img_w();
CHECK_GT(img_w_, 0) << "img_w should be larger than 0.";
} else if (prior_box_param.has_img_size()) {
const int img_size = prior_box_param.img_size();
CHECK_GT(img_size, 0) << "img_size should be larger than 0.";
img_h_ = img_size;
img_w_ = img_size;
} else {
img_h_ = 0;
img_w_ = 0;
}
if (prior_box_param.has_step_h() || prior_box_param.has_step_w()) {
CHECK(!prior_box_param.has_step())
<< "Either step or step_h/step_w should be specified; not both.";
step_h_ = prior_box_param.step_h();
CHECK_GT(step_h_, 0.) << "step_h should be larger than 0.";
step_w_ = prior_box_param.step_w();
CHECK_GT(step_w_, 0.) << "step_w should be larger than 0.";
} else if (prior_box_param.has_step()) {
const float step = prior_box_param.step();
CHECK_GT(step, 0) << "step should be larger than 0.";
step_h_ = step;
step_w_ = step;
} else {
step_h_ = 0;
step_w_ = 0;
}
offset_ = prior_box_param.offset();
reduce_boxes_ = prior_box_param.reduce_boxes();
}
// The output of this layer has shape [1, 2, layer_width * layer_height * num_priors_ * 4]
// The first channel stores the four coordinates of every box
// The second channel stores the variance (identical for every box)
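// Example (assuming the SSD300 conv4_3 setting: a 38x38 feature map with num_priors_ = 4):
// the top blob is reshaped to [1, 2, 38 * 38 * 4 * 4] = [1, 2, 23104]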
template <typename Dtype>
void PriorBoxLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// Get the feature map size
const int layer_width = bottom[0]->width();
const int layer_height = bottom[0]->height();
vector<int> top_shape(3, 1);
// Since all images in a batch have the same height and width, we only need to
// generate one set of priors which can be shared across all images.
top_shape[0] = 1;
// 2 channels. First channel stores the mean of each prior coordinate.
// Second channel stores the variance of each prior coordinate.
top_shape[1] = 2;
top_shape[2] = layer_width * layer_height * num_priors_ * 4;
CHECK_GT(top_shape[2], 0);
top[0]->Reshape(top_shape);
}
template <typename Dtype>
void PriorBoxLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// Get the feature map size
const int layer_width = bottom[0]->width();
const int layer_height = bottom[0]->height();
int img_width, img_height;
if (img_h_ == 0 || img_w_ == 0) {
img_width = bottom[1]->width();  // width and height of the input image
img_height = bottom[1]->height();
} else {
img_width = img_w_;
img_height = img_h_;
}
float step_w, step_h;
if (step_w_ == 0 || step_h_ == 0) {
step_w = static_cast<float>(img_width) / layer_width;  // step (scaling factor) from feature map to image
step_h = static_cast<float>(img_height) / layer_height;
} else {
step_w = step_w_;
step_h = step_h_;
}
Dtype* top_data = top[0]->mutable_cpu_data();
// size of the last output dimension
int dim = layer_height * layer_width * num_priors_ * 4;
int idx = 0;
for (int h = 0; h < layer_height; ++h) {
for (int w = 0; w < layer_width; ++w) {
// Process each feature map cell, taking its center as the box center
// offset defaults to 0.5, i.e. a small shift to the middle of the cell
// This maps the center point back onto the original image
float center_x = (w + offset_) * step_w;
float center_y = (h + offset_) * step_h;
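// e.g. (illustrative): img_width = 300 and layer_width = 19 give step_w ≈ 15.8,
// so the cell (w, h) = (0, 0) has its center at roughly (7.9, 7.9) in image pixels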
float box_width, box_height;
for (int s = 0; s < min_sizes_.size(); ++s) {
int min_size_ = min_sizes_[s];
// first prior: aspect_ratio = 1, size = min_size
if (reduce_boxes_) {
box_width = box_height = min_size_ / 2.0;// for mobilenet, conv11 featuremap
}
else{
box_width = box_height = min_size_;
}
// Square box determined by min_size; the coordinates are normalized
// xmin
top_data[idx++] = (center_x - box_width / 2.) / img_width;
// ymin
top_data[idx++] = (center_y - box_height / 2.) / img_height;
// xmax
top_data[idx++] = (center_x + box_width / 2.) / img_width;
// ymax
top_data[idx++] = (center_y + box_height / 2.) / img_height;
if (max_sizes_.size() > 0) {
CHECK_EQ(min_sizes_.size(), max_sizes_.size());
int max_size_ = max_sizes_[s];
// second prior: aspect_ratio = 1, size = sqrt(min_size * max_size)
box_width = box_height = sqrt(min_size_ * max_size_);
// Square box determined by max_size
// xmin
top_data[idx++] = (center_x - box_width / 2.) / img_width;
// ymin
top_data[idx++] = (center_y - box_height / 2.) / img_height;
// xmax
top_data[idx++] = (center_x + box_width / 2.) / img_width;
// ymax
top_data[idx++] = (center_y + box_height / 2.) / img_height;
}
// rest of priors
for (int r = 0; r < aspect_ratios_.size(); ++r) {
float ar = aspect_ratios_[r];
if (fabs(ar - 1.) < 1e-6) {
continue;
}
// By definition, a rectangular box jointly determined by aspect_ratio and min_size
box_width = min_size_ * sqrt(ar);
box_height = min_size_ / sqrt(ar);
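// e.g. (illustrative): min_size = 30 and ar = 2 give box_width ≈ 42.43 and
// box_height ≈ 21.21; note the box area stays min_size^2 regardless of ar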
// xmin
top_data[idx++] = (center_x - box_width / 2.) / img_width;
// ymin
top_data[idx++] = (center_y - box_height / 2.) / img_height;
// xmax
top_data[idx++] = (center_x + box_width / 2.) / img_width;
// ymax
top_data[idx++] = (center_y + box_height / 2.) / img_height;
}
}
}
}
// clip defaults to false; whether to clamp out-of-range coordinates
// clip the prior's coordidate such that it is within [0, 1]
if (clip_) {
for (int d = 0; d < dim; ++d) {
top_data[d] = std::min<Dtype>(std::max<Dtype>(top_data[d], 0.), 1.);
}
}
// As mentioned above, the channel dimension of the output is 2: the first part stores the box data, the second part stores the variance
// set the variance.
top_data += top[0]->offset(0, 1);  // move the pointer to the start of the second channel
if (variance_.size() == 1) {
caffe_set(dim, Dtype(variance_[0]), top_data);
} else {
int count = 0;
for (int h = 0; h < layer_height; ++h) {
for (int w = 0; w < layer_width; ++w) {
for (int i = 0; i < num_priors_; ++i) {
for (int j = 0; j < 4; ++j) {
top_data[count] = variance_[j];
++count;
}
}
}
}
}
}
INSTANTIATE_CLASS(PriorBoxLayer);
REGISTER_LAYER_CLASS(PriorBox);
} // namespace caffe
The definition in caffe (from caffe.proto):
// Message that store parameters used by PriorBoxLayer
message PriorBoxParameter {
// Encode/decode type.
enum CodeType {  // encoding scheme; it affects training and the decoding done by the final DetectionOutput layer
CORNER = 1;
CENTER_SIZE = 2;
CORNER_SIZE = 3;
}
// Minimum box size (in pixels). Required!
repeated float min_size = 1;  // minimum box size
// Maximum box size (in pixels). Required!
repeated float max_size = 2;  // maximum box size
// Various of aspect ratios. Duplicate ratios will be ignored.
// If none is provided, we use default ratio 1.
repeated float aspect_ratio = 3;  // aspect ratios
// If true, will flip each aspect ratio.
// For example, if there is aspect ratio "r",
// we will generate aspect ratio "1.0/r" as well.
optional bool flip = 4 [default = true];  // whether to also add the reciprocal of each aspect ratio
// If true, will clip the prior so that it is within [0, 1]
optional bool clip = 5 [default = false];  // whether to clip the boxes to [0, 1]
// Variance for adjusting the prior bboxes.
repeated float variance = 6;
// By default, we calculate img_height, img_width, step_x, step_y based on
// bottom[0] (feat) and bottom[1] (img). Unless these values are explicitly
// provided.
// Explicitly provide the img_size.
optional uint32 img_size = 7;
// Either img_size or img_h/img_w should be specified; not both.
optional uint32 img_h = 8;
optional uint32 img_w = 9;
// Explicitly provide the step size.
optional float step = 10;
// Either step or step_h/step_w should be specified; not both.
optional float step_h = 11;
optional float step_w = 12;
// Offset to the top left corner of each cell.
optional float offset = 13 [default = 0.5];  // offset of the box center within each cell
optional bool reduce_boxes = 14 [default = false];
}
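For completeness, here is what a PriorBox layer typically looks like in an SSD prototxt. The values below follow the conv4_3 branch of the public SSD300 model (quoted from memory, so treat them as illustrative rather than authoritative):

layer {
  name: "conv4_3_norm_mbox_priorbox"
  type: "PriorBox"
  bottom: "conv4_3_norm"
  bottom: "data"
  top: "conv4_3_norm_mbox_priorbox"
  prior_box_param {
    min_size: 30.0
    max_size: 60.0
    aspect_ratio: 2
    flip: true
    clip: false
    variance: 0.1
    variance: 0.1
    variance: 0.2
    variance: 0.2
    step: 8
    offset: 0.5
  }
}

With flip: true this gives aspect_ratios_ = {1, 2, 1/2}, so num_priors_ = 3 + 1 = 4 per location, matching the 38×38 map with 4 priors per cell mentioned earlier.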