Foreword: "Holistically-Nested Edge Detection" (HED) is an edge detection paper by Saining Xie, a PhD student in Zhuowen Tu's group, published at ICCV 2015; it received an ICCV Marr Prize nomination.
Paper: http://arxiv.org/abs/1504.06375
Github: https://github.com/s9xie/hed
Update: for the new version of Caffe, the HED implementation provided at https://github.com/yun-liu/rcf can be used directly.
Preface: the Caffe fork provided by HED's author Saining Xie is quite old, and its source differs substantially from the latest Caffe. To train HED with the new Caffe, the layers HED depends on must be added:
1. Add the relevant layer information and parameters in caffe.proto.
2. Add the LabelmapBatch and BasePrefetchingLabelmapDataLayer classes in base_data_layer.
3. Modify blocking_queue.cpp to support LabelmapBatch.
4. Add the new layer image_labelmap_data_layer, derived from BasePrefetchingLabelmapDataLayer.
5. Add HED's loss layer sigmoid_cross_entropy_loss_hed_layer, because HED reimplements a weighted (class-balanced) cross_entropy_loss.
6. Add HED's crop layer crop_hed_layer, because HED crops with the center point as the reference, unlike Caffe's stock crop layer.
BSDS500 benchmark: this port reaches F-measure = 0.776 on the BSDS500 edge detection dataset (Saining Xie's original HED release reports 0.780).
caffe.proto
message LayerParameter needs no modification, because image_labelmap_data_layer shares its parameter structure with image_data_layer, sigmoid_cross_entropy_loss_hed_layer shares its parameter structure with sigmoid_layer, and crop_hed_layer takes no parameters.
In the LayerType enum of message V1LayerParameter, add entries for the new layers (any unused IDs will do):
IMAGE_LABELMAP_DATA = 43;
SIGMOID_CROSS_ENTROPY_LOSS_HED = 44;
CROP_HED = 45;
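Once the proto entries exist, the data layer can be referenced from a training prototxt. A minimal sketch, assuming the layer is registered under the type name "ImageLabelmapData" and reuses image_data_param as described above; the source list path is hypothetical (one "image_path labelmap_path" pair per line):

layer {
  name: "data"
  type: "ImageLabelmapData"   # assumed registered type name
  top: "data"
  top: "label"
  image_data_param {
    source: "train_pair.lst"  # hypothetical list of "image labelmap" path pairs
    batch_size: 1
    shuffle: true
  }
}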
base_data_layer
base_data_layer.hpp: add the LabelmapBatch and BasePrefetchingLabelmapDataLayer classes inside the caffe namespace.
template <typename Dtype>
class LabelmapBatch {
 public:
  Blob<Dtype> data_, labelmap_;
};

template <typename Dtype>
class BasePrefetchingLabelmapDataLayer :
    public BaseDataLayer<Dtype>, public InternalThread {
 public:
  explicit BasePrefetchingLabelmapDataLayer(const LayerParameter& param);
  // LayerSetUp: implements common data layer setup functionality, and calls
  // DataLayerSetUp to do special data layer setup for individual layer types.
  // This method may not be overridden.
  void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  // Prefetches batches (asynchronously if to GPU memory)
  static const int PREFETCH_COUNT = 3;

 protected:
  virtual void InternalThreadEntry();
  virtual void load_batch(LabelmapBatch<Dtype>* labelmapbatch) = 0;

  LabelmapBatch<Dtype> prefetch_[PREFETCH_COUNT];
  BlockingQueue<LabelmapBatch<Dtype>*> prefetch_free_;
  BlockingQueue<LabelmapBatch<Dtype>*> prefetch_full_;

  Blob<Dtype> transformed_data_;
  Blob<Dtype> transformed_labelmap_;
};
base_data_layer.cpp: implementations of the member functions declared in the header:
template <typename Dtype>
BasePrefetchingLabelmapDataLayer<Dtype>::BasePrefetchingLabelmapDataLayer(
    const LayerParameter& param)
    : BaseDataLayer<Dtype>(param),
      prefetch_free_(), prefetch_full_() {
  for (int i = 0; i < PREFETCH_COUNT; ++i) {
    prefetch_free_.push(&prefetch_[i]);
  }
}

template <typename Dtype>
void BasePrefetchingLabelmapDataLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  BaseDataLayer<Dtype>::LayerSetUp(bottom, top);
  // Before starting the prefetch thread, we make cpu_data and gpu_data
  // calls so that the prefetch thread does not accidentally make simultaneous
  // cudaMalloc calls when the main thread is running. In some GPUs this
  // seems to cause failures if we do not do so.
  for (int i = 0; i < PREFETCH_COUNT; ++i) {
    prefetch_[i].data_.mutable_cpu_data();
    prefetch_[i].labelmap_.mutable_cpu_data();
  }
#ifndef CPU_ONLY
  if (Caffe::mode() == Caffe::GPU) {
    for (int i = 0; i < PREFETCH_COUNT; ++i) {
      prefetch_[i].data_.mutable_gpu_data();
      prefetch_[i].labelmap_.mutable_gpu_data();
    }
  }
#endif
  DLOG(INFO) << "Initializing prefetch";
  this->data_transformer_->InitRand();
  StartInternalThread();
  DLOG(INFO) << "Prefetch initialized.";
}

template <typename Dtype>
void BasePrefetchingLabelmapDataLayer<Dtype>::InternalThreadEntry() {
#ifndef CPU_ONLY
  cudaStream_t stream;
  if (Caffe::mode() == Caffe::GPU) {
    CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
  }
#endif
  try {
    while (!must_stop()) {
      LabelmapBatch<Dtype>* batch = prefetch_free_.pop();
      load_batch(batch);
#ifndef CPU_ONLY
      if (Caffe::mode() == Caffe::GPU) {
        batch->data_.data().get()->async_gpu_push(stream);
        CUDA_CHECK(cudaStreamSynchronize(stream));
      }
#endif
      prefetch_full_.push(batch);
    }
  } catch (boost::thread_interrupted&) {
    // Interrupted exception is expected on shutdown
  }
#ifndef CPU_ONLY
  if (Caffe::mode() == Caffe::GPU) {
    CUDA_CHECK(cudaStreamDestroy(stream));
  }
#endif
}

template <typename Dtype>
void BasePrefetchingLabelmapDataLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LabelmapBatch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
  // Reshape to loaded data.
  top[0]->ReshapeLike(batch->data_);
  // Copy the data
  caffe_copy(batch->data_.count(), batch->data_.cpu_data(),
             top[0]->mutable_cpu_data());
  DLOG(INFO) << "Prefetch copied";
  // Reshape to loaded labels.
  top[1]->ReshapeLike(batch->labelmap_);
  // Copy the labels.
  caffe_copy(batch->labelmap_.count(), batch->labelmap_.cpu_data(),
             top[1]->mutable_cpu_data());
  prefetch_free_.push(batch);
}

#ifdef CPU_ONLY
STUB_GPU_FORWARD(BasePrefetchingLabelmapDataLayer, Forward);
#endif

INSTANTIATE_CLASS(BasePrefetchingLabelmapDataLayer);
base_data_layer.cu:
template <typename Dtype>
void BasePrefetchingLabelmapDataLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LabelmapBatch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
  // Reshape to loaded data.
  top[0]->ReshapeLike(batch->data_);
  // Copy the data
  caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
             top[0]->mutable_gpu_data());
  // Reshape to loaded labels.
  top[1]->ReshapeLike(batch->labelmap_);
  // Copy the labels.
  caffe_copy(batch->labelmap_.count(), batch->labelmap_.gpu_data(),
             top[1]->mutable_gpu_data());
  // Ensure the copy is synchronous wrt the host, so that the next batch isn't
  // copied in meanwhile.
  CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
  prefetch_free_.push(batch);
}

INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingLabelmapDataLayer);
blocking_queue.cpp
This file is located in $CAFFE_ROOT/src/caffe/util/. Add explicit template instantiations for the new batch type:
template class BlockingQueue<LabelmapBatch<float>*>;
template class BlockingQueue<LabelmapBatch<double>*>;
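For these instantiations to compile, blocking_queue.cpp must also see the definition of LabelmapBatch; assuming LabelmapBatch is declared in base_data_layer.hpp as above (include path per the post-2016 Caffe layout):

// near the top of $CAFFE_ROOT/src/caffe/util/blocking_queue.cpp
#include "caffe/layers/base_data_layer.hpp"  // declares Batch and the new LabelmapBatch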
image_labelmap_data_layer
Copy this layer's header and source files from the HED repository directly into the corresponding locations. For the headers it needs to include, refer to image_data_layer.hpp and image_data_layer.cpp.
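For orientation, the class declaration in the copied image_labelmap_data_layer.hpp looks roughly like the following; this is a sketch modeled on ImageDataLayer, not the verbatim HED code, and the member names are illustrative:

template <typename Dtype>
class ImageLabelmapDataLayer : public BasePrefetchingLabelmapDataLayer<Dtype> {
 public:
  explicit ImageLabelmapDataLayer(const LayerParameter& param)
      : BasePrefetchingLabelmapDataLayer<Dtype>(param) {}
  virtual ~ImageLabelmapDataLayer();
  virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual inline const char* type() const { return "ImageLabelmapData"; }
  virtual inline int ExactNumBottomBlobs() const { return 0; }
  virtual inline int ExactNumTopBlobs() const { return 2; }  // data + labelmap

 protected:
  virtual void ShuffleImages();
  // Fills one LabelmapBatch with images and their edge label maps.
  virtual void load_batch(LabelmapBatch<Dtype>* batch);

  vector<std::pair<std::string, std::string> > lines_;  // image/labelmap path pairs
  int lines_id_;
};

The source file should end with INSTANTIATE_CLASS(ImageLabelmapDataLayer) and REGISTER_LAYER_CLASS(ImageLabelmapData) so the layer factory can construct it from the type name used in the prototxt.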
sigmoid_cross_entropy_loss_hed_layer
Rename the header and source files of HED's sigmoid_cross_entropy_loss layer and copy them into the corresponding locations.
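For context on why the stock loss cannot be reused: HED balances edge and non-edge pixels per image, weighting positives by beta = |Y-| / |Y| and negatives by 1 - beta. A minimal sketch of such a class-balanced forward pass (illustrative, not the verbatim HED code; it assumes the sigmoid_layer_ and sigmoid_bottom_vec_/sigmoid_top_vec_ members inherited from the copied sigmoid_cross_entropy_loss implementation):

template <typename Dtype>
void SigmoidCrossEntropyLossHedLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // Run the sigmoid over the raw predictions.
  sigmoid_bottom_vec_[0] = bottom[0];
  sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
  const int count = bottom[0]->count();
  const Dtype* input_data = bottom[0]->cpu_data();
  const Dtype* target = bottom[1]->cpu_data();
  // Count edge (positive) and non-edge (negative) pixels in the label map.
  int count_pos = 0, count_neg = 0;
  for (int i = 0; i < count; ++i) {
    if (target[i] > 0.5) { ++count_pos; } else { ++count_neg; }
  }
  // Class-balancing weights from the HED paper: beta = |Y-| / |Y| for
  // positives, 1 - beta for negatives.
  const Dtype weight_pos = Dtype(count_neg) / Dtype(count);
  const Dtype weight_neg = Dtype(count_pos) / Dtype(count);
  Dtype loss = 0;
  for (int i = 0; i < count; ++i) {
    const Dtype w = (target[i] > 0.5) ? weight_pos : weight_neg;
    // Numerically stable sigmoid cross-entropy (same form as stock Caffe),
    // weighted per pixel.
    loss -= w * (input_data[i] * (target[i] - (input_data[i] >= 0)) -
        log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))));
  }
  top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num();
}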
crop_hed_layer
Copy Caffe's crop_layer.cpp, crop_layer.cu, and crop_layer.hpp, rename the copies to crop_hed_layer, and modify the LayerSetUp and Reshape functions in crop_hed_layer.cpp:
template <typename Dtype>
void CropHedLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // LayerSetup() handles the number of dimensions; Reshape() handles the sizes.
  // bottom[0] supplies the data
  // bottom[1] supplies the size
  CHECK_EQ(bottom.size(), 2) << "Wrong number of bottom blobs.";
  int input_dim = bottom[0]->num_axes();
  CHECK_EQ(input_dim, bottom[1]->num_axes()) << "Bottom blobs have different numbers of dimensions.";
}

template <typename Dtype>
void CropHedLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int input_dim = bottom[0]->num_axes();
  // Initialize offsets to 0 and the new shape to the current shape of the data.
  offsets = vector<int>(input_dim, 0);
  vector<int> new_shape(bottom[0]->shape());
  // Determine crop offsets and the new shape post-crop.
  for (int i = 0; i < input_dim; ++i) {
    int crop_offset = 0;
    int new_size = bottom[0]->shape(i);
    if (i >= 2) {
      new_size = bottom[1]->shape(i);
      // Center the crop: the offset is half the size difference, rounded.
      crop_offset = round(double(bottom[0]->shape(i) - bottom[1]->shape(i)) / 2);
      // Check that the crop and offset are within the dimension's bounds.
      CHECK_GE(bottom[0]->shape(i) - crop_offset, bottom[1]->shape(i))
          << "the crop for dimension " << i << " is out-of-bounds with "
          << "size " << bottom[1]->shape(i) << " and offset " << crop_offset;
    }
    new_shape[i] = new_size;
    offsets[i] = crop_offset;
  }
  top[0]->Reshape(new_shape);
}
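In the network definition, this layer then crops each (upsampled) side-output map back to the spatial size of a reference blob, for example (type name and blob names assumed for illustration):

layer {
  name: "crop-dsn2"
  type: "CropHed"          # assumed registered type name
  bottom: "upscore-dsn2"   # upsampled side-output logits, at least H x W
  bottom: "data"           # reference blob supplying the target H x W
  top: "upscore-dsn2-crop"
}

Because the offset along each spatial axis is round((bottom_size - top_size)/2), the crop is centered on the larger map, whereas Caffe's stock Crop layer reads its offsets from crop_param.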