Caffe Study Notes (4): Defining and Testing a Custom Layer

Example: adding a Sin layer.

The main steps (the resulting file layout is sketched just after this list):

  1. Header file: include/caffe/layers/your_layer.hpp
  2. Layer definition: src/caffe/layers/your_layer.cpp and src/caffe/layers/your_layer.cu [optional]
  3. Test file: src/caffe/test/test_your_layer.cpp
  4. Run the tests from the build folder
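
Using the Sin layer as the concrete example, the new files land in the source tree like this:

caffe_root/
  include/caffe/layers/sin_layer.hpp
  src/caffe/layers/sin_layer.cpp
  src/caffe/layers/sin_layer.cu
  src/caffe/test/test_sin_layer.cpp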

Header file definition

Create the header file sin_layer.hpp under caffe_root/include/caffe/layers/ with the following content:

#ifndef CAFFE_SIN_LAYER_HPP_
#define CAFFE_SIN_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/neuron_layer.hpp"

namespace caffe {

template <typename Dtype>
class SinLayer : public NeuronLayer<Dtype> {
 public:
  explicit SinLayer(const LayerParameter& param)
      : NeuronLayer<Dtype>(param) {}

  // Must be overridden: returns the layer type string used in prototxt files.
  virtual inline const char* type() const { return "Sin"; }

 protected:
  // Must be overridden: these implement the forward and backward passes.
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);  
};

}  // namespace caffe

#endif  // CAFFE_SIN_LAYER_HPP_ 
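
SinLayer derives from NeuronLayer rather than directly from Layer because an activation takes exactly one bottom blob and produces one top blob of the same shape. For reference, NeuronLayer (declared in caffe/layers/neuron_layer.hpp) looks roughly like the sketch below, which is why SinLayer only has to supply the four propagation methods:

template <typename Dtype>
class NeuronLayer : public Layer<Dtype> {
 public:
  explicit NeuronLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  // Reshapes the top blob to the same shape as the bottom blob.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // Activations take exactly one input blob and produce exactly one output.
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }
};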

Layer definition

CPU version: .cpp

Place sin_layer.cpp under caffe_root/src/caffe/layers/. The forward pass computes top = sin(bottom) element-wise; the backward pass applies the chain rule, so bottom_diff = top_diff * cos(bottom):

// Sin neuron activation function layer.
// Adapted from TanH layer which was adapted from the ReLU layer code written by Yangqing Jia

#include <cmath>   // sin() and cos()
#include <vector>

#include "caffe/layers/sin_layer.hpp"

namespace caffe {

template <typename Dtype>
void SinLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
                                  const vector<Blob<Dtype>*>& top) 
{ 
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int count = bottom[0]->count();
  for (int i = 0; i < count; ++i) {
    top_data[i] = sin(bottom_data[i]);
  }
}

template <typename Dtype>
void SinLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
                                    const vector<bool>& propagate_down,
                                    const vector<Blob<Dtype>*>& bottom) 
{ 
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    Dtype bottom_datum;
    for (int i = 0; i < count; ++i) {
      bottom_datum = bottom_data[i];
      bottom_diff[i] = top_diff[i] * cos(bottom_datum);
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(SinLayer);
#endif

INSTANTIATE_CLASS(SinLayer);
REGISTER_LAYER_CLASS(Sin);

}  // namespace caffe    
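
INSTANTIATE_CLASS instantiates the float and double versions of the template, REGISTER_LAYER_CLASS(Sin) registers the layer with Caffe's layer factory, and the STUB_GPU block makes Forward_gpu/Backward_gpu abort with an error in a CPU_ONLY build. Once the layer is registered and Caffe rebuilt, it can be used in a net prototxt like any built-in layer; a minimal sketch (the layer and blob names here are arbitrary):

layer {
  name: "sin1"
  type: "Sin"      # must match the string returned by type()
  bottom: "data"
  top: "sin1"
}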

GPU version: .cu

Place sin_layer.cu under caffe_root/src/caffe/layers/:

// Sin neuron activation function layer.
// Adapted from TanH layer which was adapted from the ReLU layer code written by Yangqing Jia

#include <vector>

#include "caffe/layers/sin_layer.hpp"

namespace caffe {

template <typename Dtype>
__global__ void SinForward(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = sin(in[index]);
  }
}

template <typename Dtype>
void SinLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  SinForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}

template <typename Dtype>
__global__ void SinBackward(const int n, const Dtype* in_diff,
    const Dtype* out_data, Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype x = out_data[index];  // out_data here holds the bottom (input) values
    out_diff[index] = in_diff[index] * cos(x);
  }
}

template <typename Dtype>
void SinLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    SinBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, bottom_data, bottom_diff);
    CUDA_POST_KERNEL_CHECK;
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SinLayer);


}  // namespace caffe
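
CUDA_KERNEL_LOOP and CAFFE_GET_BLOCKS are helpers from Caffe's CUDA utility headers. CAFFE_GET_BLOCKS(count) picks enough blocks of CAFFE_CUDA_NUM_THREADS threads to cover count elements, and CUDA_KERNEL_LOOP implements a standard grid-stride loop; it expands to roughly the following:

// Grid-stride loop: each thread starts at its global index and advances by
// the total number of launched threads until all n elements are covered.
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)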

Testing the layer

Add a test file to check that the new Sin layer behaves correctly. Create test_sin_layer.cpp under caffe_root/src/caffe/test/:

#include <algorithm>  // std::max
#include <cmath>      // sin(), std::abs
#include <vector>

#include "gtest/gtest.h"

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"

#include "caffe/test/test_caffe_main.hpp"
#include "caffe/test/test_gradient_check_util.hpp"

// include the layer we are testing!
#include "caffe/layers/sin_layer.hpp"

namespace caffe {

template <typename TypeParam>
class SinLayerTest : public MultiDeviceTest<TypeParam> {
  typedef typename TypeParam::Dtype Dtype;

 protected:
  SinLayerTest()
      : blob_bottom_(new Blob<Dtype>(2, 3, 4, 5)),
        blob_top_(new Blob<Dtype>())
  {
    Caffe::set_random_seed(1701);
    FillerParameter filler_param;
    blob_bottom_vec_.push_back(blob_bottom_);
    blob_top_vec_.push_back(blob_top_);
  }
  virtual ~SinLayerTest() { delete blob_bottom_; delete blob_top_; }

  // Test the forward pass: fill the bottom blob with Gaussian noise and
  // compare the layer's output against sin() computed directly.
  void TestForward(Dtype filler_std)
  {
    FillerParameter filler_param;
    filler_param.set_std(filler_std);
    GaussianFiller<Dtype> filler(filler_param);
    filler.Fill(this->blob_bottom_);

    LayerParameter layer_param;
    SinLayer<Dtype> layer(layer_param);
    layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
    layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
    // Now, check values
    const Dtype* bottom_data = this->blob_bottom_->cpu_data();
    const Dtype* top_data = this->blob_top_->cpu_data();
    const Dtype min_precision = 1e-5;
    for (int i = 0; i < this->blob_bottom_->count(); ++i) {
      Dtype expected_value = sin(bottom_data[i]);
      Dtype precision = std::max(
        Dtype(std::abs(expected_value * Dtype(1e-4))), min_precision);
      EXPECT_NEAR(expected_value, top_data[i], precision);
    }
  }

  // Test the backward pass: check the analytic gradient from Backward
  // against a numeric finite-difference gradient.
  void TestBackward(Dtype filler_std)
  {
    FillerParameter filler_param;
    filler_param.set_std(filler_std);
    GaussianFiller<Dtype> filler(filler_param);
    filler.Fill(this->blob_bottom_);

    LayerParameter layer_param;
    SinLayer<Dtype> layer(layer_param);
    GradientChecker<Dtype> checker(1e-4, 1e-2, 1701);
    checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
        this->blob_top_vec_);
  }

  Blob<Dtype>* const blob_bottom_;
  Blob<Dtype>* const blob_top_;
  vector<Blob<Dtype>*> blob_bottom_vec_;
  vector<Blob<Dtype>*> blob_top_vec_;
};

// Instantiate the test case for every Dtype/device combination (float/double, CPU/GPU).
TYPED_TEST_CASE(SinLayerTest, TestDtypesAndDevices);

// Check that the forward pass computes sin(x).
TYPED_TEST(SinLayerTest, TestSin) {
  this->TestForward(1.0);
}

// Check that backpropagation computes the gradient correctly.
TYPED_TEST(SinLayerTest, TestSinGradient) {
  this->TestBackward(1.0);
}

}  // namespace caffe
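
GradientChecker(1e-4, 1e-2, 1701) perturbs each input element by ±1e-4, estimates the gradient by central differences, and requires the analytic gradient produced by Backward to agree within a (scaled) tolerance of 1e-2; 1701 is a random seed. A standalone sketch of the idea, assuming nothing beyond the standard library and not the actual Caffe implementation:

#include <cmath>
#include <functional>

// Estimate df/dx by central differences, as a gradient checker does
// per element: (f(x + h) - f(x - h)) / (2h) is O(h^2) accurate.
template <typename Dtype>
Dtype NumericGradient(const std::function<Dtype(Dtype)>& f, Dtype x, Dtype h) {
  return (f(x + h) - f(x - h)) / (2 * h);
}

int main() {
  const double x = 0.3, h = 1e-4;
  const double analytic = std::cos(x);  // d/dx sin(x) = cos(x)
  const double numeric = NumericGradient<double>(
      [](double v) { return std::sin(v); }, x, h);
  // A checker would require |analytic - numeric| to be within tolerance.
  return std::fabs(analytic - numeric) < 1e-2 ? 0 : 1;
}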

Run the tests from the build folder:

cmake ..
make all -j8
make test
make runtest GTEST_FILTER='SinLayerTest/*'

The suite name used in GTEST_FILTER (here SinLayerTest) is the test class name defined in the corresponding test file.
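
If everything is wired up correctly, the filtered run should print gtest results along these lines (an illustrative excerpt, not captured output; the test count depends on which Dtype/device combinations your build enables):

[ RUN      ] SinLayerTest/0.TestSin
[       OK ] SinLayerTest/0.TestSin (0 ms)
...
[  PASSED  ] 8 tests.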
