How to Add a New Layer to the 20151230 Version of Caffe (Part 1)

Caffe evolves continuously, so a tutorial written half a year ago may no longer apply to the current version.

I am therefore recording this modification exercise for the benefit of those who come after.

First, download Caffe from this fork:

https://github.com/yjxiong/caffe

This fork has good multi-GPU support (implemented with MPI), uses comparatively little GPU memory, and added cuDNN v4 support early on, so it is a pleasant codebase to follow.

The most naive first step is to add a layer that duplicates the functionality of the Convolution layer, mainly following

http://blog.csdn.net/kuaitoukid/article/details/41865803

As in that post, we name the new layer the Wtf layer.

The modification steps:

1. Add the WtfLayer definition to include/caffe/vision_layers.hpp. We do not implement GPU functions here, so the GPU declarations are commented out; Caffe's base Layer class provides default Forward_gpu/Backward_gpu implementations that fall back to the CPU versions, so the layer still works in a GPU build.

<span class="hljs-keyword">template</span> <<span class="hljs-keyword">typename</span> Dtype>
<span class="hljs-keyword">class</span> WtfLayer : <span class="hljs-keyword">public</span> BaseConvolutionLayer<Dtype> {
 <span class="hljs-keyword">public</span>:

  <span class="hljs-function"><span class="hljs-keyword">explicit</span> <span class="hljs-title">WtfLayer</span><span class="hljs-params">(<span class="hljs-keyword">const</span> LayerParameter& param)</span>
      : BaseConvolutionLayer<Dtype><span class="hljs-params">(param)</span> </span>{}

  <span class="hljs-keyword">virtual</span> <span class="hljs-keyword">inline</span> <span class="hljs-keyword">const</span> <span class="hljs-keyword">char</span>* type() <span class="hljs-keyword">const</span> { <span class="hljs-keyword">return</span> <span class="hljs-string">"Wtf"</span>; }

 <span class="hljs-keyword">protected</span>:
  <span class="hljs-function"><span class="hljs-keyword">virtual</span> <span class="hljs-keyword">void</span> <span class="hljs-title">Forward_cpu</span><span class="hljs-params">(<span class="hljs-keyword">const</span> <span class="hljs-built_in">vector</span><Blob<Dtype>*>& bottom,
      <span class="hljs-keyword">const</span> <span class="hljs-built_in">vector</span><Blob<Dtype>*>& top)</span></span>;
  <span class="hljs-comment">//virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,</span>
  <span class="hljs-comment">//    const vector<Blob<Dtype>*>& top);</span>
  <span class="hljs-function"><span class="hljs-keyword">virtual</span> <span class="hljs-keyword">void</span> <span class="hljs-title">Backward_cpu</span><span class="hljs-params">(<span class="hljs-keyword">const</span> <span class="hljs-built_in">vector</span><Blob<Dtype>*>& top,
      <span class="hljs-keyword">const</span> <span class="hljs-built_in">vector</span><<span class="hljs-keyword">bool</span>>& propagate_down, <span class="hljs-keyword">const</span> <span class="hljs-built_in">vector</span><Blob<Dtype>*>& bottom)</span></span>;
  <span class="hljs-comment">//virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,</span>
  <span class="hljs-comment">//    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);</span>
  <span class="hljs-function"><span class="hljs-keyword">virtual</span> <span class="hljs-keyword">inline</span> <span class="hljs-keyword">bool</span> <span class="hljs-title">reverse_dimensions</span><span class="hljs-params">()</span> </span>{ <span class="hljs-keyword">return</span> <span class="hljs-keyword">false</span>; }
  <span class="hljs-function"><span class="hljs-keyword">virtual</span> <span class="hljs-keyword">void</span> <span class="hljs-title">compute_output_shape</span><span class="hljs-params">()</span></span>;
};
2. Add Wtf_layer.cpp to the src/caffe/layers folder. Its contents can be copied from convolution_layer.cpp, renaming ConvolutionLayer to WtfLayer throughout; note that at the end you must add REGISTER_LAYER_CLASS(Wtf); to register the Wtf layer.
<span class="hljs-preprocessor">#<span class="hljs-keyword">include</span> <vector></span>

<span class="hljs-preprocessor">#<span class="hljs-keyword">include</span> "caffe/filler.hpp"</span>
<span class="hljs-preprocessor">#<span class="hljs-keyword">include</span> "caffe/layer.hpp"</span>
<span class="hljs-preprocessor">#<span class="hljs-keyword">include</span> "caffe/util/im2col.hpp"</span>
<span class="hljs-preprocessor">#<span class="hljs-keyword">include</span> "caffe/util/math_functions.hpp"</span>
<span class="hljs-preprocessor">#<span class="hljs-keyword">include</span> "caffe/vision_layers.hpp"</span>

<span class="hljs-keyword">namespace</span> caffe {

<span class="hljs-keyword">template</span> <<span class="hljs-keyword">typename</span> Dtype>
<span class="hljs-keyword">void</span> WtfLayer<Dtype>::compute_output_shape() {
  <span class="hljs-keyword">this</span>->height_out_ = (<span class="hljs-keyword">this</span>->height_ + <span class="hljs-number">2</span> * <span class="hljs-keyword">this</span>->pad_h_ - <span class="hljs-keyword">this</span>->kernel_h_)
      / <span class="hljs-keyword">this</span>->stride_h_ + <span class="hljs-number">1</span>;
  <span class="hljs-keyword">this</span>->width_out_ = (<span class="hljs-keyword">this</span>->width_ + <span class="hljs-number">2</span> * <span class="hljs-keyword">this</span>->pad_w_ - <span class="hljs-keyword">this</span>->kernel_w_)
      / <span class="hljs-keyword">this</span>->stride_w_ + <span class="hljs-number">1</span>;
}
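// Illustration: with a 224x224 bottom, kernel_h_ = kernel_w_ = 3,
// pad_h_ = pad_w_ = 1 and stride_h_ = stride_w_ = 1, this yields
// (224 + 2*1 - 3) / 1 + 1 = 224, i.e. a same-size output, exactly
// as ConvolutionLayer would compute.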

<span class="hljs-keyword">template</span> <<span class="hljs-keyword">typename</span> Dtype>
<span class="hljs-keyword">void</span> WtfLayer<Dtype>::Forward_cpu(<span class="hljs-keyword">const</span> <span class="hljs-stl_container"><span class="hljs-built_in">vector</span><Blob<Dtype></span>*>& bottom,
      <span class="hljs-keyword">const</span> <span class="hljs-stl_container"><span class="hljs-built_in">vector</span><Blob<Dtype></span>*>& top) {
  <span class="hljs-keyword">const</span> Dtype* weight = <span class="hljs-keyword">this</span>->blobs_[<span class="hljs-number">0</span>]->cpu_data();
  <span class="hljs-keyword">for</span> (<span class="hljs-keyword">int</span> i = <span class="hljs-number">0</span>; i < bottom.size(); ++i) {
    <span class="hljs-keyword">const</span> Dtype* bottom_data = bottom[i]->cpu_data();
    Dtype* top_data = top[i]->mutable_cpu_data();
    <span class="hljs-keyword">for</span> (<span class="hljs-keyword">int</span> n = <span class="hljs-number">0</span>; n < <span class="hljs-keyword">this</span>->num_; ++n) {
      <span class="hljs-keyword">this</span>->forward_cpu_gemm(bottom_data + bottom[i]->offset(n), weight,
          top_data + top[i]->offset(n));
      <span class="hljs-keyword">if</span> (<span class="hljs-keyword">this</span>->bias_term_) {
        <span class="hljs-keyword">const</span> Dtype* bias = <span class="hljs-keyword">this</span>->blobs_[<span class="hljs-number">1</span>]->cpu_data();
        <span class="hljs-keyword">this</span>->forward_cpu_bias(top_data + top[i]->offset(n), bias);
      }
    }
  }
}

<span class="hljs-keyword">template</span> <<span class="hljs-keyword">typename</span> Dtype>
<span class="hljs-keyword">void</span> WtfLayer<Dtype>::Backward_cpu(<span class="hljs-keyword">const</span> <span class="hljs-stl_container"><span class="hljs-built_in">vector</span><Blob<Dtype></span>*>& top,
      <span class="hljs-keyword">const</span> <span class="hljs-stl_container"><span class="hljs-built_in">vector</span><<span class="hljs-keyword">bool</span>></span>& propagate_down, <span class="hljs-keyword">const</span> <span class="hljs-stl_container"><span class="hljs-built_in">vector</span><Blob<Dtype></span>*>& bottom) {
  <span class="hljs-keyword">const</span> Dtype* weight = <span class="hljs-keyword">this</span>->blobs_[<span class="hljs-number">0</span>]->cpu_data();
  Dtype* weight_diff = <span class="hljs-keyword">this</span>->blobs_[<span class="hljs-number">0</span>]->mutable_cpu_diff();
  <span class="hljs-keyword">for</span> (<span class="hljs-keyword">int</span> i = <span class="hljs-number">0</span>; i < top.size(); ++i) {
    <span class="hljs-keyword">const</span> Dtype* top_diff = top[i]->cpu_diff();
    <span class="hljs-keyword">const</span> Dtype* bottom_data = bottom[i]->cpu_data();
    Dtype* bottom_diff = bottom[i]->mutable_cpu_diff();
    <span class="hljs-comment">// Bias gradient, if necessary.</span>
    <span class="hljs-keyword">if</span> (<span class="hljs-keyword">this</span>->bias_term_ && <span class="hljs-keyword">this</span>->param_propagate_down_[<span class="hljs-number">1</span>]) {
      Dtype* bias_diff = <span class="hljs-keyword">this</span>->blobs_[<span class="hljs-number">1</span>]->mutable_cpu_diff();
      <span class="hljs-keyword">for</span> (<span class="hljs-keyword">int</span> n = <span class="hljs-number">0</span>; n < <span class="hljs-keyword">this</span>->num_; ++n) {
        <span class="hljs-keyword">this</span>->backward_cpu_bias(bias_diff, top_diff + top[i]->offset(n));
      }
    }
    <span class="hljs-keyword">if</span> (<span class="hljs-keyword">this</span>->param_propagate_down_[<span class="hljs-number">0</span>] || propagate_down[i]) {
      <span class="hljs-keyword">for</span> (<span class="hljs-keyword">int</span> n = <span class="hljs-number">0</span>; n < <span class="hljs-keyword">this</span>->num_; ++n) {
        <span class="hljs-comment">// gradient w.r.t. weight. Note that we will accumulate diffs.</span>
        <span class="hljs-keyword">if</span> (<span class="hljs-keyword">this</span>->param_propagate_down_[<span class="hljs-number">0</span>]) {
          <span class="hljs-keyword">this</span>->weight_cpu_gemm(bottom_data + bottom[i]->offset(n),
              top_diff + top[i]->offset(n), weight_diff);
        }
        <span class="hljs-comment">// gradient w.r.t. bottom data, if necessary.</span>
        <span class="hljs-keyword">if</span> (propagate_down[i]) {
          <span class="hljs-keyword">this</span>->backward_cpu_gemm(top_diff + top[i]->offset(n), weight,
              bottom_diff + bottom[i]->offset(n));
        }
      }
    }
  }
}

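// Note: STUB_GPU(WtfLayer) defines Forward_gpu/Backward_gpu for the class,
// so with the GPU declarations commented out in vision_layers.hpp this block
// would not compile in a CPU_ONLY build; in that case simply drop it.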
<span class="hljs-preprocessor">#ifdef CPU_ONLY</span>
STUB_GPU(WtfLayer);
<span class="hljs-preprocessor">#<span class="hljs-keyword">endif</span></span>

INSTANTIATE_CLASS(WtfLayer);
REGISTER_LAYER_CLASS(Wtf);

}  // namespace caffe
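For orientation, REGISTER_LAYER_CLASS(Wtf) is what makes type: "Wtf" resolvable at run time. A rough sketch of what the macro expands to (based on caffe/layer_factory.hpp of this era; treat the details as approximate):

template <typename Dtype>
shared_ptr<Layer<Dtype> > Creator_WtfLayer(const LayerParameter& param) {
  // Instantiate a WtfLayer from its LayerParameter.
  return shared_ptr<Layer<Dtype> >(new WtfLayer<Dtype>(param));
}
REGISTER_LAYER_CREATOR(Wtf, Creator_WtfLayer);

The macro token-pastes the registered name with "Layer", which is why the class must be called WtfLayer and why type() should return exactly "Wtf".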
3. Modify src/caffe/proto/caffe.proto: find LayerType, add a WTF entry, and update the IDs accordingly. If Wtf_Layer takes parameters, as Convolution certainly does, also add a WtfParameter message. Concretely, add the following lines. In the LayerParameter message, a field for the new parameters (141 was the next free layer-specific field ID here; check the comment at the top of LayerParameter in your copy):

optional WtfParameter wtf_param = 141;

along with the message definition (copied from ConvolutionParameter):
message WtfParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  optional bool bias_term = 2 [default = true]; // whether to have bias terms
  // Pad, kernel size, and stride are all given as a single value for equal
  // dimensions in height and width or as Y, X pairs.
  optional uint32 pad = 3 [default = 0]; // The padding size (equal in Y, X)
  optional uint32 pad_h = 9 [default = 0]; // The padding height
  optional uint32 pad_w = 10 [default = 0]; // The padding width
  optional uint32 kernel_size = 4; // The kernel size (square)
  optional uint32 kernel_h = 11; // The kernel height
  optional uint32 kernel_w = 12; // The kernel width
  optional uint32 group = 5 [default = 1]; // The group size for group conv
  optional uint32 stride = 6 [default = 1]; // The stride (equal in Y, X)
  optional uint32 stride_h = 13; // The stride height
  optional uint32 stride_w = 14; // The stride width
  optional FillerParameter weight_filler = 7; // The filler for the weight
  optional FillerParameter bias_filler = 8; // The filler for the bias
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 15 [default = DEFAULT];
}
and, where the LayerType enum is defined, the new entry together with the matching parameter field in that (legacy) message:

WTF = 52;

optional WtfParameter wtf_param = 53;
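Once this is in place, the new layer can be used from a network prototxt. A minimal sketch (layer and blob names are made up; note that since WtfLayer inherits BaseConvolutionLayer unchanged, its setup code still parses convolution_param, so for a quick drop-in test it is easiest to keep the convolution_param block and change only the type):

layer {
  name: "wtf1"
  type: "Wtf"
  bottom: "data"
  top: "wtf1"
  convolution_param {
    num_output: 64
    kernel_size: 3
    pad: 1
    stride: 1
    weight_filler { type: "xavier" }
  }
}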


Next, rebuild Caffe; everything should compile (with the Makefile build, something along the lines of make clean followed by make -j8).

How do we test that the layer we wrote is correct?

1. Change type: "Convolution" to type: "Wtf" for some layers in an existing network (cf. the prototxt sketch above) and check that the Wtf version's loss decreases the same way as with Convolution.

2. Given a trained model and its prototxt, use the MATLAB or Python interface to verify that the layer reproduces the convolution results.


That is it for now; subsequent posts will continue adding layers.
