caffe source code walkthrough (1): getting to know protobuf
caffe source code walkthrough (2): an introduction to the Blob data structure
caffe source code walkthrough (3): parsing the Blob.hpp header
caffe source code walkthrough (4): parsing Blob.cpp
caffe source code walkthrough (5): the Layer data structure
===========================================================================
The Layer header file lives at include/caffe/layer.hpp. Its contents, together with annotations, are as follows:
#ifndef CAFFE_LAYER_H_
#define CAFFE_LAYER_H_
#include <algorithm>
#include <string>
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer_factory.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/math_functions.hpp"
/**
Forward declare boost::thread instead of including boost/thread.hpp
to avoid a boost/NVCC issues (#1009, #1010) on OSX.
*/
namespace boost { class mutex; }
namespace caffe {
/**
* @brief An interface for the units of computation which can be composed into a
* Net.
*
* Layer%s must implement a Forward function, in which they take their input
* (bottom) Blob%s (if any) and compute their output Blob%s (if any).
* They may also implement a Backward function, in which they compute the error
* gradients with respect to their input Blob%s, given the error gradients with
* their output Blob%s.
*/
// Three important members of Layer:
// 1. layer_param_: the layer parameters stored in the protobuf message
// 2. blobs_: the parameters the layer learns and uses at run time (weights and biases)
// 3. param_propagate_down_: bool flags indicating whether to compute the diff of each
//    parameter blob, i.e. whether to propagate the error back through it
//    vector<shared_ptr<Blob<Dtype> > > blobs_;
template <typename Dtype>
class Layer {
public:
/**
* You should not implement your own constructor. Any set up code should go
* to SetUp(), where the dimensions of the bottom blobs are provided to the
* layer.
*/
// Explicit constructor: loads the layer's configuration from a LayerParameter object
explicit Layer(const LayerParameter& param)
: layer_param_(param) {
// Set phase and copy blobs (if there are any).
phase_ = param.phase(); // set the current phase (TRAIN or TEST)
if (layer_param_.blobs_size() > 0) { // the number of blobs is taken from layer_param_
blobs_.resize(layer_param_.blobs_size()); // allocate space for blobs_
// layer_param_ was initialized in the initializer list; blobs_ is a vector of
// shared_ptr pointing to Blob objects. Space is allocated here and the blobs
// carried in the incoming layer_param are then copied over.
for (int i = 0; i < layer_param_.blobs_size(); ++i) {
// reset() must be called first, otherwise the shared_ptr is still empty and
// deserializing into it fails with "Assertion `px != 0' failed."
// See Example 1 at the end of this post.
blobs_[i].reset(new Blob<Dtype>());
blobs_[i]->FromProto(layer_param_.blobs(i)); // clone blobs_[i] from the serialized message
}
}
}
// Virtual destructor
virtual ~Layer() {}
/**
* @brief Implements common layer setup functionality.
*
* @param bottom the preshaped input blobs
* @param top
* the allocated but unshaped output blobs, to be shaped by Reshape
*
* Checks that the number of bottom and top blobs is correct.
* Calls LayerSetUp to do special layer setup for individual layer types,
* followed by Reshape to set up sizes of top blobs and internal buffers.
* Sets up the loss weight multiplier blobs for any non-zero loss weights.
* This method may not be overridden.
*/
// Setup function: implements the common layer configuration interface; must not be overridden
void SetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
CheckBlobCounts(bottom, top); // check the numbers of bottom and top blobs
LayerSetUp(bottom, top); // type-specific setup, refined further by each concrete layer
Reshape(bottom, top); // reshape the top blobs
SetLossWeights(top); // set up the loss weight multipliers
}
/**
* @brief Does layer-specific setup: your layer should implement this function
* as well as Reshape.
*
* @param bottom
* the preshaped input blobs, whose data fields store the input data for
* this layer
* @param top
* the allocated but unshaped output blobs
*
* This method should do one-time layer specific setup. This includes reading
* and processing relevant parameters from the layer_param_.
* Setting up the shapes of top blobs and internal buffers should be done in
* Reshape, which will be called before the forward pass to
* adjust the top blob sizes.
*/
// Layer-specific setup (virtual function): performs configuration specific to the layer type,
// implemented by the layer itself; every concrete layer implements it differently
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {}
/**
* @brief Adjust the shapes of top blobs and internal buffers to accommodate
* the shapes of the bottom blobs.
*
* @param bottom the input blobs, with the requested input shapes
* @param top the top blobs, which should be reshaped as needed
*
* This method should reshape top blobs as needed according to the shapes
* of the bottom (input) blobs, as well as reshaping any internal buffers
* and making any other necessary adjustments so that the layer can
* accommodate the bottom blobs.
*/
// Pure virtual function: adjusts the shapes of the top blobs and of any internal blob buffers
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) = 0;
/**
* @brief Given the bottom blobs, compute the top blobs and the loss.
*
* @param bottom
* the input blobs, whose data fields store the input data for this layer
* @param top
* the preshaped output blobs, whose data fields will store this layers'
* outputs
* \return The total loss from the layer.
*
* The Forward wrapper calls the relevant device wrapper function
* (Forward_cpu or Forward_gpu) to compute the top blob values given the
* bottom blobs. If the layer has any non-zero loss_weights, the wrapper
* then computes and returns the loss.
*
* Your layer should implement Forward_cpu and (optionally) Forward_gpu.
*/
// Forward pass: given the bottom blobs, compute the top blobs and the loss; the return value is this layer's loss
// This wrapper calls the relevant device function, Forward_cpu or Forward_gpu, to do the actual computation
// If the layer has any non-zero loss_weights, the wrapper computes and returns the loss as well
// Derived classes should implement Forward_cpu and (optionally) Forward_gpu
inline Dtype Forward(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
/**
* @brief Given the top blob error gradients, compute the bottom blob error
* gradients.
*
* @param top
* the output blobs, whose diff fields store the gradient of the error
* with respect to themselves
* @param propagate_down
* a vector with equal length to bottom, with each index indicating
* whether to propagate the error gradients down to the bottom blob at
* the corresponding index
* @param bottom
* the input blobs, whose diff fields will store the gradient of the error
* with respect to themselves after Backward is run
*
* The Backward wrapper calls the relevant device wrapper function
* (Backward_cpu or Backward_gpu) to compute the bottom blob diffs given the
* top blob diffs.
*
* Your layer should implement Backward_cpu and (optionally) Backward_gpu.
*/
// Backward pass: given the error gradients of the top blobs, compute the error gradients of the bottom blobs
// Parameters:
// top: the Top Blobs, whose diff fields hold the error gradients coming from the layer above
// propagate_down: a vector of the same length as bottom; each index indicates whether the error gradients should be back-propagated to the corresponding bottom blob
// bottom: the Bottom Blobs, whose diff fields receive the corresponding gradient errors computed by Backward
// This wrapper calls the relevant device function, Backward_cpu or Backward_gpu, which the derived class is responsible for implementing
inline void Backward(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom);
/**
* @brief Returns the vector of learnable parameter blobs.
* (i.e., the layer's learnable weights and biases)
*/
vector<shared_ptr<Blob<Dtype> > >& blobs() {
return blobs_;
}
/**
* @brief Returns the layer parameter.
* (the LayerParameter the layer was initialized from)
*/
const LayerParameter& layer_param() const { return layer_param_; }
/**
* @brief Writes the layer parameter to a protocol buffer
* (serializes the layer's configuration together with its learned blobs)
*/
virtual void ToProto(LayerParameter* param, bool write_diff = false);
/**
* @brief Returns the scalar loss associated with a top blob at a given index.
*/
inline Dtype loss(const int top_index) const {
return (loss_.size() > top_index) ? loss_[top_index] : Dtype(0);
}
/**
* @brief Sets the loss associated with a top blob at a given index.
*/
inline void set_loss(const int top_index, const Dtype value) {
if (loss_.size() <= top_index) {
loss_.resize(top_index + 1, Dtype(0));
}
loss_[top_index] = value;
}
/**
* @brief Returns the layer type.
* (a string identifying the layer; implemented by each derived class)
*/
virtual inline const char* type() const { return ""; }
/**
* @brief Returns the exact number of bottom blobs required by the layer,
* or -1 if no exact number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some exact number of bottom blobs.
*/
virtual inline int ExactNumBottomBlobs() const { return -1; }
/**
* @brief Returns the minimum number of bottom blobs required by the layer,
* or -1 if no minimum number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some minimum number of bottom blobs.
*/
virtual inline int MinBottomBlobs() const { return -1; }
/**
* @brief Returns the maximum number of bottom blobs required by the layer,
* or -1 if no maximum number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some maximum number of bottom blobs.
*/
virtual inline int MaxBottomBlobs() const { return -1; }
/**
* @brief Returns the exact number of top blobs required by the layer,
* or -1 if no exact number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some exact number of top blobs.
*/
virtual inline int ExactNumTopBlobs() const { return -1; }
/**
* @brief Returns the minimum number of top blobs required by the layer,
* or -1 if no minimum number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some minimum number of top blobs.
*/
virtual inline int MinTopBlobs() const { return -1; }
/**
* @brief Returns the maximum number of top blobs required by the layer,
* or -1 if no maximum number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some maximum number of top blobs.
*/
virtual inline int MaxTopBlobs() const { return -1; }
/**
* @brief Returns true if the layer requires an equal number of bottom and
* top blobs.
*
* This method should be overridden to return true if your layer expects an
* equal number of bottom and top blobs.
*/
virtual inline bool EqualNumBottomTopBlobs() const { return false; }
/**
* @brief Return whether "anonymous" top blobs are created automatically
* by the layer.
*
* If this method returns true, Net::Init will create enough "anonymous" top
* blobs to fulfill the requirement specified by ExactNumTopBlobs() or
* MinTopBlobs().
*/
virtual inline bool AutoTopBlobs() const { return false; }
/**
* @brief Return whether to allow force_backward for a given bottom blob
* index.
*
* If AllowForceBackward(i) == false, we will ignore the force_backward
* setting and backpropagate to blob i only if it needs gradient information
* (as is done when force_backward == false).
* The two functions below (param_propagate_down and set_param_propagate_down)
* respectively query and set whether the gradient of a given parameter blob should be computed.
*/
virtual inline bool AllowForceBackward(const int bottom_index) const {
return true;
}
/**
* @brief Specifies whether the layer should compute gradients w.r.t. a
* parameter at a particular index given by param_id.
*
* You can safely ignore false values and always compute gradients
* for all parameters, but possibly with wasteful computation.
* (i.e., whether to compute the gradient w.r.t. the weights or the bias selected by param_id)
*/
inline bool param_propagate_down(const int param_id) {
return (param_propagate_down_.size() > param_id) ?
param_propagate_down_[param_id] : false;
}
/**
* @brief Sets whether the layer should compute gradients w.r.t. a
* parameter at a particular index given by param_id.
*/
inline void set_param_propagate_down(const int param_id, const bool value) {
if (param_propagate_down_.size() <= param_id) {
param_propagate_down_.resize(param_id + 1, true);
}
param_propagate_down_[param_id] = value;
}
protected:
/** The protobuf that stores the layer parameters */
LayerParameter layer_param_; // the protobuf object holding this layer's parameters
/** The phase: TRAIN or TEST */
Phase phase_; // the phase the layer is currently in
/** The vector that stores the learnable parameters as a set of blobs. */
vector<shared_ptr<Blob<Dtype> > > blobs_; // the layer's weights and biases, organized as Blobs
/** Vector indicating whether to compute the diff of each param blob. */
vector<bool> param_propagate_down_; // flags: whether to compute the error gradient of each parameter blob
/** The vector that indicates whether each top blob has a non-zero weight in
* the objective function. */
vector<Dtype> loss_; // the weight (possibly zero) each top blob carries in the objective function
// The following four functions are overridden in virtually every Layer subclass
/** @brief Using the CPU device, compute the layer output. */
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) = 0;
/**
* @brief Using the GPU device, compute the layer output.
* Fall back to Forward_cpu() if unavailable.
*/
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// LOG(WARNING) << "Using CPU code as backup.";
return Forward_cpu(bottom, top);
}
/**
* @brief Using the CPU device, compute the gradients for any parameters and
* for the bottom blobs if propagate_down is true.
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) = 0;
/**
* @brief Using the GPU device, compute the gradients for any parameters and
* for the bottom blobs if propagate_down is true.
* Fall back to Backward_cpu() if unavailable.
*/
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
// LOG(WARNING) << "Using CPU code as backup.";
Backward_cpu(top, propagate_down, bottom);
}
/**
* Called by the parent Layer's SetUp to check that the number of bottom
* and top Blobs provided as input match the expected numbers specified by
* the {ExactNum,Min,Max}{Bottom,Top}Blobs() functions.
*/
virtual void CheckBlobCounts(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (ExactNumBottomBlobs() >= 0) {
CHECK_EQ(ExactNumBottomBlobs(), bottom.size())
<< type() << " Layer takes " << ExactNumBottomBlobs()
<< " bottom blob(s) as input.";
}
if (MinBottomBlobs() >= 0) {
CHECK_LE(MinBottomBlobs(), bottom.size())
<< type() << " Layer takes at least " << MinBottomBlobs()
<< " bottom blob(s) as input.";
}
if (MaxBottomBlobs() >= 0) {
CHECK_GE(MaxBottomBlobs(), bottom.size())
<< type() << " Layer takes at most " << MaxBottomBlobs()
<< " bottom blob(s) as input.";
}
if (ExactNumTopBlobs() >= 0) {
CHECK_EQ(ExactNumTopBlobs(), top.size())
<< type() << " Layer produces " << ExactNumTopBlobs()
<< " top blob(s) as output.";
}
if (MinTopBlobs() >= 0) {
CHECK_LE(MinTopBlobs(), top.size())
<< type() << " Layer produces at least " << MinTopBlobs()
<< " top blob(s) as output.";
}
if (MaxTopBlobs() >= 0) {
CHECK_GE(MaxTopBlobs(), top.size())
<< type() << " Layer produces at most " << MaxTopBlobs()
<< " top blob(s) as output.";
}
if (EqualNumBottomTopBlobs()) {
CHECK_EQ(bottom.size(), top.size())
<< type() << " Layer produces one top blob as output for each "
<< "bottom blob input.";
}
}
/**
* Called by SetUp to initialize the weights associated with any top blobs in
* the loss function. Store non-zero loss weights in the diff blob.
* This function is called from Layer's SetUp(); it initializes the loss weights
* associated with the top blobs and stores the non-zero loss weights in the top
* blobs' diff fields. The loss itself is later computed by Forward().
*/
inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
// read the loss_weight entries from the layer's protobuf parameters
const int num_loss_weights = layer_param_.loss_weight_size();
if (num_loss_weights) { // only if at least one loss_weight is specified
// the number of loss_weight entries must match the number of top blobs,
// or loss_weight must be left unspecified altogether
CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be "
"unspecified or specified once per top blob.";
for (int top_id = 0; top_id < top.size(); ++top_id) {
// fetch the actual loss_weight value from the protobuf message
const Dtype loss_weight = layer_param_.loss_weight(top_id);
// skip top blobs whose weight is zero
if (loss_weight == Dtype(0)) { continue; }
this->set_loss(top_id, loss_weight); // record the loss_weight locally
const int count = top[top_id]->count();
Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff();
// write the loss_weight into the top blob's diff field so that the Forward
// wrapper can later use it as a multiplier when accumulating the loss
caffe_set(count, loss_weight, loss_multiplier);
}
}
}
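// For reference, the loss weights read above come from the loss_weight field of
// LayerParameter. An illustrative prototxt layer definition that sets one explicitly
// might look like this (sketch only; the layer name and bottom names are made up):
//   layer {
//     name: "my_loss"
//     type: "SoftmaxWithLoss"
//     bottom: "fc8"
//     bottom: "label"
//     top: "loss"
//     loss_weight: 1
//   }
// Loss layers default to a loss_weight of 1 for their first top blob; other layers default to 0.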
private:
DISABLE_COPY_AND_ASSIGN(Layer); // forbid copy construction and assignment
}; // class Layer
// Forward and backward wrappers. You should implement the cpu and
// gpu specific implementations instead, and should not change these
// functions.
// Wrappers for the forward and backward passes; these two functions should not be modified.
// To use them, simply override Forward_cpu etc. in the derived class.
template <typename Dtype>
inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype loss = 0;
Reshape(bottom, top); // reshape the top blobs and any internal buffers
switch (Caffe::mode()) { // select the computation device
case Caffe::CPU: // run on the CPU
Forward_cpu(bottom, top); // call the CPU version of Forward
// next, compute the loss as well, if this layer produces one
for (int top_id = 0; top_id < top.size(); ++top_id) {
if (!this->loss(top_id)) { continue; }
const int count = top[top_id]->count();
// for a loss layer, Forward_cpu has already written the loss values into the
// top blob's data field, while the loss weights sit in its diff field
const Dtype* data = top[top_id]->cpu_data();
const Dtype* loss_weights = top[top_id]->cpu_diff();
loss += caffe_cpu_dot(count, data, loss_weights); // accumulate the weighted loss into a scalar
// i.e. the sum of data[i] * loss_weights[i]; the underlying calls, found in math_functions, are:
/**
* Dtype caffe_cpu_dot(const int n, const Dtype* x, const Dtype* y) {
return caffe_cpu_strided_dot(n, x, 1, y, 1);
}
template <>
double caffe_cpu_strided_dot(const int n, const double* x,
const int incx, const double* y, const int incy) {
return cblas_ddot(n, x, incx, y, incy);
}
Purpose: returns the inner product of vector X and vector Y.
incx, incy: strides, i.e. the operation touches every incx-th / incy-th element.
* */
}
break;
case Caffe::GPU:
Forward_gpu(bottom, top);
#ifndef CPU_ONLY
for (int top_id = 0; top_id < top.size(); ++top_id) {
if (!this->loss(top_id)) { continue; }
const int count = top[top_id]->count();
const Dtype* data = top[top_id]->gpu_data();
const Dtype* loss_weights = top[top_id]->gpu_diff();
Dtype blob_loss = 0;
caffe_gpu_dot(count, data, loss_weights, &blob_loss);
loss += blob_loss;
}
#endif
break;
default:
LOG(FATAL) << "Unknown caffe mode.";
}
return loss;
}
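// For context: the Net object is what drives this wrapper. Inside Net::ForwardFromTo the
// call is roughly the following (simplified sketch; member names as in net.cpp):
//   Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]);
//   loss += layer_loss;
// so the per-layer losses returned here are what Net accumulates into the global loss.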
// Backward pass wrapper: simply dispatches to the corresponding device function
template <typename Dtype>
inline void Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
switch (Caffe::mode()) {
case Caffe::CPU:
Backward_cpu(top, propagate_down, bottom);
break;
case Caffe::GPU:
Backward_gpu(top, propagate_down, bottom);
break;
default:
LOG(FATAL) << "Unknown caffe mode.";
}
}
// Serialize the LayerParameter to a protocol buffer
template <typename Dtype>
void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff) {
param->Clear();
param->CopyFrom(layer_param_);
param->clear_blobs();
for (int i = 0; i < blobs_.size(); ++i) { // the weights and biases are saved as well
blobs_[i]->ToProto(param->add_blobs(), write_diff);
}
}
} // namespace caffe
#endif // CAFFE_LAYER_H_
Note that most of Layer's member functions are not implemented here; they are virtual (or pure virtual) functions, and the real implementations live in the derived classes. The concrete code can be read in src/caffe/layers/*.cpp.
Before using Layer, include the header (#include "caffe/layer.hpp") and use the caffe namespace. If your code tries to create a Layer object directly, it will not compile, because Layer is an abstract base class and cannot be instantiated.
Consider the following example (this is the "Example 1" referred to in the constructor comments above):
// headers needed for this example: Blob, the protobuf I/O helpers, and iostream
#include <iostream>
#include "caffe/blob.hpp"
#include "caffe/util/io.hpp"
using namespace caffe;
using namespace std;
void test2(){
Blob<float> a;
cout<<"size:"<<a.shape_string()<<endl;
a.Reshape(1,2,3,4);
cout<<"size:"<<a.shape_string()<<endl;
// the two lines above print:
// size:(0)
// size:1 2 3 4 (24)
// 1. Once a Blob object has been created, its values can be modified through mutable_cpu[gpu]_data[diff]
float *p = a.mutable_cpu_data();
for(int i=0;i<a.count();i++){ // write 0, 1, 2, 3, ... into the Blob object a
p[i] = i;
}
// 2. BlobProto implements data synchronization between disk and memory,
//    which is very useful for saving/loading trained model weights
BlobProto bp; // construct a BlobProto object
a.ToProto(&bp, true); // serialize a, diff included (it is omitted by default)
WriteProtoToBinaryFile(bp, "test.blob"); // write to the disk file "test.blob" (this argument is the path)
// 3. Try out reset and FromProto
BlobProto bp2; // construct a new BlobProto object
ReadProtoFromBinaryFileOrDie("test.blob", &bp2); // read it back from disk
boost::shared_ptr<Blob<float> > b; // create a new Blob handle; note the boost smart pointer
b.reset(new Blob<float>()); // reset() must be called first, otherwise the pointer is empty and deserialization fails
// without the reset you get an "Assertion `px != 0' failed." error
b->FromProto(bp2, true); // deserialize: clone b from the serialized object, shape included
for(int n=0;n<b->num();n++){
for(int c=0;c<b->channels();c++){
for(int h=0;h<b->height();h++){
for(int w=0;w<b->width();w++){
cout<<"b["<<n<<"]["<<c<<"]["<<h<<"]["<<w<<"]="<<b->data_at(n,c,h,w)<<endl;
}
}
}
}
}
/**
* test2 prints the following:
size:(0)
size:1 2 3 4 (24)
b[0][0][0][0]=0
b[0][0][0][1]=1
b[0][0][0][2]=2
and so on
**/
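Since Layer itself is abstract, the way to actually use it is to derive from it and implement the virtual functions discussed above. The following is a minimal sketch of a hypothetical PassThroughLayer (not part of Caffe; the class name is made up for illustration) that simply copies its single bottom blob to its single top blob, showing which functions a derived class typically overrides:

#include <vector>
#include "caffe/layer.hpp"

namespace caffe {

// Hypothetical example layer: copies its single bottom blob to its single top blob.
// Only meant to illustrate the virtual functions a derived class provides.
template <typename Dtype>
class PassThroughLayer : public Layer<Dtype> {
 public:
  explicit PassThroughLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual inline const char* type() const { return "PassThrough"; }
  // exactly one bottom blob and one top blob
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }
  // shape the top blob exactly like the bottom blob
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    top[0]->ReshapeLike(*bottom[0]);
  }

 protected:
  // forward: copy bottom data into top data
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    caffe_copy(bottom[0]->count(), bottom[0]->cpu_data(),
               top[0]->mutable_cpu_data());
  }
  // backward: copy top diff back into bottom diff when requested
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) {
    if (propagate_down[0]) {
      caffe_copy(top[0]->count(), top[0]->cpu_diff(),
                 bottom[0]->mutable_cpu_diff());
    }
  }
};

}  // namespace caffe

To make such a layer usable from a prototxt it would additionally have to be registered with the layer factory (REGISTER_LAYER_CLASS(PassThrough)); that step, and the optional Forward_gpu/Backward_gpu overrides, are omitted from this sketch.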