Caffe Architecture Notes: Low-Level Data (1)

Preface: the shared_ptr smart pointer

To address memory leaks in C++, C++11 introduced smart pointers.
C++11 provides three smart pointers: std::shared_ptr, std::unique_ptr, and std::weak_ptr. To use them, include the <memory> header.
shared_ptr uses reference counting: every copy of a shared_ptr points to the same memory. Each new copy increments the internal reference count by 1 and each destruction decrements it by 1; when the count drops to 0, the owned heap memory is freed. The reference count inside shared_ptr is thread-safe, but reads and writes of the managed object itself still require locking.

#include "stdafx.h"
#include 
#include 
#include 

using namespace std;
class Person
{
public:
    Person(int v) {
        value = v;
        std::cout << "Cons" < p1(new Person(1));// Person(1)的引用计数为1

    std::shared_ptr p2 = std::make_shared(2);

    p1.reset(new Person(3));// 首先生成新对象,然后引用计数减1,引用计数为0,故析构Person(1)
                            // 最后将新对象的指针交给智能指针

    std::shared_ptr p3 = p1;//现在p1和p3同时指向Person(3),Person(3)的引用计数为2

    p1.reset();//Person(3)的引用计数为1
    p3.reset();//Person(3)的引用计数为0,析构Person(3)
    return 0;
}
Note that a raw pointer cannot be assigned directly to a smart pointer, as shown below: the shared_ptr constructor that takes a raw pointer is explicit, so there is no implicit conversion from a pointer to the class.
std::shared_ptr<int> p4 = new int(1); // error

Getting the raw pointer:
std::shared_ptr<int> p4(new int(5));
int *pInt = p4.get();
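
To make the counting visible, shared_ptr also exposes use_count(); a small standalone check:

#include <iostream>
#include <memory>

int main()
{
    std::shared_ptr<int> a = std::make_shared<int>(42);
    std::cout << a.use_count() << std::endl; // 1: only 'a' owns the object

    std::shared_ptr<int> b = a;              // copy: both now own the same int
    std::cout << a.use_count() << std::endl; // 2

    b.reset();                               // drop one owner
    std::cout << a.use_count() << std::endl; // 1
    return 0;
}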

SyncedMemory: responsible for memory synchronization

size_ : size of the stored data, in bytes
head_ : current state of the SyncedMemory: {UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED}
cpu_ptr_ : pointer to the data in CPU memory
gpu_ptr_ : pointer to the data in GPU memory

SyncedMemory(size_t size) only initializes size_; it does not allocate any memory!
to_cpu() / to_gpu() perform the actual allocation and keep the CPU and GPU copies in sync, as sketched below.
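
The following minimal sketch (a hypothetical LazyBuffer class, not the actual Caffe source, CPU side only) illustrates the lazy-allocation idea: nothing is allocated until the data is first accessed, and the head_ state records where the freshest copy lives. The real SyncedMemory additionally tracks gpu_ptr_ and copies data between devices when head_ is HEAD_AT_GPU.

#include <cstddef>
#include <cstdlib>
#include <cstring>

// Hypothetical simplification of SyncedMemory's lazy CPU-side allocation.
class LazyBuffer {
 public:
  enum Head { UNINITIALIZED, HEAD_AT_CPU };

  explicit LazyBuffer(std::size_t size)
      : size_(size), head_(UNINITIALIZED), cpu_ptr_(nullptr) {}  // constructor allocates nothing

  ~LazyBuffer() { std::free(cpu_ptr_); }

  const void* cpu_data()   { to_cpu(); return cpu_ptr_; }        // read-only access
  void* mutable_cpu_data() { to_cpu(); return cpu_ptr_; }        // read-write access

 private:
  void to_cpu() {
    if (head_ == UNINITIALIZED) {        // first access: allocate and zero-fill
      cpu_ptr_ = std::malloc(size_);
      std::memset(cpu_ptr_, 0, size_);
      head_ = HEAD_AT_CPU;
    }
  }

  std::size_t size_;
  Head head_;
  void* cpu_ptr_;
};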

Blob: the basic unit of computation

Key members

// key member functions
Blob(const int num, const int channels, const int height,const int width);
Reshape(const int num, const int channels, const int height,const int width);
const Dtype* cpu_data() const;
void set_cpu_data(Dtype* data);
const int* gpu_shape() const;
const Dtype* gpu_data() const;
const Dtype* cpu_diff() const;
const Dtype* gpu_diff() const;
Dtype* mutable_cpu_data();
Dtype* mutable_gpu_data();
Dtype* mutable_cpu_diff();
Dtype* mutable_gpu_diff();
void Update();
void FromProto(const BlobProto& proto, bool reshape = true);
/// @brief Compute the sum of absolute values (L1 norm) of the data.
Dtype asum_data() const;
/// @brief Compute the sum of absolute values (L1 norm) of the diff.
Dtype asum_diff() const;
/// @brief Compute the sum of squares (L2 norm squared) of the data.
Dtype sumsq_data() const;
/// @brief Compute the sum of squares (L2 norm squared) of the diff.
Dtype sumsq_diff() const;

// key member variables
shared_ptr<SyncedMemory> data_;
shared_ptr<SyncedMemory> diff_;
shared_ptr<SyncedMemory> shape_data_;
vector<int> shape_;
int count_;
int capacity_;
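
A small usage sketch (assuming the standard Caffe headers and the float instantiation) showing how the 4-D constructor and Reshape relate to count_:

#include <iostream>
#include "caffe/blob.hpp"

int main() {
  // A 4-D blob: num=1, channels=3, height=4, width=5.
  caffe::Blob<float> blob(1, 3, 4, 5);
  std::cout << blob.count() << std::endl;  // count_ = 1*3*4*5 = 60

  // Reshaping to a larger count re-registers data_/diff_ with the new capacity_.
  blob.Reshape(2, 3, 4, 5);
  std::cout << blob.count() << std::endl;  // 120
  return 0;
}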

Blob(const int num, const int channels, const int height, const int width): the constructor

template <typename Dtype>
Blob<Dtype>::Blob(const int num, const int channels, const int height,
    const int width)
  // capacity_ must be initialized before calling Reshape
  : capacity_(0) {
  Reshape(num, channels, height, width);
}

It simply calls void Reshape(const int num, const int channels, const int height, const int width);

Reshape: the reshaping function, responsible for registering the required memory

template <typename Dtype>
void Blob<Dtype>::Reshape(const vector<int>& shape) {
  CHECK_LE(shape.size(), kMaxBlobAxes);
  count_ = 1;
  shape_.resize(shape.size());
  if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) {
    shape_data_.reset(new SyncedMemory(shape.size() * sizeof(int)));
  }
  int* shape_data = static_cast<int*>(shape_data_->mutable_cpu_data());
  for (int i = 0; i < shape.size(); ++i) {
    CHECK_GE(shape[i], 0);
    CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX";
    count_ *= shape[i];
    shape_[i] = shape[i];
    shape_data[i] = shape[i];
  }
  if (count_ > capacity_) {
    capacity_ = count_;
    data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
    diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
  }
}

Reshape uses SyncedMemory to register the required memory; the registered size is capacity_ * sizeof(Dtype), where capacity_ = count_. Note that both data_ and diff_ are registered at this stage, i.e. the memory for the forward results and for the gradients, but nothing is actually allocated yet: the head_ of each SyncedMemory is still in the UNINITIALIZED state.
Up to this point Caffe has not really allocated any memory; it has only recorded the required sizes.
The important Blob member functions cpu_data(), mutable_cpu_data(), gpu_data(), mutable_gpu_data(), etc. are what actually allocate the memory and record where the data currently lives (CPU or GPU).

template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_data() const {
  CHECK(data_);
  return (const Dtype*)data_->gpu_data();
}

template <typename Dtype>
Dtype* Blob<Dtype>::mutable_gpu_data() {
  CHECK(data_);
  return static_cast<Dtype*>(data_->mutable_gpu_data());
}

Read and write access is controlled by whether a const pointer is returned.
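A short usage sketch (assuming the Caffe headers and the float instantiation) of the two access paths: mutable_cpu_data() for writing, which also triggers the first real allocation and marks the CPU copy as the freshest, and cpu_data() for read-only access:

#include "caffe/blob.hpp"

void fill_with_ones(caffe::Blob<float>& blob) {
  // mutable_cpu_data(): allocates on first use and sets head_ to HEAD_AT_CPU.
  float* data = blob.mutable_cpu_data();
  for (int i = 0; i < blob.count(); ++i) {
    data[i] = 1.0f;
  }
}

float sum(const caffe::Blob<float>& blob) {
  // cpu_data(): the const pointer prevents accidental writes.
  const float* data = blob.cpu_data();
  float s = 0.0f;
  for (int i = 0; i < blob.count(); ++i) {
    s += data[i];
  }
  return s;
}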
Next, consider Blob's Update(), which updates the network parameters; it is not implemented for int and unsigned int.

// The "update" method is used for parameter blobs in a Net, which are stored
// as Blob or Blob -- hence we do not define it for
// Blob or Blob.
template <> void Blob::Update() { NOT_IMPLEMENTED; }
template <> void Blob::Update() { NOT_IMPLEMENTED; }

template <typename Dtype>
void Blob<Dtype>::Update() {
  // We will perform update based on where the data is located.
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    // perform computation on CPU
    caffe_axpy<Dtype>(count_, Dtype(-1),
        static_cast<const Dtype*>(diff_->cpu_data()),
        static_cast<Dtype*>(data_->mutable_cpu_data()));
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    // perform computation on GPU
    caffe_gpu_axpy<Dtype>(count_, Dtype(-1),
        static_cast<const Dtype*>(diff_->gpu_data()),
        static_cast<Dtype*>(data_->mutable_gpu_data()));
#else
    NO_GPU;
#endif
    break;
  default:
    LOG(FATAL) << "Syncedmem not initialized.";
  }
}
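
In other words, Update() performs data_ = data_ + (-1) * diff_, a plain gradient-descent step. On the CPU branch the axpy call is equivalent to the following loop (a sketch, assuming Dtype is float):

// Equivalent of caffe_axpy<float>(count, -1.0f, diff, data): data[i] += (-1) * diff[i].
void update_cpu(int count, const float* diff, float* data) {
  for (int i = 0; i < count; ++i) {
    data[i] -= diff[i];  // subtract the accumulated gradient from the parameter
  }
}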

Finally, consider void FromProto(const BlobProto& proto, bool reshape = true);
It loads a previously serialized Blob from disk into (CPU) memory.

template <typename Dtype>
void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
  if (reshape) {
    vector<int> shape;
    if (proto.has_num() || proto.has_channels() ||
        proto.has_height() || proto.has_width()) {
      // Using deprecated 4D Blob dimensions --
      // shape is (num, channels, height, width).
      shape.resize(4);
      shape[0] = proto.num();
      shape[1] = proto.channels();
      shape[2] = proto.height();
      shape[3] = proto.width();
    } else {
      shape.resize(proto.shape().dim_size());
      for (int i = 0; i < proto.shape().dim_size(); ++i) {
        shape[i] = proto.shape().dim(i);
      }
    }
    Reshape(shape);
  } else {
    CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)";
  }
  // copy data
  Dtype* data_vec = mutable_cpu_data();
  if (proto.double_data_size() > 0) {
    CHECK_EQ(count_, proto.double_data_size());
    for (int i = 0; i < count_; ++i) {
      data_vec[i] = proto.double_data(i);
    }
  } else {
    CHECK_EQ(count_, proto.data_size());
    for (int i = 0; i < count_; ++i) {
      data_vec[i] = proto.data(i);
    }
  }
  if (proto.double_diff_size() > 0) {
    CHECK_EQ(count_, proto.double_diff_size());
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.double_diff(i);
    }
  } else if (proto.diff_size() > 0) {
    CHECK_EQ(count_, proto.diff_size());
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }
  }
}
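
A hedged usage sketch of restoring a blob from disk. It assumes a file name weights.blobproto (hypothetical) that was written earlier via Blob::ToProto(), and uses the ReadProtoFromBinaryFile helper from caffe/util/io.hpp:

#include "caffe/blob.hpp"
#include "caffe/util/io.hpp"

int main() {
  caffe::BlobProto proto;
  // "weights.blobproto" is a hypothetical file written earlier with Blob::ToProto().
  caffe::ReadProtoFromBinaryFile("weights.blobproto", &proto);

  caffe::Blob<float> blob;
  blob.FromProto(proto, /*reshape=*/true);  // reshape to the stored dimensions, then copy data (and diff, if present)
  return 0;
}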

Layer

A layer has at least one input blob (bottom blob) and one output blob (top blob); some layers also carry weights and biases. There are two directions of computation: forward and backward.
Key members

// member functions
explicit Layer(const LayerParameter& param);
void SetUp(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) = 0;
inline Dtype Forward(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);
inline void Backward(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
// Returns the vector of learnable parameter blobs.
vector<shared_ptr<Blob<Dtype> > >& blobs()
{
    return blobs_;
}
// Returns the layer parameter.
const LayerParameter& layer_param() const { return layer_param_; }
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) = 0;
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
    // LOG(WARNING) << "Using CPU code as backup.";
    return Forward_cpu(bottom, top);
}

// member variables
/** The protobuf that stores the layer parameters */
LayerParameter layer_param_;
/** The phase: TRAIN or TEST */
Phase phase_;
/** The vector that stores the learnable parameters as a set of blobs. */
vector<shared_ptr<Blob<Dtype> > > blobs_;
/** Vector indicating whether to compute the diff of each param blob. */
vector<bool> param_propagate_down_;

/** The vector that indicates whether each top blob has a non-zero weight in
*  the objective function. */
vector<Dtype> loss_;

explicit Layer(const LayerParameter& param);
Loads the layer configuration from the LayerParameter, i.e. from the model definition; this step allocates CPU memory (the parameter blobs, if any, are copied in).

void SetUp(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);

/**
 * @brief Implements common layer setup functionality.
 *
 * @param bottom the preshaped input blobs
 * @param top
 *     the allocated but unshaped output blobs, to be shaped by Reshape
 *
 * Checks that the number of bottom and top blobs is correct.
 * Calls LayerSetUp to do special layer setup for individual layer types,
 * followed by Reshape to set up sizes of top blobs and internal buffers.
 * Sets up the loss weight multiplier blobs for any non-zero loss weights.
 * This method may not be overridden.
 */
void SetUp(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
    InitMutex();
    CheckBlobCounts(bottom, top);
    LayerSetUp(bottom, top);
    Reshape(bottom, top);
    SetLossWeights(top);
}

/**
 * @brief Does layer-specific setup: your layer should implement this function
 *        as well as Reshape.
 *
 * @param bottom
 *     the preshaped input blobs, whose data fields store the input data for
 *     this layer
 * @param top
 *     the allocated but unshaped output blobs
 *
 * This method should do one-time layer specific setup. This includes reading
 * and processing relevant parameters from the <code>layer_param_</code>.
 * Setting up the shapes of top blobs and internal buffers should be done in
 * <code>Reshape</code>, which will be called before the forward pass to
 * adjust the top blob sizes.
 */
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {}

The Layer class itself is fairly simple; each concrete layer implements its own functionality on top of it.
Note in particular:
layer_param_ is the protobuf object that stores the layer's parameters
blobs_ stores the layer's internal weights and bias terms
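
As a rough illustration (a hypothetical layer, not part of Caffe), a concrete layer mostly fills in Reshape and the Forward_cpu/Backward_cpu pair; a layer that simply multiplies its input by two might look like this:

#include <vector>
#include "caffe/layer.hpp"

// Hypothetical example layer: out = 2 * in.
template <typename Dtype>
class TimesTwoLayer : public caffe::Layer<Dtype> {
 public:
  explicit TimesTwoLayer(const caffe::LayerParameter& param)
      : caffe::Layer<Dtype>(param) {}

  virtual void Reshape(const std::vector<caffe::Blob<Dtype>*>& bottom,
                       const std::vector<caffe::Blob<Dtype>*>& top) {
    top[0]->ReshapeLike(*bottom[0]);   // the top blob has the same shape as the input
  }

  virtual inline const char* type() const { return "TimesTwo"; }

 protected:
  virtual void Forward_cpu(const std::vector<caffe::Blob<Dtype>*>& bottom,
                           const std::vector<caffe::Blob<Dtype>*>& top) {
    const Dtype* in = bottom[0]->cpu_data();
    Dtype* out = top[0]->mutable_cpu_data();
    for (int i = 0; i < bottom[0]->count(); ++i) {
      out[i] = Dtype(2) * in[i];
    }
  }

  virtual void Backward_cpu(const std::vector<caffe::Blob<Dtype>*>& top,
                            const std::vector<bool>& propagate_down,
                            const std::vector<caffe::Blob<Dtype>*>& bottom) {
    if (propagate_down[0]) {           // chain rule: d(2x)/dx = 2
      const Dtype* top_diff = top[0]->cpu_diff();
      Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
      for (int i = 0; i < bottom[0]->count(); ++i) {
        bottom_diff[i] = Dtype(2) * top_diff[i];
      }
    }
  }
};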
