/// @brief The network name
string name_;
/// @brief The phase: TRAIN or TEST
Phase phase_;
/// @brief Individual layers in the net
vector<shared_ptr<Layer<Dtype> > > layers_;
vector<string> layer_names_;
map<string, int> layer_names_index_;
vector<bool> layer_need_backward_;
/// @brief The blobs storing intermediate results between layers.
vector<shared_ptr<Blob<Dtype> > > blobs_;  // blobs_ holds the intermediate results, one entry for every non-parameter blob in the whole net.
vector<string> blob_names_;  // The names of all non-parameter blobs in the net.
map<string, int> blob_names_index_;
vector<bool> blob_need_backward_;  // Whether each non-parameter blob needs backward. Note that "non-parameter blobs" here means the top blobs traversed by AppendTop, not the top + bottom of every layer: one layer's top is the next layer's bottom, since the net is stacked layer by layer.
/// bottom_vecs stores the vectors containing the input for each layer.
/// They don't actually host the blobs (blobs_ does), so we simply store pointers.
vector<vector<Blob<Dtype>*> > bottom_vecs_;  // Pointers to the bottom blobs of every layer; in practice they point at the previous layer's tops, because the net is stacked layer by layer.
vector<vector<int> > bottom_id_vecs_;  // The IDs of the bottom blobs of every layer.
vector<vector<bool> > bottom_need_backward_;  // Whether each bottom blob of every layer needs backward.
/// top_vecs stores the vectors containing the output for each layer.
vector<vector<Blob<Dtype>*> > top_vecs_;  // Pointers to the top blobs of every layer.
vector<vector<int> > top_id_vecs_;  // The IDs of the top blobs of every layer. The basic element is a blob_id: every new blob is assigned a blob_id (matching top_vecs_), but a blob_id may repeat because of in-place computation.
/// Vector of weight in the loss (or objective) function of each net blob, indexed by blob_id.
vector<Dtype> blob_loss_weights_;  // Resized each time a layer is visited in Init; the layer's loss() accessor supplies the loss_weight.
vector<vector<int> > param_id_vecs_;  // The basic element is a net_param_id; net_param_id and param_id_vecs_ are both updated for every parameter blob that is visited.
vector<int> param_owners_;  // The "owner" of each parameter; -1 means the current layer is itself the owner of that parameter.
vector<string> param_display_names_;
vector<pair<int, int> > param_layer_indices_;  // Each element is the pair <layer_id, param_id> of the parameter.
map<string, int> param_names_index_;  // Maps the net's non-empty parameter names to their indices. Note that this name is the name field of ParamSpec.
/// Blob indices for the input and the output of the net (the net's input/output blobs and their IDs).
vector<int> net_input_blob_indices_;
vector<int> net_output_blob_indices_;
vector<Blob<Dtype>*> net_input_blobs_;
vector<Blob<Dtype>*> net_output_blobs_;
/// The parameters in the network.
vector<shared_ptr<Blob<Dtype> > > params_;  // Every parameter blob of the whole net, regardless of whether it has a non-empty name or is shared.
vector<Blob<Dtype>*> learnable_params_;
/**
 * The mapping from params_ -> learnable_params_: we have
 * learnable_param_ids_.size() == params_.size(),
 * and learnable_params_[learnable_param_ids_[i]] == params_[i].get()
 * if and only if params_[i] is an "owner"; otherwise, params_[i] is a sharer
 * and learnable_params_[learnable_param_ids_[i]] gives its owner.
 */
vector<int> learnable_param_ids_;
/// The learning rate multipliers for learnable_params_.
vector<float> params_lr_;
vector<bool> has_params_lr_;
/// The weight decay multipliers for learnable_params_.
vector<float> params_weight_decay_;
vector<bool> has_params_decay_;
/// The bytes of memory used by this net.
size_t memory_used_;
/// Whether to compute and display debug info for the net.
bool debug_info_;
/// The root net that actually holds the shared layers in data parallelism.
const Net* const root_net_;
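These members fall into a few groups: per-layer containers (layers_, bottom_vecs_, top_vecs_, ...), per-blob containers (blobs_, blob_names_, blob_need_backward_, ...), per-parameter containers (params_, learnable_params_, ...), and name-to-index maps. The sketch below is not Caffe code (the FakeBlob type and the main() driver are made up for illustration); it only shows the registration-and-lookup pattern that blobs_, blob_names_ and blob_names_index_ follow, which is also what has_blob()/blob_by_name() rely on:

#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

// Stand-in for Blob<Dtype>; only the name/index bookkeeping is illustrated.
struct FakeBlob { int id; };

int main() {
  std::vector<std::shared_ptr<FakeBlob> > blobs;   // plays the role of blobs_
  std::vector<std::string> blob_names;             // plays the role of blob_names_
  std::map<std::string, int> blob_names_index;     // plays the role of blob_names_index_

  // Registering a new blob mirrors what AppendTop does: the id is the current
  // size of blobs_, and the <name, id> pair goes into the map.
  const std::string name = "conv1";
  const int blob_id = static_cast<int>(blobs.size());
  blobs.push_back(std::shared_ptr<FakeBlob>(new FakeBlob{blob_id}));
  blob_names.push_back(name);
  blob_names_index[name] = blob_id;

  // Lookup by name: the pattern behind has_blob() / blob_by_name().
  std::map<std::string, int>::const_iterator it = blob_names_index.find(name);
  if (it != blob_names_index.end()) {
    std::cout << "blob '" << name << "' has id " << it->second << std::endl;
  }
  return 0;
}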
================================ AppendTop ================================================

// Helper for Net::Init: add a new input or top blob to the net.  (Inputs have
// layer_id == -1, tops have layer_id >= 0.)
template <typename Dtype>
void Net<Dtype>::AppendTop(const NetParameter& param, const int layer_id,
                           const int top_id, set<string>* available_blobs,
                           map<string, int>* blob_name_to_idx) {
  shared_ptr<LayerParameter> layer_param(
      (layer_id >= 0) ? (new LayerParameter(param.layer(layer_id))) : NULL);
  const string& blob_name = layer_param ?
      (layer_param->top_size() > top_id ?
          layer_param->top(top_id) : "(automatic)") : param.input(top_id);
  // Check if we are doing in-place computation
  if (blob_name_to_idx && layer_param && layer_param->bottom_size() > top_id &&
      blob_name == layer_param->bottom(top_id)) {
    // In-place computation
    LOG_IF(INFO, Caffe::root_solver())
        << layer_param->name() << " -> " << blob_name << " (in-place)";
    top_vecs_[layer_id].push_back(blobs_[(*blob_name_to_idx)[blob_name]].get());
    top_id_vecs_[layer_id].push_back((*blob_name_to_idx)[blob_name]);
  } else if (blob_name_to_idx &&
             blob_name_to_idx->find(blob_name) != blob_name_to_idx->end()) {
    // If we are not doing in-place computation but have duplicated blobs,
    // raise an error.
    LOG(FATAL) << "Top blob '" << blob_name
               << "' produced by multiple sources.";
  } else {
    // Normal output.
    if (Caffe::root_solver()) {
      if (layer_param) {
        LOG(INFO) << layer_param->name() << " -> " << blob_name;
      } else {
        LOG(INFO) << "Input " << top_id << " -> " << blob_name;
      }
    }
    shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());
    const int blob_id = blobs_.size();  // blobs_ only stores intermediate results; blob_id is refreshed each time a new top blob is visited.
    blobs_.push_back(blob_pointer);
    blob_names_.push_back(blob_name);
    blob_need_backward_.push_back(false);
    // blob_name_to_idx is a local variable of Init; it bridges the current layer's top blobs and the next layer's bottom blobs.
    // Its <name, id> pairs are inserted as the net is built layer by layer, so both names and ids are unique (names because it is a map, ids because they run 0, 1, 2, ...).
    // Like blobs_, blob_name_to_idx is updated every time a new top blob is visited in the "normal output" case.
    if (blob_name_to_idx) { (*blob_name_to_idx)[blob_name] = blob_id; }  // operator[] inserts the new element into the (associative) map.
    if (layer_id == -1) {
      // Set the (explicitly specified) dimensions of the input blob.
      if (param.input_dim_size() > 0) {
        blob_pointer->Reshape(param.input_dim(top_id * 4),
                              param.input_dim(top_id * 4 + 1),
                              param.input_dim(top_id * 4 + 2),
                              param.input_dim(top_id * 4 + 3));
      } else {
        blob_pointer->Reshape(param.input_shape(top_id));
      }
      net_input_blob_indices_.push_back(blob_id);
      net_input_blobs_.push_back(blob_pointer.get());  // layer_id == -1 means this "layer" is a net input, so register a new input blob.
    } else {
      top_id_vecs_[layer_id].push_back(blob_id);
      top_vecs_[layer_id].push_back(blob_pointer.get());  // layer_id != -1 means this is a real layer, so register a new top blob in top_vecs_ / top_id_vecs_.
    }
  }
  if (available_blobs) { available_blobs->insert(blob_name); }
}
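The in-place branch above fires when a top name equals the bottom name at the same index (the typical ReLU pattern, bottom: "conv1", top: "conv1"): no new blob is created and the existing blob_id is reused, which is why blob_ids can repeat in top_id_vecs_. A minimal standalone sketch of just that check (not Caffe code; the helper and the blob names are made up):

#include <iostream>
#include <string>
#include <vector>

// A top is "in-place" when its name matches the bottom at the same index.
bool IsInPlace(const std::vector<std::string>& bottoms,
               const std::vector<std::string>& tops, int top_id) {
  return top_id < static_cast<int>(bottoms.size()) &&
         top_id < static_cast<int>(tops.size()) &&
         bottoms[top_id] == tops[top_id];
}

int main() {
  // A ReLU-style layer that writes its output back into its input blob.
  std::vector<std::string> bottoms(1, "conv1");
  std::vector<std::string> tops(1, "conv1");
  std::cout << std::boolalpha << IsInPlace(bottoms, tops, 0) << std::endl;  // true
  return 0;
}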
================================ AppendBottom ================================================

// Helper for Net::Init: add a new bottom blob to the net.
template <typename Dtype>
int Net<Dtype>::AppendBottom(const NetParameter& param, const int layer_id,
    const int bottom_id, set<string>* available_blobs,
    map<string, int>* blob_name_to_idx) {
  const LayerParameter& layer_param = param.layer(layer_id);
  const string& blob_name = layer_param.bottom(bottom_id);
  if (available_blobs->find(blob_name) == available_blobs->end()) {
    LOG(FATAL) << "Unknown bottom blob '" << blob_name << "' (layer '"
               << layer_param.name() << "', bottom index " << bottom_id << ")";
  }
  const int blob_id = (*blob_name_to_idx)[blob_name];  // blob_name_to_idx is a map with unique keys; the entry was created earlier, when this blob appeared as a top (or net input): (*blob_name_to_idx)[blob_name] = blob_id.
  LOG_IF(INFO, Caffe::root_solver())
      << layer_names_[layer_id] << " <- " << blob_name;
  bottom_vecs_[layer_id].push_back(blobs_[blob_id].get());  // shared_ptr::get() extracts the raw pointer of the intermediate blob stored in blobs_.
  bottom_id_vecs_[layer_id].push_back(blob_id);
  available_blobs->erase(blob_name);
  bool propagate_down = true;
  // Check if the backpropagation on bottom_id should be skipped
  if (layer_param.propagate_down_size() > 0)
    propagate_down = layer_param.propagate_down(bottom_id);
  const bool need_backward = blob_need_backward_[blob_id] && propagate_down;  // propagate_down == true means this bottom takes part in backprop; otherwise BP is skipped.
  bottom_need_backward_[layer_id].push_back(need_backward);
  return blob_id;
}
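Because AppendBottom only looks up an existing blob_id and stores a raw pointer, a layer's bottom blob is literally the same Blob object as the producing layer's top blob. A minimal sketch of that aliasing, with a made-up FakeBlob standing in for Blob<Dtype> (not Caffe code):

#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <vector>

// Stand-in blob; only pointer identity matters for this illustration.
struct FakeBlob { std::vector<float> data; };

int main() {
  std::vector<std::shared_ptr<FakeBlob> > blobs;   // blobs_ owns the storage
  std::map<std::string, int> blob_name_to_idx;     // name -> blob_id
  std::vector<std::vector<FakeBlob*> > top_vecs(2), bottom_vecs(2);

  // Layer 0 produces top "conv1" (AppendTop: new blob, new id).
  blob_name_to_idx["conv1"] = static_cast<int>(blobs.size());
  blobs.push_back(std::shared_ptr<FakeBlob>(new FakeBlob));
  top_vecs[0].push_back(blobs.back().get());

  // Layer 1 consumes bottom "conv1" (AppendBottom: look up the existing id).
  const int blob_id = blob_name_to_idx["conv1"];
  bottom_vecs[1].push_back(blobs[blob_id].get());

  // One layer's top and the next layer's bottom are the same object.
  assert(top_vecs[0][0] == bottom_vecs[1][0]);
  return 0;
}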
================================ AppendParam ================================================

template <typename Dtype>
void Net<Dtype>::AppendParam(const NetParameter& param, const int layer_id,
                             const int param_id) {
  const LayerParameter& layer_param = layers_[layer_id]->layer_param();  // layer_param() of the Layer template class returns its LayerParameter member.
  const int param_size = layer_param.param_size();
  string param_name =
      (param_size > param_id) ? layer_param.param(param_id).name() : "";
  if (param_name.size()) {
    param_display_names_.push_back(param_name);  // param_name is the name member of ParamSpec; if it is non-empty it is pushed into param_display_names_, otherwise the numeric param_id is pushed instead.
  } else {
    ostringstream param_display_name;
    param_display_name << param_id;
    param_display_names_.push_back(param_display_name.str());
  }
  // Append the parameter blob. net_param_id and param_id_vecs_ are updated on every call.
  const int net_param_id = params_.size();  // params_ holds every parameter blob of the whole net, regardless of whether it has a non-empty name or is shared.
  params_.push_back(layers_[layer_id]->blobs()[param_id]);  // Push the current layer's current parameter blob into params_.
  param_id_vecs_[layer_id].push_back(net_param_id);  // Store the net's parameters layer by layer; the stored element is an index (int) into params_.
  param_layer_indices_.push_back(make_pair(layer_id, param_id));  // param_layer_indices_ stores the pair <layer_id, param_id> of the current parameter.
  // Fetch the ParamSpec for this param_id; if param_id >= param_size the default_param_spec is used. Note that param_size <= num_param_blobs.
  ParamSpec default_param_spec;
  const ParamSpec* param_spec = (layer_param.param_size() > param_id) ?
      &layer_param.param(param_id) : &default_param_spec;
  if (!param_size || !param_name.size() || (param_name.size() &&
      param_names_index_.find(param_name) == param_names_index_.end())) {
    // This layer "owns" this parameter blob -- it is either anonymous
    // (i.e., not given a param_name) or explicitly given a name that we
    // haven't already seen.
    // Conversely, if param_name is non-empty and is already in param_names_index_, the parameter already belongs to an earlier layer, i.e. it is shared between layers.
    // As the comment on ParamSpec.name in caffe.proto says, "To share a parameter between two layers, give it a (non-empty) name": a shared parameter always has a non-empty name.
    param_owners_.push_back(-1);  // param_owners_ stores the "owner" of each parameter; -1 means the current layer itself is the owner.
    // Record param_name.
    if (param_name.size()) {
      // param_names_index_ maps the net's non-empty parameter names to indices.
      // The name is the ParamSpec name, and since "To share a parameter between two layers, give it a (non-empty) name", this map stores <name of a shareable parameter, its index>.
      param_names_index_[param_name] = net_param_id;  // net_param_id is refreshed on every call, but it is only recorded in param_names_index_ when param_name is non-empty.
    }
    // Record the learnable parameter.
    const int learnable_param_id = learnable_params_.size();
    learnable_params_.push_back(params_[net_param_id].get());  // Push the learnable parameter (the Layer template class stores its learnable parameters in its blobs_ member), then push its id.
    learnable_param_ids_.push_back(learnable_param_id);
    has_params_lr_.push_back(param_spec->has_lr_mult());
    has_params_decay_.push_back(param_spec->has_decay_mult());
    params_lr_.push_back(param_spec->lr_mult());
    params_weight_decay_.push_back(param_spec->decay_mult());
  } else {
    // Named param blob with name we've seen before: share params
    const int owner_net_param_id = param_names_index_[param_name];  // Because "To share a parameter between two layers, give it a (non-empty) name", this fetches the net_param_id of the shared parameter's owner.
    param_owners_.push_back(owner_net_param_id);
    const pair<int, int>& owner_index =
        param_layer_indices_[owner_net_param_id];  // The <layer_id, param_id> pair of the owner of the shared (non-empty-named) parameter.
    const int owner_layer_id = owner_index.first;
    const int owner_param_id = owner_index.second;
    LOG_IF(INFO, Caffe::root_solver()) << "Sharing parameters '" << param_name
        << "' owned by "
        << "layer '" << layer_names_[owner_layer_id] << "', param "
        << "index " << owner_param_id;
    Blob<Dtype>* this_blob = layers_[layer_id]->blobs()[param_id].get();  // The current layer's parameter blob.
    Blob<Dtype>* owner_blob =
        layers_[owner_layer_id]->blobs()[owner_param_id].get();  // The owner layer's corresponding parameter blob.
    const int param_size = layer_param.param_size();
    if (param_size > param_id && (layer_param.param(param_id).share_mode() ==
                                  ParamSpec_DimCheckMode_PERMISSIVE)) {
      // Permissive dimension checking -- only check counts are the same.
      CHECK_EQ(this_blob->count(), owner_blob->count())
          << "Cannot share param '" << param_name << "' owned by layer '"
          << layer_names_[owner_layer_id] << "' with layer '"
          << layer_names_[layer_id] << "'; count mismatch.  Owner layer param "
          << "shape is " << owner_blob->shape_string() << "; sharing layer "
          << "shape is " << this_blob->shape_string();
    } else {
      // Strict dimension checking -- all dims must be the same.
      CHECK(this_blob->shape() == owner_blob->shape())
          << "Cannot share param '" << param_name << "' owned by layer '"
          << layer_names_[owner_layer_id] << "' with layer '"
          << layer_names_[layer_id] << "'; shape mismatch.  Owner layer param "
          << "shape is " << owner_blob->shape_string() << "; sharing layer "
          << "expects shape " << this_blob->shape_string();
    }
    // Fetch the owner's learnable_param_id and push it into learnable_param_ids_.
    // The blob itself is NOT pushed into learnable_params_ again (only the id is), which avoids storing the shared parameter blob twice for the current layer and the owner layer.
    const int learnable_param_id = learnable_param_ids_[owner_net_param_id];
    learnable_param_ids_.push_back(learnable_param_id);
    if (param_spec->has_lr_mult()) {
      if (has_params_lr_[learnable_param_id]) {
        CHECK_EQ(param_spec->lr_mult(), params_lr_[learnable_param_id])
            << "Shared param '" << param_name << "' has mismatched lr_mult.";
      } else {
        has_params_lr_[learnable_param_id] = true;
        params_lr_[learnable_param_id] = param_spec->lr_mult();
      }
    }
    if (param_spec->has_decay_mult()) {
      if (has_params_decay_[learnable_param_id]) {
        CHECK_EQ(param_spec->decay_mult(),
                 params_weight_decay_[learnable_param_id])
            << "Shared param '" << param_name << "' has mismatched decay_mult.";
      } else {
        has_params_decay_[learnable_param_id] = true;
        params_weight_decay_[learnable_param_id] = param_spec->decay_mult();
      }
    }
  }
}
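Stripped of Caffe's types, the owner/sharer bookkeeping above maintains one invariant: every parameter blob gets a slot in params_, a shared blob appears in learnable_params_ only once (under its owner), and learnable_param_ids_ maps each params_ slot to that single entry. A minimal sketch of the invariant (made-up FakeBlob type, not Caffe code):

#include <cassert>
#include <memory>
#include <vector>

struct FakeBlob { std::vector<float> data; };

int main() {
  std::vector<std::shared_ptr<FakeBlob> > params;  // plays the role of params_
  std::vector<FakeBlob*> learnable_params;         // plays the role of learnable_params_
  std::vector<int> learnable_param_ids;            // plays the role of learnable_param_ids_

  std::shared_ptr<FakeBlob> owner_blob(new FakeBlob);
  std::shared_ptr<FakeBlob> sharer_blob(new FakeBlob);  // the sharing layer's own Blob object

  // Owner layer: the blob is appended to both params_ and learnable_params_.
  params.push_back(owner_blob);
  learnable_param_ids.push_back(static_cast<int>(learnable_params.size()));
  learnable_params.push_back(owner_blob.get());

  // Sharing layer: appended to params_, but only the owner's id is recorded.
  params.push_back(sharer_blob);
  learnable_param_ids.push_back(learnable_param_ids[0]);

  // The invariants documented in net.hpp:
  assert(learnable_param_ids.size() == params.size());
  assert(learnable_params[learnable_param_ids[0]] == params[0].get());  // owner
  assert(learnable_params[learnable_param_ids[1]] != params[1].get());  // sharer: gives its owner instead
  assert(learnable_params.size() == 1);  // the shared blob appears only once
  return 0;
}

In the real Net, ShareWeights() later makes the sharer's blob share the owner's data and diff, so the two layers really do update one set of weights.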
==================================== Init ================================================

template <typename Dtype>
void Net<Dtype>::Init(const NetParameter& in_param) {
  CHECK(Caffe::root_solver() || root_net_)
      << "root_net_ needs to be set for all non-root solvers";
  // Set phase from the state.
  phase_ = in_param.state().phase();
  // Filter layers based on their include/exclude rules and
  // the current NetState.
  NetParameter filtered_param;
  FilterNet(in_param, &filtered_param);
  LOG_IF(INFO, Caffe::root_solver())
      << "Initializing net from parameters: " << std::endl
      << filtered_param.DebugString();
  // Create a copy of filtered_param with splits added where necessary.
  NetParameter param;
  InsertSplits(filtered_param, &param);
  // Basically, build all the layers and set up their connections.
  name_ = param.name();
  map<string, int> blob_name_to_idx;  // A map, so its keys are unique; it starts empty and is filled by AppendTop.
  set<string> available_blobs;        // A set, so its elements are unique.
  CHECK(param.input_dim_size() == 0 || param.input_shape_size() == 0)
      << "Must specify either input_shape OR deprecated input_dim, not both.";
  if (param.input_dim_size() > 0) {
    // Deprecated 4D dimensions.
    CHECK_EQ(param.input_size() * 4, param.input_dim_size())
        << "Incorrect input blob dimension specifications.";
  } else {
    CHECK_EQ(param.input_size(), param.input_shape_size())
        << "Exactly one input_shape must be specified per input.";
  }
  memory_used_ = 0;
  // set the input blobs
  for (int input_id = 0; input_id < param.input_size(); ++input_id) {
    const int layer_id = -1;  // inputs have fake layer ID -1
    AppendTop(param, layer_id, input_id, &available_blobs, &blob_name_to_idx);  // available_blobs collects blob names; blob_name_to_idx gets its first entries here.
  }
  // For each layer, set up its input and output
  bottom_vecs_.resize(param.layer_size());
  top_vecs_.resize(param.layer_size());
  bottom_id_vecs_.resize(param.layer_size());
  param_id_vecs_.resize(param.layer_size());
  top_id_vecs_.resize(param.layer_size());
  bottom_need_backward_.resize(param.layer_size());
  for (int layer_id = 0; layer_id < param.layer_size(); ++layer_id) {
    // For non-root solvers, whether this layer is shared from root_net_.
    bool share_from_root = !Caffe::root_solver()
        && root_net_->layers_[layer_id]->ShareInParallel();
    // Inherit phase from net if unset.
    if (!param.layer(layer_id).has_phase()) {
      param.mutable_layer(layer_id)->set_phase(phase_);  // phase_ is the net's phase; it is used to set the layer's phase.
    }
    // Setup layer.
    const LayerParameter& layer_param = param.layer(layer_id);
    // Check that the number of propagate_down entries in the LayerParameter matches the number of bottoms.
    if (layer_param.propagate_down_size() > 0) {
      CHECK_EQ(layer_param.propagate_down_size(),
          layer_param.bottom_size())
          << "propagate_down param must be specified "
          << "either 0 or bottom_size times ";
    }
    // Creating Layer
    if (share_from_root) {
      LOG(INFO) << "Sharing layer " << layer_param.name() << " from root net";
      layers_.push_back(root_net_->layers_[layer_id]);
      layers_[layer_id]->SetShared(true);  // SetShared is a method of the Layer template class.
    } else {
      layers_.push_back(LayerRegistry<Dtype>::CreateLayer(layer_param));
    }
    layer_names_.push_back(layer_param.name());  // Record the layer name.
    LOG_IF(INFO, Caffe::root_solver())
        << "Creating Layer " << layer_param.name();
    bool need_backward = false;
    // Figure out this layer's input and output
    for (int bottom_id = 0; bottom_id < layer_param.bottom_size();
         ++bottom_id) {
      const int blob_id = AppendBottom(param, layer_id, bottom_id,
                                       &available_blobs, &blob_name_to_idx);
      // If a blob needs backward, this layer should provide it.
      need_backward |= blob_need_backward_[blob_id];  // Once any bottom blob needs backward, need_backward stays true after this loop.
    }
    int num_top = layer_param.top_size();
    for (int top_id = 0; top_id < num_top; ++top_id) {
      AppendTop(param, layer_id, top_id, &available_blobs, &blob_name_to_idx);  // AppendTop pushes a new element (false by default) onto blob_need_backward_.
    }
    // If the layer specifies that AutoTopBlobs() -> true and the LayerParameter
    // specified fewer than the required number (as specified by
    // ExactNumTopBlobs() or MinTopBlobs()), allocate them here.
    Layer<Dtype>* layer = layers_[layer_id].get();
    if (layer->AutoTopBlobs()) {
      const int needed_num_top =
          std::max(layer->MinTopBlobs(), layer->ExactNumTopBlobs());
      for (; num_top < needed_num_top; ++num_top) {
        // Add "anonymous" top blobs -- do not modify available_blobs or
        // blob_name_to_idx as we don't want these blobs to be usable as input
        // to other layers.
        AppendTop(param, layer_id, num_top, NULL, NULL);
      }
    }
    // After this layer is connected, set it up.
    if (share_from_root) {
      // Set up size of top blobs using root_net_
      const vector<Blob<Dtype>*>& base_top = root_net_->top_vecs_[layer_id];
      const vector<Blob<Dtype>*>& this_top = this->top_vecs_[layer_id];
      for (int top_id = 0; top_id < base_top.size(); ++top_id) {
        this_top[top_id]->ReshapeLike(*base_top[top_id]);
        LOG(INFO) << "Created top blob " << top_id << " (shape: "
            << this_top[top_id]->shape_string() << ") for shared layer "
            << layer_param.name();
      }
    } else {
      layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]);
    }
    LOG_IF(INFO, Caffe::root_solver())
        << "Setting up " << layer_names_[layer_id];
    // blob_loss_weights_ is updated on every iteration.
    for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) {
      if (blob_loss_weights_.size() <= top_id_vecs_[layer_id][top_id]) {
        blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0));
      }
      // top_id_vecs_ stores blob_ids; every new blob gets a blob_id, but a blob_id may repeat (in-place computation).
      blob_loss_weights_[top_id_vecs_[layer_id][top_id]] = layer->loss(top_id);  // loss() returns the loss_weight: the Layer's SetUp calls SetLossWeights, which stores the loss_weight in the layer's loss_ member.
      LOG_IF(INFO, Caffe::root_solver())
          << "Top shape: " << top_vecs_[layer_id][top_id]->shape_string();
      if (layer->loss(top_id)) {
        LOG_IF(INFO, Caffe::root_solver())
            << "    with loss weight " << layer->loss(top_id);
      }
      memory_used_ += top_vecs_[layer_id][top_id]->count();
    }
    LOG_IF(INFO, Caffe::root_solver())
        << "Memory required for data: " << memory_used_ * sizeof(Dtype);
    const int param_size = layer_param.param_size();
    const int num_param_blobs = layers_[layer_id]->blobs().size();
    // param_size is the number of ParamSpec entries in layer_param; num_param_blobs is the number of learnable parameter blobs of the layer; param_size <= num_param_blobs.
    CHECK_LE(param_size, num_param_blobs)
        << "Too many params specified for layer " << layer_param.name();
    ParamSpec default_param_spec;
    for (int param_id = 0; param_id < num_param_blobs; ++param_id) {
      const ParamSpec* param_spec = (param_id < param_size) ?
          &layer_param.param(param_id) : &default_param_spec;
      const bool param_need_backward = param_spec->lr_mult() != 0;  // A nonzero lr_mult means this parameter needs backward.
      need_backward |= param_need_backward;  // Again, once any parameter needs backward, need_backward stays true after this loop.
      layers_[layer_id]->set_param_propagate_down(param_id,
                                                  param_need_backward);  // set_param_propagate_down (a Layer method) records whether the parameter blob needs its diff computed during backward.
    }
    for (int param_id = 0; param_id < num_param_blobs; ++param_id) {
      // Append the parameter blobs. If the layer has no parameter blobs (num_param_blobs == 0), e.g. ReLU, the loop body never runs and nothing is appended.
      // AppendParam only does the bookkeeping for the layer's parameter blobs; it does not modify any backward-related flags.
      AppendParam(param, layer_id, param_id);
    }
    // Finally, set the backward flag
    // layer_need_backward_ is filled here.
    layer_need_backward_.push_back(need_backward);
    // AppendTop pushed a default false onto blob_need_backward_ for every top blob of this layer; if the layer needs backward, those entries are now set to true.
    if (need_backward) {
      for (int top_id = 0; top_id < top_id_vecs_[layer_id].size(); ++top_id) {
        blob_need_backward_[top_id_vecs_[layer_id][top_id]] = true;
      }
    }
  }
  // Go through the net backwards to determine which blobs contribute to the
  // loss. We can skip backward computation for blobs that don't contribute
  // to the loss.
  // Also checks if all bottom blobs don't need backward computation (possible
  // because the skip_propagate_down param) and so we can skip backward
  // computation for the entire layer.
  // Note: the backward flags above were set in forward order; the pass below walks the layers in reverse and corrects them.
  // Whether a layer needs backward computation depends on two things: (1) whether its top blobs contribute to the loss, and (2) whether its bottom blobs need backward computation (a Data layer, for example, usually does not).
  set<string> blobs_under_loss;
  set<string> blobs_skip_backp;
  for (int layer_id = layers_.size() - 1; layer_id >= 0; --layer_id) {
    bool layer_contributes_loss = false;
    bool layer_skip_propagate_down = true;
    // layer_skip_propagate_down == true means none of this layer's bottom blobs need backward computation, so the whole layer can skip backward.
    // Its meaning is the opposite of the propagate_down field of LayerParameter in caffe.proto.
    for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) {
      const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]];
      if (layers_[layer_id]->loss(top_id) ||
          (blobs_under_loss.find(blob_name) != blobs_under_loss.end())) {
        layer_contributes_loss = true;
      }
      if (blobs_skip_backp.find(blob_name) == blobs_skip_backp.end()) {
        layer_skip_propagate_down = false;
      }
      if (layer_contributes_loss && !layer_skip_propagate_down) break;  // Exits this top_id loop early.
    }
    // If this layer can skip backward computation, also all his bottom blobs
    // don't need backpropagation
    if (layer_need_backward_[layer_id] && layer_skip_propagate_down) {
      layer_need_backward_[layer_id] = false;
      for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size();
           ++bottom_id) {
        bottom_need_backward_[layer_id][bottom_id] = false;
      }
    }
    if (!layer_contributes_loss) { layer_need_backward_[layer_id] = false; }
    if (Caffe::root_solver()) {
      if (layer_need_backward_[layer_id]) {
        LOG(INFO) << layer_names_[layer_id] << " needs backward computation.";
      } else {
        LOG(INFO) << layer_names_[layer_id]
            << " does not need backward computation.";
      }
    }
    for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size();  // Correct the flags that were set in forward order above.
         ++bottom_id) {
      if (layer_contributes_loss) {
        const string& blob_name =
            blob_names_[bottom_id_vecs_[layer_id][bottom_id]];
        blobs_under_loss.insert(blob_name);  // Record that this blob feeds the loss.
      } else {
        bottom_need_backward_[layer_id][bottom_id] = false;
      }
      if (!bottom_need_backward_[layer_id][bottom_id]) {
        const string& blob_name =
            blob_names_[bottom_id_vecs_[layer_id][bottom_id]];
        blobs_skip_backp.insert(blob_name);  // Record that this blob can skip backprop.
      }
    }
  }
  // Handle force_backward if needed (force_backward is a field of NetParameter).
  if (param.force_backward()) {
    for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) {
      layer_need_backward_[layer_id] = true;
      for (int bottom_id = 0;
           bottom_id < bottom_need_backward_[layer_id].size(); ++bottom_id) {
        bottom_need_backward_[layer_id][bottom_id] =
            bottom_need_backward_[layer_id][bottom_id] ||
            layers_[layer_id]->AllowForceBackward(bottom_id);
        blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] =
            blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] ||
            bottom_need_backward_[layer_id][bottom_id];
      }
      for (int param_id = 0; param_id < layers_[layer_id]->blobs().size();
           ++param_id) {
        layers_[layer_id]->set_param_propagate_down(param_id, true);
      }
    }
  }
  // In the end, all remaining blobs are considered output blobs.
  for (set<string>::iterator it = available_blobs.begin();
      it != available_blobs.end(); ++it) {
    LOG_IF(INFO, Caffe::root_solver())
        << "This network produces output " << *it;
    net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get());
    net_output_blob_indices_.push_back(blob_name_to_idx[*it]);
  }
  for (size_t blob_id = 0; blob_id < blob_names_.size(); ++blob_id) {
    blob_names_index_[blob_names_[blob_id]] = blob_id;  // blob_names_index_ (a map) is filled here for the first time, one entry per blob.
  }
  for (size_t layer_id = 0; layer_id < layer_names_.size(); ++layer_id) {
    layer_names_index_[layer_names_[layer_id]] = layer_id;  // layer_names_index_ (a map) is filled here for the first time, one entry per layer.
  }
  ShareWeights();
  debug_info_ = param.debug_info();
  LOG_IF(INFO, Caffe::root_solver()) << "Network initialization done.";
}

Init, together with AppendTop, AppendBottom and AppendParam above, is the most important part of the class.
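The reverse pass near the end of Init is the trickiest part: walking the layers from last to first, a layer keeps its backward flag only if one of its tops contributes to the loss, and its bottoms are then marked as contributing too. A minimal standalone sketch of that propagation (not Caffe code; the net is described only by made-up per-layer top/bottom names and a loss flag, and the bottoms-empty check stands in for the forward-order initialization that the real Init does):

#include <iostream>
#include <set>
#include <string>
#include <vector>

struct SimpleLayer {
  std::string name;
  std::vector<std::string> bottoms;
  std::vector<std::string> tops;
  bool produces_loss;  // true for a loss layer
};

int main() {
  // data -> conv1 -> loss : only conv1 and loss should need backward.
  std::vector<SimpleLayer> layers;
  layers.push_back(SimpleLayer{"data",  {},        {"data"},  false});
  layers.push_back(SimpleLayer{"conv1", {"data"},  {"conv1"}, false});
  layers.push_back(SimpleLayer{"loss",  {"conv1"}, {"loss"},  true});

  std::set<std::string> blobs_under_loss;
  std::vector<bool> layer_need_backward(layers.size(), true);

  for (int i = static_cast<int>(layers.size()) - 1; i >= 0; --i) {
    bool contributes_loss = layers[i].produces_loss;
    for (size_t t = 0; t < layers[i].tops.size(); ++t) {
      if (blobs_under_loss.count(layers[i].tops[t])) contributes_loss = true;
    }
    // A layer with no bottoms (e.g. a data layer) never needs backward; in the
    // real Init this comes from the forward-order flags rather than this test.
    if (!contributes_loss || layers[i].bottoms.empty()) {
      layer_need_backward[i] = false;
    }
    if (contributes_loss) {
      // This layer's bottoms now also count as contributing to the loss.
      for (size_t b = 0; b < layers[i].bottoms.size(); ++b) {
        blobs_under_loss.insert(layers[i].bottoms[b]);
      }
    }
  }
  for (size_t i = 0; i < layers.size(); ++i) {
    std::cout << layers[i].name
              << (layer_need_backward[i] ? " needs backward computation."
                                         : " does not need backward computation.")
              << std::endl;
  }
  return 0;
}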
Now let's look at some of the other functions:
================================ FilterNet ========================================

FilterNet() removes the layers that are excluded for the current phase/level/stage.

template <typename Dtype>
void Net<Dtype>::FilterNet(const NetParameter& param,
    NetParameter* param_filtered) {
  NetState net_state(param.state());
  param_filtered->CopyFrom(param);
  param_filtered->clear_layer();
  for (int i = 0; i < param.layer_size(); ++i) {
    const LayerParameter& layer_param = param.layer(i);
    const string& layer_name = layer_param.name();
    CHECK(layer_param.include_size() == 0 || layer_param.exclude_size() == 0)
          << "Specify either include rules or exclude rules; not both.";
    // If no include rules are specified, the layer is included by default and
    // only excluded if it meets one of the exclude rules.
    bool layer_included = (layer_param.include_size() == 0);
    for (int j = 0; layer_included && j < layer_param.exclude_size(); ++j) {
      if (StateMeetsRule(net_state, layer_param.exclude(j), layer_name)) {
        layer_included = false;  // No include rules: the layer is dropped as soon as one exclude rule matches.
      }
    }
    for (int j = 0; !layer_included && j < layer_param.include_size(); ++j) {
      if (StateMeetsRule(net_state, layer_param.include(j), layer_name)) {
        layer_included = true;  // Include rules present: the layer is kept as soon as one include rule matches.
      }
    }
    if (layer_included) {
      param_filtered->add_layer()->CopyFrom(layer_param);
    }
  }
}

StateMeetsRule() checks whether the net's state satisfies a NetStateRule.
ForwardPrefilled() runs the forward pass with the net's input blobs already filled, i.e. it performs one forward pass up front.
Forward() copies the given input blobs into net_input_blobs_, then runs the forward pass and computes the loss; its overload only differs in that the input blobs are passed in serialized as a string.
Backward() runs backpropagation through the whole net.
Reshape() recomputes the blob sizes of every layer, e.g. the size of the output feature maps.
Update() updates the values of the blobs in params_.
ShareTrainedLayersWith(Net* other) shares matching layers' parameters from another net.
CopyTrainedLayersFrom() calls FromProto to assign the source layers' blobs to the target layers' blobs.
ToProto() serializes the net's parameters into a NetParameter proto.
params_lr() and params_weight_decay() return the learning-rate and weight-decay multipliers.
blob_by_name() looks up the blob named blob_name (has_blob() checks whether it exists).
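The include/exclude decision in FilterNet reduces to a small predicate: with no include rules a layer is kept unless an exclude rule matches, and with include rules it is kept only if one of them matches. A minimal sketch of just that logic (not Caffe code; rule matching is reduced to a bool per rule instead of the real StateMeetsRule check):

#include <iostream>
#include <vector>

// matched_includes / matched_excludes hold, for each rule on the layer,
// whether StateMeetsRule would return true for the current NetState.
bool LayerIncluded(const std::vector<bool>& matched_includes,
                   const std::vector<bool>& matched_excludes) {
  bool included = matched_includes.empty();  // default: included when no include rules
  for (size_t j = 0; included && j < matched_excludes.size(); ++j) {
    if (matched_excludes[j]) included = false;  // any matching exclude rule drops the layer
  }
  for (size_t j = 0; !included && j < matched_includes.size(); ++j) {
    if (matched_includes[j]) included = true;   // any matching include rule keeps the layer
  }
  return included;
}

int main() {
  // A layer with one include rule (e.g. "include { phase: TRAIN }") while the
  // net is in the TEST phase: the rule does not match, so the layer is dropped.
  std::vector<bool> includes(1, false), excludes;
  std::cout << std::boolalpha << LayerIncluded(includes, excludes) << std::endl;  // false
  return 0;
}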