OpenPose Training Process Analysis (7)

Summary

DataLayerSetUp

First, cpm_data_layer.cpp calls the DataLayerSetUp function to set up the layer parameters:

template <typename Dtype>
void CPMDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top)

Here transformed_label_ is reshaped to the following dimensions, where num_parts is 56:

this->transformed_label_.Reshape(1, 2*(num_parts+1), height/stride, width/stride);  //Line:91
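
As a quick sanity check on these dimensions, here is a standalone sketch; the crop size of 368 and stride of 8 are the values typically set in the training prototxt and are assumptions here, not read from this file:

#include <cstdio>

int main() {
  const int num_parts = 56;                          // np in the prototxt: 38 PAF channels + 18 heatmaps
  const int channels  = 2 * (num_parts + 1);         // 114 = 57 weight/mask channels + 57 label channels
  const int width = 368, height = 368, stride = 8;   // assumed crop size and stride from the prototxt
  std::printf("transformed_label_: 1 x %d x %d x %d\n",
              channels, height / stride, width / stride);  // 1 x 114 x 46 x 46
  return 0;
}

The first num_parts+1 channels hold per-channel loss weights (from mask_miss) plus a background weight; the remaining num_parts+1 channels hold the 38 PAF maps, the 18 keypoint heatmaps, and a background heatmap, as the generateLabelMap code below shows.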

load_batch

Next, the load_batch function is called:

template <typename Dtype>
void CPMDataLayer<Dtype>::load_batch(Batch<Dtype>* batch)

Transform is not called

because datum.encoded() = false (see Training Process Analysis (4) for the details of encoded):

if (datum.encoded()) {
      this->cpm_data_transformer_->Transform(cv_img, &(this->transformed_data_));
    }

Transform_nv

Transform_nv is then called:

else {
      this->cpm_data_transformer_->Transform_nv(datum,   //Datum& datum: the datum just read from the LMDB
        &(this->transformed_data_),  //this->transformed_data_.set_cpu_data(top_data + offset_data);
        &(this->transformed_label_), cnt);  //this->transformed_label_.set_cpu_data(top_label + offset_label);
      ++cnt;  //cnt is initialized to 0 before the batch_size for loop and incremented once per iteration
    }

template <typename Dtype>
void CPMDataTransformer<Dtype>::Transform_nv(const Datum& datum, Dtype* transformed_data, Dtype* transformed_label, int cnt)

ReadMetaData(meta, data, offset3, offset1); //data = datum.data()    Line:514 

ReadMetaData(meta, data, offset3, offset1); reads the bytes in data sequentially into meta; the layout mirrors the output.txt format produced by genLMDB.py.
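
For reference, here is an abridged sketch of the structures that ReadMetaData fills, keeping only the fields used later in this post; the field names follow cpm_data_transformer, but the exact types and the full member list are assumptions (see the header for the real definitions):

#include <vector>
#include <opencv2/core/core.hpp>

struct Joints {
  std::vector<cv::Point2f> joints;    // keypoint coordinates
  std::vector<float> isVisible;       // visibility flag; values <= 1 get a response, 2/3 are skipped below
};

struct MetaDataSketch {               // abridged; the real MetaData has more members
  cv::Point2f objpos;                 // center of the main annotated person
  float scale_self;                   // scale of the main person
  Joints joint_self;                  // keypoints of the main person
  int numOtherPeople;                 // number of other annotated people in the image
  std::vector<cv::Point2f> objpos_other;
  std::vector<float> scale_other;
  std::vector<Joints> joint_others;   // keypoints of the other people
};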


TransformMetaJoints

  TransformMetaJoints(meta);

The 17 COCO keypoints are converted to 18 (an extra neck point is added).


[Figure: JSON_17num_parts.png]
template <typename Dtype>
void CPMDataTransformer<Dtype>::TransformJoints(Joints& j) {
  //transform joints in meta from np_in_lmdb (specified in prototxt) to np (specified in prototxt)
  Joints jo = j;

  if(np == 56){
    int COCO_to_ours_1[18] = {1,6, 7,9,11, 6,8,10, 13,15,17, 12,14,16, 3,2,5,4};  //converts the 17 COCO keypoints to 18
    int COCO_to_ours_2[18] = {1,7, 7,9,11, 6,8,10, 13,15,17, 12,14,16, 3,2,5,4};
    jo.joints.resize(np);
    jo.isVisible.resize(np);
    for(int i=0;i<18;i++){
      jo.joints[i] = (j.joints[COCO_to_ours_1[i]-1] + j.joints[COCO_to_ours_2[i]-1]) * 0.5;
      if(j.isVisible[COCO_to_ours_1[i]-1]==2 || j.isVisible[COCO_to_ours_2[i]-1]==2){
        jo.isVisible[i] = 2;
      }
      else if(j.isVisible[COCO_to_ours_1[i]-1]==3 || j.isVisible[COCO_to_ours_2[i]-1]==3){
        jo.isVisible[i] = 3;
      }
      else {
        jo.isVisible[i] = j.isVisible[COCO_to_ours_1[i]-1] && j.isVisible[COCO_to_ours_2[i]-1];
      }
    }
  }
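
For example, for i = 1 the two index tables give COCO_to_ours_1[1] = 6 and COCO_to_ours_2[1] = 7, which in the COCO order are the two shoulders, so the synthesized point is the neck. A minimal standalone illustration (the coordinate values are made up):

#include <cstdio>
#include <vector>
#include <opencv2/core/core.hpp>

int main() {
  // made-up shoulder coordinates for one person, stored in (1-based) COCO order
  std::vector<cv::Point2f> j(17);
  j[6 - 1] = cv::Point2f(100.f, 50.f);   // COCO keypoint 6
  j[7 - 1] = cv::Point2f(140.f, 54.f);   // COCO keypoint 7
  // i = 1: COCO_to_ours_1[1] = 6, COCO_to_ours_2[1] = 7  ->  average of the two shoulders
  cv::Point2f neck = (j[6 - 1] + j[7 - 1]) * 0.5;
  std::printf("neck = (%.1f, %.1f)\n", neck.x, neck.y);   // (120.0, 52.0)
  return 0;
}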


generateLabelMap

  generateLabelMap(transformed_label, img_aug, meta);

template <typename Dtype>
void CPMDataTransformer<Dtype>::generateLabelMap(Dtype* transformed_label, Mat& img_aug, MetaData meta)

This places the Gaussian responses; the function itself is straightforward. (The reason the keypoint labels in transformed_label start at [(np+1) * channelOffset + g_y * grid_x + g_x] is that the first np+1 channels are already occupied by the mask_miss weights and a background channel, which are filled earlier in generateLabelMap, in the mode > 4 block below.)

if (mode > 4){
    for (int g_y = 0; g_y < grid_y; g_y++){
      for (int g_x = 0; g_x < grid_x; g_x++){
        for (int i = 0; i < np; i++){
          float weight = float(mask_miss_aug.at<uchar>(g_y, g_x)) /255; //mask_miss_aug.at<uchar>(i, j);
          if (meta.joint_self.isVisible[i] != 3){
            transformed_label[i*channelOffset + g_y*grid_x + g_x] = weight;
          }
        }  
        // background channel
        if(mode == 5){
          transformed_label[np*channelOffset + g_y*grid_x + g_x] = float(mask_miss_aug.at<uchar>(g_y, g_x)) /255;
        }
        if(mode > 5){
          transformed_label[np*channelOffset + g_y*grid_x + g_x] = 1;
          transformed_label[(2*np+1)*channelOffset + g_y*grid_x + g_x] = float(mask_all_aug.at<uchar>(g_y, g_x)) /255;
        }
      }
    }
  }
  for (int g_y = 0; g_y < grid_y; g_y++){
    for (int g_x = 0; g_x < grid_x; g_x++){
      for (int i = np+1; i < 2*(np+1); i++){
        if (mode == 6 && i == (2*np + 1))
          continue;
        transformed_label[i*channelOffset + g_y*grid_x + g_x] = 0;
      }
    }
  }
  if (np == 56){
    for (int i = 0; i < 18; i++){
      Point2f center = meta.joint_self.joints[i];
      if(meta.joint_self.isVisible[i] <= 1){
        putGaussianMaps(transformed_label + (i+np+39)*channelOffset, center, param_.stride(), 
                        grid_x, grid_y, param_.sigma()); //self: place the Gaussian response for this keypoint
      }
      for(int j = 0; j < meta.numOtherPeople; j++){ //for every other person
        Point2f center = meta.joint_others[j].joints[i];
        if(meta.joint_others[j].isVisible[i] <= 1){
          putGaussianMaps(transformed_label + (i+np+39)*channelOffset, center, param_.stride(), 
                          grid_x, grid_y, param_.sigma());
        }
      }
    }
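
putGaussianMaps itself is not reproduced in this post; the following is a simplified sketch of what it does, modeled on the original implementation but not taken from it (the 4.6052 cut-off and the clamp to 1 are assumptions):

#include <cmath>
#include <opencv2/core/core.hpp>

template <typename Dtype>
void putGaussianMapsSketch(Dtype* entry, cv::Point2f center, int stride,
                           int grid_x, int grid_y, float sigma) {
  float start = stride / 2.0f - 0.5f;          // image-space center of grid cell (0,0)
  for (int g_y = 0; g_y < grid_y; g_y++) {
    for (int g_x = 0; g_x < grid_x; g_x++) {
      float x = start + g_x * stride;          // map the grid cell back to image coordinates
      float y = start + g_y * stride;
      float d2 = (x - center.x) * (x - center.x) + (y - center.y) * (y - center.y);
      float exponent = d2 / (2.0f * sigma * sigma);
      if (exponent > 4.6052f) continue;        // exp(-4.6052) ~ 0.01: negligible contribution
      int idx = g_y * grid_x + g_x;
      entry[idx] += std::exp(-exponent);       // accumulate responses from multiple people
      if (entry[idx] > 1) entry[idx] = 1;      // cap overlapping responses at 1
    }
  }
}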

The ordering of the 2×19 (PAF) channel pairs:

    int mid_1[19] = {2, 9,  10, 2,  12, 13, 2, 3, 4, 3,  2, 6, 7, 6,  2, 1,  1,  15, 16};
    int mid_2[19] = {9, 10, 11, 12, 13, 14, 3, 4, 5, 17, 6, 7, 8, 18, 1, 15, 16, 17, 18};
    int thre = 1;

    for(int i=0;i<19;i++){   // 19 limbs, 2 channels each: 2×19 = 38 PAF channels
      Mat count = Mat::zeros(grid_y, grid_x, CV_8UC1);
      Joints jo = meta.joint_self;
      if(jo.isVisible[mid_1[i]-1]<=1 && jo.isVisible[mid_2[i]-1]<=1){
        //putVecPeaks
        putVecMaps(transformed_label + (np+ 1+ 2*i)*channelOffset, transformed_label + (np+ 2+ 2*i)*channelOffset, 
                  count, jo.joints[mid_1[i]-1], jo.joints[mid_2[i]-1], param_.stride(), grid_x, grid_y, param_.sigma(), thre); //self
      } //the indices correspond to the COCO-derived keypoint order

      for(int j = 0; j < meta.numOtherPeople; j++){ //for every other person
        Joints jo2 = meta.joint_others[j];
        if(jo2.isVisible[mid_1[i]-1]<=1 && jo2.isVisible[mid_2[i]-1]<=1){
          //putVecPeaks
          putVecMaps(transformed_label + (np+ 1+ 2*i)*channelOffset, transformed_label + (np+ 2+ 2*i)*channelOffset, 
                  count, jo2.joints[mid_1[i]-1], jo2.joints[mid_2[i]-1], param_.stride(), grid_x, grid_y, param_.sigma(), thre); //other people
        }
      }
    }
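
To make the indexing concrete, the sketch below prints which (1-based) keypoint pair and which pair of label channels each of the 19 limbs uses, derived directly from mid_1/mid_2 and the offsets in the putVecMaps call above:

#include <cstdio>

int main() {
  const int np = 56;
  const int mid_1[19] = {2, 9, 10, 2, 12, 13, 2, 3, 4, 3, 2, 6, 7, 6, 2, 1, 1, 15, 16};
  const int mid_2[19] = {9, 10, 11, 12, 13, 14, 3, 4, 5, 17, 6, 7, 8, 18, 1, 15, 16, 17, 18};
  for (int i = 0; i < 19; i++) {
    std::printf("limb %2d: keypoints %2d -> %2d, x in channel %3d, y in channel %3d\n",
                i, mid_1[i], mid_2[i], np + 1 + 2 * i, np + 2 + 2 * i);
  }
  return 0;
}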

The putVecMaps function writes the PAF labels; count starts from zero:

Mat count = Mat::zeros(grid_y, grid_x, CV_8UC1);
template <typename Dtype>
void CPMDataTransformer<Dtype>::putVecMaps(Dtype* entryX, Dtype* entryY, Mat& count, Point2f centerA, Point2f centerB, int stride, int grid_x, int grid_y, float sigma, int thre)
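
The body of putVecMaps is not reproduced here; below is a simplified sketch of the idea, not the exact implementation (it drops the sigma parameter, which this simplified version does not use): every label-grid cell within distance thre of the limb segment receives the unit vector pointing from centerA to centerB, and count averages the vectors where limbs of different people overlap.

#include <cmath>
#include <opencv2/core/core.hpp>

template <typename Dtype>
void putVecMapsSketch(Dtype* entryX, Dtype* entryY, cv::Mat& count,
                      cv::Point2f centerA, cv::Point2f centerB,
                      int stride, int grid_x, int grid_y, int thre) {
  centerA = centerA * (1.0 / stride);          // image coordinates -> label-grid coordinates
  centerB = centerB * (1.0 / stride);
  cv::Point2f bc = centerB - centerA;
  float norm = std::sqrt(bc.x * bc.x + bc.y * bc.y);
  if (norm < 1e-6f) return;                    // degenerate limb, nothing to draw
  bc.x /= norm;  bc.y /= norm;                 // unit vector along the limb

  for (int g_y = 0; g_y < grid_y; g_y++) {
    for (int g_x = 0; g_x < grid_x; g_x++) {
      cv::Point2f pa(g_x - centerA.x, g_y - centerA.y);
      float along = pa.x * bc.x + pa.y * bc.y;            // projection onto the limb
      float dist  = std::abs(pa.x * bc.y - pa.y * bc.x);  // distance to the limb line
      if (along < 0 || along > norm || dist > thre) continue;
      int idx = g_y * grid_x + g_x;
      int c = count.at<uchar>(g_y, g_x);
      // running average so overlapping limbs from several people blend instead of overwrite
      entryX[idx] = (entryX[idx] * c + bc.x) / (c + 1);
      entryY[idx] = (entryY[idx] * c + bc.y) / (c + 1);
      count.at<uchar>(g_y, g_x) = c + 1;
    }
  }
}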
