Code Reading: SECOND (PyTorch version)

There is a lot of code...

Framework

second.pytorch
|---images
|---second
|   |---apex
|   |---builder
|   |---configs
|   |---core
|   |---data
|   |---framework
|   |---kittiviewer
|   |---protos
|   |---pytorch
|   |   |---builder
|   |   |---core
|   |   |---models
|   |   |---utils
|   |---spconv
|   |---utils
|---torchplus

The tree above shows the main project layout; here is a brief note on what the code in each folder does:

  • apex and spconv are the third-party dependency libraries that need to be installed for second.pytorch;
  • builder contains the base code for constructing the networks;
  • configs holds the network configuration files;
  • core provides basic functionality, including implementations such as anchor and box_coder;
  • data is the data-processing module;
  • framework is not entirely clear yet; it appears to be a testing module;
  • kittiviewer is, as the name suggests, the visualization module;
  • protos: the .proto files inside are compiled into the .py modules that the code actually imports; the details are still unclear (to be filled in once understood; see the parsing sketch after this list);
  • pytorch is the core of second.pytorch, containing the training, inference, and network code;
  • utils contains basic utility functions;
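
To make the protos folder a bit more concrete: protoc compiles each .proto schema in second/protos into a *_pb2.py module, and train.py parses a text-format config such as car.fhd.config into the generated message classes. Below is a minimal parsing sketch; it assumes the generated module is second.protos.pipeline_pb2 and the top-level message is TrainEvalPipelineConfig, so verify both names against your checkout.

from google.protobuf import text_format
from second.protos import pipeline_pb2

# Build an empty pipeline message and merge the text-format config into it.
config = pipeline_pb2.TrainEvalPipelineConfig()
with open("second/configs/car.fhd.config", "r") as f:
    text_format.Merge(f.read(), config)

# The parsed object mirrors the hierarchy of the file shown below.
print(config.model.second.voxel_generator.voxel_size)  # [0.05, 0.05, 0.1]
print(config.train_input_reader.batch_size)            # 8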

The configs folder

car.fhd.config

model: {
  second: {
    network_class_name: "VoxelNet"
    # Voxel generation
    voxel_generator {
      point_cloud_range : [0, -40, -3, 70.4, 40, 1]  # point cloud range
      # point_cloud_range : [0, -32.0, -3, 52.8, 32.0, 1]
      voxel_size : [0.05, 0.05, 0.1]  # voxel size
      max_number_of_points_per_voxel : 5  # max number of points per voxel
    }
    # Voxel feature extractor
    voxel_feature_extractor: {
      module_class_name: "SimpleVoxel"
      num_filters: [16]
      with_distance: false
      num_input_features: 4
    }
    # Middle feature extractor
    middle_feature_extractor: {
      module_class_name: "SpMiddleFHD"
      # num_filters_down1: [] # protobuf don't support empty list.
      # num_filters_down2: []
      downsample_factor: 8
      num_input_features: 4
    }
    # RPN network
    rpn: {
      module_class_name: "RPNV2"
      layer_nums: [5]
      layer_strides: [1]
      num_filters: [128]
      upsample_strides: [1]
      num_upsample_filters: [128]
      use_groupnorm: false
      num_groups: 32
      num_input_features: 128
    }
    # Loss functions
    loss: {
      classification_loss: {
        weighted_sigmoid_focal: {
          alpha: 0.25
          gamma: 2.0
          anchorwise_output: true
        }
      }
      localization_loss: {
        weighted_smooth_l1: {
          sigma: 3.0
          code_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
        }
      }
      classification_weight: 1.0
      localization_weight: 2.0
    }
    num_point_features: 4 # model's num point feature should be independent of dataset
    # Outputs
    use_sigmoid_score: true
    encode_background_as_zeros: true
    encode_rad_error_by_sin: true
    sin_error_factor: 1.0

    use_direction_classifier: true # this can help for orientation benchmark
    direction_loss_weight: 0.2 # enough.
    num_direction_bins: 2
    direction_limit_offset: 1

    # Loss
    pos_class_weight: 1.0
    neg_class_weight: 1.0

    loss_norm_type: NormByNumPositives
    # Postprocess
    post_center_limit_range: [0, -40, -2.2, 70.4, 40, 0.8]
    nms_class_agnostic: false # only valid in multi-class nms

    box_coder: {
      ground_box3d_coder: {
        linear_dim: false
        encode_angle_vector: false
      }
    }
    target_assigner: {
      class_settings: {
        anchor_generator_range: {
          sizes: [1.6, 3.9, 1.56] # wlh
          anchor_ranges: [0, -40.0, -1.00, 70.4, 40.0, -1.00] # carefully set z center
          rotations: [0, 1.57] # DON'T modify this unless you are very familiar with my code.
        }
        matched_threshold : 0.6
        unmatched_threshold : 0.45
        class_name: "Car"
        use_rotate_nms: true
        use_multi_class_nms: false
        nms_pre_max_size: 1000
        nms_post_max_size: 100
        nms_score_threshold: 0.3 # 0.4 in submit, but 0.3 can get better hard performance
        nms_iou_threshold: 0.01

        region_similarity_calculator: {
          nearest_iou_similarity: {
          }
        }
      }
      # anchor_generators: {
      #   anchor_generator_stride: {
      #     sizes: [1.6, 3.9, 1.56] # wlh
      #     strides: [0.4, 0.4, 0.0] # if generate only 1 z_center, z_stride will be ignored
      #     offsets: [0.2, -39.8, -1.00] # origin_offset + strides / 2
      #     rotations: [0, 1.57] # DON'T modify this unless you are very familiar with my code.
      #     matched_threshold : 0.6
      #     unmatched_threshold : 0.45
      #   }
      # }
      sample_positive_fraction : -1
      sample_size : 512
      assign_per_class: true
    }
  }
}
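
Before moving on to the input readers, it helps to check what these numbers imply for tensor shapes. A rough sanity check (my own arithmetic, not code from the repo) of the voxel grid and of the BEV feature map that the RPN and anchor generator see:

import numpy as np

point_cloud_range = np.array([0, -40, -3, 70.4, 40, 1])
voxel_size = np.array([0.05, 0.05, 0.1])
downsample_factor = 8  # from middle_feature_extractor

# Number of voxels along x, y, z.
grid_size = np.round((point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size).astype(int)
print(grid_size)                    # [1408 1600   40]

# The sparse middle extractor downsamples x/y by 8 before the RPN.
bev_size = grid_size[:2] // downsample_factor
print(bev_size)                     # [176 200]

# Two rotations (0, 1.57) per location -> number of Car anchors.
print(bev_size.prod() * 2)          # 70400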

# Training input reader; original values: batch_size=8, num_workers=3
train_input_reader: {
  dataset: {
    dataset_class_name: "KittiDataset"
    # kitti_info_path: "/media/yy/960evo/datasets/kitti/kitti_infos_train.pkl"
    # kitti_root_path: "/media/yy/960evo/datasets/kitti"
    kitti_info_path: "/home/cv/文档/datasets/KITTI_PP/kitti_infos_train.pkl"
    kitti_root_path: "/home/cv/文档/datasets/KITTI_PP"
  }

  batch_size: 8
  preprocess: {
    max_number_of_voxels: 17000
    shuffle_points: true
    num_workers: 1
    groundtruth_localization_noise_std: [1.0, 1.0, 0.5]
    # groundtruth_rotation_uniform_noise: [-0.3141592654, 0.3141592654]
    # groundtruth_rotation_uniform_noise: [-1.57, 1.57]
    groundtruth_rotation_uniform_noise: [-0.78539816, 0.78539816]
    global_rotation_uniform_noise: [-0.78539816, 0.78539816]
    global_scaling_uniform_noise: [0.95, 1.05]
    global_random_rotation_range_per_object: [0, 0] # pi/4 ~ 3pi/4
    global_translate_noise_std: [0, 0, 0]
    anchor_area_threshold: -1
    remove_points_after_sample: true
    groundtruth_points_drop_percentage: 0.0
    groundtruth_drop_max_keep_points: 15
    remove_unknown_examples: false
    sample_importance: 1.0
    random_flip_x: false
    random_flip_y: true
    remove_environment: false

    # Ground-truth database sampler
    database_sampler {
      # database_info_path: "/media/yy/960evo/datasets/kitti/kitti_dbinfos_train.pkl"
      database_info_path: "/home/cv/文档/datasets/KITTI_PP/kitti_dbinfos_train.pkl"
      sample_groups {
        name_to_max_num {
          key: "Car"
          value: 15
        }
      }
      database_prep_steps {
        filter_by_min_num_points {
          min_num_point_pairs {
            key: "Car"
            value: 5
          }
        }
      }
      database_prep_steps {
        filter_by_difficulty {
          removed_difficulties: [-1]
        }
      }
      global_random_rotation_range_per_object: [0, 0]
      rate: 1.0
    }
  }
}
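
The database_sampler block drives ground-truth augmentation: object point clouds cut from other frames (indexed in kitti_dbinfos_train.pkl) are pasted into the current scene. The following is only a conceptual sketch of what the prep steps above amount to, not the repo's implementation; it assumes the pickle maps a class name to a list of dicts carrying "num_points_in_gt" and "difficulty" fields.

import pickle
import random

with open("/home/cv/文档/datasets/KITTI_PP/kitti_dbinfos_train.pkl", "rb") as f:
    db_infos = pickle.load(f)

car_infos = db_infos["Car"]
# filter_by_min_num_points: keep Car crops with at least 5 lidar points.
car_infos = [info for info in car_infos if info["num_points_in_gt"] >= 5]
# filter_by_difficulty: drop entries whose difficulty is -1.
car_infos = [info for info in car_infos if info["difficulty"] != -1]
# sample_groups (value: 15): paste at most 15 sampled Car instances per scene.
sampled = random.sample(car_infos, k=min(15, len(car_infos)))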

train_config: {
  optimizer: {
    adam_optimizer: {
      learning_rate: {
        one_cycle: {
          lr_max: 2.25e-3
          moms: [0.95, 0.85]
          div_factor: 10.0
          pct_start: 0.4
        }
      }
      weight_decay: 0.01
    }
    fixed_weight_decay: true
    use_moving_average: false
  }
  # steps: 99040 # 1238 * 120
  # steps: 49520 # 619 * 80
  # steps: 30950 # 619 * 80
  # steps_per_eval: 3095 # 619 * 5
  steps: 23200 # 464 * 50
  steps_per_eval: 2320 # 464 * 5

  save_checkpoints_secs : 1800 # half hour
  save_summary_steps : 10
  enable_mixed_precision: false 
  loss_scale_factor: -1
  clear_metrics_every_epoch: true
}
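
A quick check of where the step counts come from (3712 is the usual size of the KITTI train split; treat that number as an assumption on my part):

num_train_samples = 3712                             # KITTI train split (assumed)
batch_size = 8

steps_per_epoch = num_train_samples // batch_size    # 464
epochs = 23200 // steps_per_epoch                    # 50 training epochs
eval_every = 2320 // steps_per_epoch                 # evaluate every 5 epochs
print(steps_per_epoch, epochs, eval_every)           # 464 50 5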

# Eval input reader; original values: batch_size=8, num_workers=3
eval_input_reader: {
  dataset: {
    dataset_class_name: "KittiDataset"
    # kitti_info_path: "/media/yy/960evo/datasets/kitti/kitti_infos_val.pkl"
    # # kitti_info_path: "/media/yy/960evo/datasets/kitti/kitti_infos_test.pkl"
    # kitti_root_path: "/media/yy/960evo/datasets/kitti"

    kitti_info_path: "/home/cv/文档/datasets/KITTI_PP/kitti_infos_val.pkl"
    # kitti_info_path: "/home/cv/文档/datasets/KITTI_PP/kitti_infos_test.pkl"
    kitti_root_path: "/home/cv/文档/datasets/KITTI_PP"
  }
  batch_size: 8
  preprocess: {
    max_number_of_voxels: 40000
    shuffle_points: false
    num_workers: 3
    anchor_area_threshold: -1
    remove_environment: false
  }
}
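
Finally, how this config is consumed: training and evaluation are driven by second/pytorch/train.py (exposed on the command line via python-fire). Below is a sketch of calling its entry points from Python; the argument names follow the train()/evaluate() signatures as I remember them, so treat them as assumptions and verify against your checkout.

from second.pytorch.train import train, evaluate

# Train the Car-only model described by car.fhd.config.
train(config_path="second/configs/car.fhd.config",
      model_dir="/path/to/model_dir")

# Evaluate the latest checkpoint in model_dir on the val split.
evaluate(config_path="second/configs/car.fhd.config",
         model_dir="/path/to/model_dir",
         batch_size=1)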
