OpenPCDet
├── pcdet
├── tools
├── checkpoints
├── data
│   ├── kitti
│   │   ├── ImageSets
│   │   ├── testing
│   │   │   ├── calib
│   │   │   ├── image_2
│   │   │   ├── velodyne
│   │   ├── training
│   │   │   ├── calib
│   │   │   ├── image_2
│   │   │   ├── label_2
│   │   │   ├── velodyne
# After this step, the gt_database folder shown below (containing point clouds cropped per label) and the .pkl info files are generated
python -m pcdet.datasets.kitti.kitti_dataset create_kitti_infos tools/cfgs/dataset_configs/kitti_dataset.yaml
kitti
├── ImageSets
│   ├── test.txt
│   ├── train.txt
│   ├── val.txt
├── testing
│   ├── calib
│   ├── image_2
│   ├── velodyne
├── training
│   ├── calib
│   ├── image_2
│   ├── label_2
│   ├── velodyne
├── gt_database
│   ├── xxxxx.bin
├── kitti_infos_train.pkl
├── kitti_infos_val.pkl
├── kitti_dbinfos_train.pkl
├── kitti_infos_trainval.pkl
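To sanity-check what was generated, the .pkl info files can be opened with pickle. A quick sketch (the exact keys depend on your OpenPCDet version):

import pickle

with open('data/kitti/kitti_infos_train.pkl', 'rb') as f:
    infos = pickle.load(f)

print(len(infos))       # number of training frames
print(infos[0].keys())  # per-frame entries such as point_cloud / image / calib / annos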
cd tools
python train.py --cfg_file cfgs/kitti_models/pointpillar.yaml --batch_size=1 --epochs=10 --workers=1
# Evaluate the trained model's performance with test.py
python test.py --cfg_file cfgs/kitti_models/pointpillar.yaml --batch_size 1 --ckpt ../output/kitti_models/pointpillar/default/ckpt/checkpoint_epoch_9.pth
# Visualize the model's predictions with demo.py (it needs a 3D visualization backend, open3d or mayavi, depending on the OpenPCDet version)
python demo.py --cfg_file cfgs/kitti_models/pointpillar.yaml --data_path ../data/kitti/testing/velodyne/000001.bin --ckpt ../output/kitti_models/pointpillar/default/ckpt/checkpoint_epoch_9.pth
- tools/cfgs/dataset_configs/kitti_dataset.yaml
  - Line 23: change USE_ROAD_PLANE: True to False (disable this if you have not downloaded the optional KITTI road-plane data)
- pcdet/datasets/augmentor/data_augmentor.py
  - Lines 225-228: comment out the following
if 'road_plane' in data_dict:
    data_dict.pop('road_plane')
- pcdet/datasets/augmentor/database_sampler.py
  - Lines 161-167: comment out the following
if self.sampler_cfg.get('USE_ROAD_PLANE', False):
    sampled_gt_boxes, mv_height = self.put_boxes_on_road_planes(
        sampled_gt_boxes, data_dict['road_plane'], data_dict['calib']
    )
    data_dict.pop('calib')
    data_dict.pop('road_plane')
  - Lines 186-189: comment out the following
if self.sampler_cfg.get('USE_ROAD_PLANE', False):
    # mv height
    obj_points[:, 2] -= mv_height[idx]
# In pcdet/datasets/__init__.py, import the new dataset class
from .custom.custom_dataset import CustomDataset
# and add the entry to the __all__ dict:
'CustomDataset': CustomDataset
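Put together, the relevant part of pcdet/datasets/__init__.py then looks roughly like this (a sketch; the surrounding entries are abbreviated and the exact imports depend on your OpenPCDet version):

from .dataset import DatasetTemplate
from .kitti.kitti_dataset import KittiDataset
from .custom.custom_dataset import CustomDataset

__all__ = {
    'DatasetTemplate': DatasetTemplate,
    'KittiDataset': KittiDataset,
    'CustomDataset': CustomDataset,  # the newly registered custom dataset
}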
custom
├── ImageSets
│   ├── test.txt
│   ├── train.txt
├── testing
│   ├── velodyne
├── training
│   ├── label_2
│   ├── velodyne
├── gt_database
│   ├── xxxxx.bin
├── custom_infos_train.pkl
├── custom_infos_val.pkl
├── custom_dbinfos_train.pkl
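The split files under ImageSets just list the frame IDs, one per line. A minimal sketch for generating train.txt from the velodyne folder (assuming frames are named like 000000.bin; adjust the paths and the train/test split to your data):

import os

velodyne_dir = 'data/custom/training/velodyne'
stems = sorted(f[:-4] for f in os.listdir(velodyne_dir) if f.endswith('.bin'))

with open('data/custom/ImageSets/train.txt', 'w') as f:
    f.write('\n'.join(stems) + '\n')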
python -m pcdet.datasets.custom.custom_dataset create_custom_infos tools/cfgs/dataset_configs/custom_dataset.yaml
python tools/train.py --cfg_file tools/cfgs/custom_models/pointrcnn.yaml --batch_size=1 --epochs=10 --workers=1
Below are the errors I ran into during training, so you can avoid the same pitfalls.
At first, testing produced no detection boxes. I checked my label data and found that the values in several of the middle fields were wrong.
After fixing the label data, I re-ran dataset preprocessing and training. The .bin files generated under gt_database were correct, and training ran (it raised an error about difficulty; commenting out that line let it run) and produced the corresponding .pth files, but testing still showed no boxes.
Since my original labels had been created in section 3.1 by annotating PCD files, I wondered whether that was why testing showed no boxes, so I re-annotated all of the .bin point clouds. Training then failed with ValueError: Caught ValueError in DataLoader worker process 0. Setting workers to 0 made that error go away, but training still failed with ValueError: Cannot take a larger sample than population when 'replace=False'. I followed the fixes from several blog posts, without success.
Update: I found a solution for the ValueError: Cannot take a larger sample than population when 'replace=False' error.
# In pcdet/datasets/processor/data_processor.py, around line 161, change
extra_choice = np.random.choice(choice, num_points - len(points), replace=False)
# to the following
try:
    extra_choice = np.random.choice(choice, num_points - len(points), replace=False)
except ValueError:
    extra_choice = np.random.choice(choice, num_points - len(points), replace=True)
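The error happens when the remaining pool choice contains fewer points than still need to be sampled; with replace=True the missing points are simply duplicated instead. A standalone illustration (sizes made up):

import numpy as np

pool = np.arange(3)  # only 3 candidate indices left, but 5 more points are needed
# np.random.choice(pool, 5, replace=False)   # raises: Cannot take a larger sample than population
print(np.random.choice(pool, 5, replace=True))  # works, samples with repetition, e.g. [2 0 2 1 0]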
# -*- coding: utf-8 -*-
import os


def process(path):
    # Collect the unique file stems in the label directory.
    file_names = sorted({file.split('.')[0] for file in os.listdir(path)})
    print(file_names)
    for filename in file_names:
        label_path = os.path.join(path, filename + '.txt')
        # Read each label line, keeping the first 15 whitespace-separated fields.
        with open(label_path, encoding='utf-8') as f:
            rows = [line.split()[0:15] for line in f if line.strip()]
        # Rewrite the file in place with the fields reordered: the annotation tool
        # writes ... x y z l w h ry, while KITTI labels expect ... h w l x y z ry,
        # with z at the box bottom (center z minus half the height).
        with open(label_path, 'w', encoding='utf-8') as f_out:
            for i in rows:
                z_bottom = round(float(i[10]) - 0.5 * float(i[13]), 6)
                fields = i[0:8] + [i[13], i[12], i[11], i[8], i[9], str(z_bottom), i[14]]
                f_out.write(' '.join(fields) + '\n')
    print('conversion done!')


if __name__ == '__main__':
    # Change this to the directory holding your point-cloud-annotation-tool label files.
    path = 'E:/DataSet/text2/'
    process(path)
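As a quick sanity check, this is what the reordering does to one hypothetical label line (values made up for illustration):

line = 'Car 0 0 0 0 0 0 0 10.0 2.0 -1.0 3.9 1.6 1.5 1.57'  # tool order: ... x y z l w h ry
i = line.split()
z_bottom = round(float(i[10]) - 0.5 * float(i[13]), 6)
print(' '.join(i[0:8] + [i[13], i[12], i[11], i[8], i[9], str(z_bottom), i[14]]))
# -> Car 0 0 0 0 0 0 0 1.5 1.6 3.9 10.0 2.0 -1.75 1.57  (KITTI order: ... h w l x y z ry)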
# The original line applies an axis swap plus a hard-coded 2.3 m height offset
# (a camera-to-lidar style conversion used for KITTI labels):
loc_lidar = np.concatenate([np.array((float(loc_obj[2]), float(-loc_obj[0]), float(loc_obj[1]-2.3)), dtype=np.float32).reshape(1,3) for loc_obj in loc])
return loc_lidar
# Since the custom labels are already in the lidar frame, replace it with:
loc_lidar = np.concatenate(
    [np.array((float(loc_obj[0]), float(loc_obj[1]), float(loc_obj[2])), dtype=np.float32).reshape(1, 3) for loc_obj in loc])
return loc_lidar
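A tiny standalone comparison of the two mappings on one made-up location (purely illustrative):

import numpy as np

loc_obj = [10.0, 2.0, -1.0]  # hypothetical (x, y, z) from one label line

# original mapping: axis swap plus hard-coded height offset
old = np.array((loc_obj[2], -loc_obj[0], loc_obj[1] - 2.3), dtype=np.float32)
# new mapping: the lidar-frame coordinates are taken as-is
new = np.array(loc_obj, dtype=np.float32)

print(old)  # roughly [-1.  -10.  -0.3]
print(new)  # [10.   2.  -1.]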