Note: remember to apply label_map_inv while writing out the .label files; the network's 0-based train IDs must be mapped back to the original SemanticKITTI label IDs.
Reference:
https://github.com/PRBonn/semantic-kitti-api/blob/master/remap_semantic_labels.py
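The remapping boils down to a lookup table. A minimal sketch (the excerpted dict and the sample arrays are illustrative only; the full label_map_inv appears in the evaluation script below):

# Build a LUT that maps train ID + 1 -> original SemanticKITTI label ID,
# mirroring remap_semantic_labels.py. Key 0 stays "unlabeled".
import numpy as np

label_map_inv = {0: 0, 1: 10, 9: 40}  # excerpt: unlabeled, car, road
lut = np.zeros(max(label_map_inv) + 100, dtype=np.uint32)  # +100 slack for unknown keys
lut[list(label_map_inv)] = list(label_map_inv.values())

train_ids = np.array([0, 8])         # raw argmax output, 0-based
original_ids = lut[train_ids + 1]    # -> array([10, 40], dtype=uint32)
original_ids.tofile('000000.label')  # one uint32 per point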
Run it with a script:
run.sh
#!/bin/bash
for i in $(seq 11 21)
do
echo "===> start sequence ${i}.";
/home/xiaokeai1/anaconda3/envs/torch1.7/bin/python \
SemanticKITTI_eval.py --device 0 --sequence $i --outDIR submission
echo "===> finish sequence ${i}.";
done
SemanticKITTI_eval.py
'''
Description: evaluate the SemanticKITTI test set and generate the corresponding .label files
Author: suyunzheng
Date: 2022-04-13 10:53:16
LastEditTime: 2022-04-14 08:53:06
LastEditors: maple
'''
import os
import argparse
import logging
import numpy as np
import torch
import open3d as o3d
from tqdm import tqdm
from core import builder
logging.basicConfig(format='%(asctime)s.%(msecs)03d [%(levelname)s] [%(filename)s, line:%(lineno)d] %(message)s',
datefmt='## %Y-%m-%d %H:%M:%S')
logging.getLogger().setLevel(logging.INFO)
logger = logging.getLogger()
from visualize_model_pred import process_point_cloud
# `device` is assigned in main() from the --device flag; default to CPU here
device = 'cpu'
class kittiEval:
    '''
    description: given root_path, the output directories are created one level above it
    param {*} self
    param {*} root_path
    return {*}
    '''
    def __init__(self, root_path, configs) -> None:
        self.feed_dict = None
        self.configs = configs
        # self.root_path, e.g. /mnt/software/suyunzheng/data/semantic-kitti/sequences
        self.root_path = root_path
        # parent directory, e.g. /mnt/software/suyunzheng/data/semantic-kitti
        parent_dir = os.path.dirname(self.root_path)
        # self.output_path, e.g. <parent_dir>/<outDIR>/sequences
        self.output_path = os.path.join(parent_dir, configs.outDIR, 'sequences')
        logger.info("===> label files will be saved at: {}".format(self.output_path))
        try:
            os.makedirs(self.output_path, exist_ok=False)
        except OSError:
            logger.error('===> {} already exists'.format(self.output_path))
        else:
            logger.info("===> mkdirs {}".format(self.output_path))
    # read one scan, generate one .label file
    '''
    description: run inference over one sequence and save a .label file per scan
    param {*} self
    param {*} sequence, int
    return {*}
    '''
    def get_pred(self, sequence):
        sequence_str = str(sequence)
        velodyne_dir = os.path.join(self.root_path, sequence_str, 'velodyne')
        input_point_clouds = sorted(os.listdir(velodyne_dir))
        # label_dir, e.g. <output_path>/11
        label_dir = os.path.join(self.output_path, sequence_str)
        # predictions_dir, e.g. <output_path>/11/predictions
        predictions_dir = os.path.join(label_dir, 'predictions')
        try:
            os.makedirs(predictions_dir, exist_ok=False)
        except OSError:
            logger.error('===> {} already exists'.format(predictions_dir))
        else:
            logger.info("===> mkdirs {}".format(predictions_dir))
        # worker function for the process pool
        def process(point_cloud_name):
            logger.info("===> point_cloud_name: {}".format(point_cloud_name))
            point_cloud_name_pure = point_cloud_name.split(".")[-2]  # e.g. 000000
            label_name = f'{point_cloud_name_pure}.label'            # e.g. 000000.label
            pc = np.fromfile(f'{velodyne_dir}/{point_cloud_name}',
                             dtype=np.float32).reshape(-1, 4)
            label = None
            feed_dict = process_point_cloud(pc, label)
            self.feed_dict = feed_dict
            # load the model and run inference
            predictions = self.LoadModel(feed_dict)  # [n,] numpy array
            logger.info("===> before set(predictions): {}".format(set(predictions)))
            # visualization (optional)
            # self.visualize(feed_dict['pc'], predictions)
            # shift train IDs 0..18 to 1..19 so they match the keys of
            # label_map_inv (0 is reserved for "unlabeled")
            predictions += 1
            logger.info("===> after set(predictions): {}".format(set(predictions)))
            # save the .label file
            self.saveBinLabel(f'{predictions_dir}/{label_name}', predictions)
        from multiprocessing import cpu_count
        from pathos.multiprocessing import ProcessingPool as Pool
        logger.info("===> CPU count: %d" % cpu_count())
        # NOTE: LoadModel is called inside process(), so every scan reloads the checkpoint
        pool = Pool(6)
        pool.map(process, input_point_clouds)
        pool.close()
        pool.join()
    '''
    description: build the model, load the checkpoint, and run inference on one scan
    param {*} self
    param {*} feed_dict
    return {*} predictions array [n,]
    '''
    def LoadModel(self, feed_dict):
        global device
        # model = builder.make_model().to(device)
        if self.configs.model == 'base_minkunet':
            from core.models.semantic_kitti import BaseMinkUNet
            model = BaseMinkUNet(num_classes=19, cr=1.0)
        elif self.configs.model == 'AF2S3_only_aff_Net':
            from core.models.semantic_kitti.AF2S3_only_aff_Net import AF2S3_only_aff_Net
            model = AF2S3_only_aff_Net(num_classes=19)
        else:
            raise NotImplementedError(self.configs.model)
        model = model.to(device)
        if self.configs.model == 'base_minkunet':
            load_path = 'runs/run-98527c7b-864842b9/checkpoints/step-286980.pt'
        elif self.configs.model == 'AF2S3_only_aff_Net':
            load_path = 'runs/run-a5168cf1-b19bc802/checkpoints/step-286980.pt'
        else:
            raise NotImplementedError
        state_dict = torch.load(load_path, map_location=device)['model']
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            name = k[7:]  # strip the 'module.' prefix (7 chars) added by DataParallel
            new_state_dict[name] = v
        # load the remapped parameters
        model.load_state_dict(new_state_dict)
model.eval()
with torch.no_grad():
inputs = feed_dict['lidar'].to(device)
outputs = model(inputs)
predictions = outputs.argmax(1).cpu().numpy()
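            # map voxel-level predictions back onto the original points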
predictions = predictions[feed_dict['inverse_map']]
logger.info("===> Finish model inference.")
torch.cuda.empty_cache()
logger.info("===> predictions.shape:{}".format(predictions.shape)) # array [n,]
logger.info("===> out_pc shape:{}".format(feed_dict['pc'].shape)) # array [n,4]
return predictions
    '''
    description: write an [n,] array to a binary .label file as np.uint32
    param {*} self
    param {*} labelFilename
    param {*} predictions
    return {*}
    '''
    def saveBinLabel(self, labelFilename, predictions):
        if os.path.exists(labelFilename):
            logger.info("===> {} already exists, skip.".format(labelFilename.split("/")[-1]))
            return
        predictions = predictions.astype(np.uint32)  # astype returns a copy, so assign it
        logger.info(predictions.shape)
        logger.info(predictions.dtype)
        # unused fallback: write the labels one uint32 at a time
        def save_bin(binName, pred):
            with open(binName, 'wb+') as binfile:
                for i in range(pred.shape[0]):
                    class_label = int(pred[i])
                    content = class_label.to_bytes(4, byteorder='little')  # 4 bytes, little-endian
                    binfile.write(content)
# label_map_inv
label_map_inv = {
0: 0, # "unlabeled", and others ignored
1: 10, # "car"
2: 11, # "bicycle"
3: 15, # "motorcycle"
4: 18, # "truck"
5: 20, # "other-vehicle"
6: 30, # "person"
7: 31, # "bicyclist"
8: 32, # "motorcyclist"
9: 40, # "road"
10: 44, # "parking"
11: 48, # "sidewalk"
12: 49, # "other-ground"
13: 50, # "building"
14: 51, # "fence"
15: 70, # "vegetation"
16: 71, # "trunk"
17: 72, # "terrain"
18: 80, # "pole"
19: 81 # "traffic-sign"
}
        # make a lookup table for the inverse mapping
        maxkey = max(label_map_inv.keys())
        # +100 hack: make the LUT bigger just in case there are unknown labels
        remap_lut = np.zeros((maxkey + 100), dtype=np.int32)
        remap_lut[list(label_map_inv.keys())] = list(label_map_inv.values())
        predictions_inv = remap_lut[predictions]
        # save_bin(labelFilename, predictions)  # slow per-element fallback
        predictions_inv = predictions_inv.astype(np.uint32)  # astype returns a copy, so assign it
        logger.info("predictions: {}".format(set(predictions)))
        logger.info("predictions_inv: {}".format(set(predictions_inv)))
        predictions_inv.tofile(labelFilename)
        logger.info('===> Saved label file: {}'.format(labelFilename))
        logger.info("*" * 80)
        # round-trip check (optional):
        # sem_label = self.loadBinLabel(labelFilename).reshape((-1))
        # logger.info("sem_label: {}".format(set(sem_label)))
        # self.visualize(self.feed_dict['pc'], sem_label)
    def loadBinLabel(self, labelFilename):
        logger.info("===> Loading {}".format(labelFilename))
        label = np.fromfile(labelFilename, dtype=np.uint32)
        label = label.reshape((-1))
        logger.info("===> [Load] label: {}".format(set(label)))
        sem_label = label & 0xFFFF  # the lower 16 bits hold the semantic label
        logger.info("===> [Load] sem_label: {}".format(set(sem_label)))
# label_map
remapdict = {
0 : 0, # "unlabeled"
1 : 0, # "outlier" mapped to "unlabeled" --------------------------mapped
10: 1, # "car"
11: 2, # "bicycle"
13: 5, # "bus" mapped to "other-vehicle" --------------------------mapped
15: 3, # "motorcycle"
16: 5, # "on-rails" mapped to "other-vehicle" ---------------------mapped
18: 4, # "truck"
20: 5, # "other-vehicle"
30: 6, # "person"
31: 7, # "bicyclist"
32: 8, # "motorcyclist"
40: 9, # "road"
44: 10, # "parking"
48: 11, # "sidewalk"
49: 12, # "other-ground"
50: 13, # "building"
51: 14, # "fence"
52: 0, # "other-structure" mapped to "unlabeled" ------------------mapped
60: 9, # "lane-marking" to "road" ---------------------------------mapped
70: 15, # "vegetation"
71: 16, # "trunk"
72: 17, # "terrain"
80: 18, # "pole"
81: 19, # "traffic-sign"
99: 0, # "other-object" to "unlabeled" ----------------------------mapped
252: 1, # "moving-car" to "car" ------------------------------------mapped
253: 7, # "moving-bicyclist" to "bicyclist" ------------------------mapped
254: 6, # "moving-person" to "person" ------------------------------mapped
255: 8, # "moving-motorcyclist" to "motorcyclist" ------------------mapped
256: 5, # "moving-on-rails" mapped to "other-vehicle" --------------mapped
257: 5, # "moving-bus" mapped to "other-vehicle" -------------------mapped
258: 4, # "moving-truck" to "truck" --------------------------------mapped
259: 5 # "moving-other"-vehicle to "other-vehicle" ----------------mapped
}
        # make a lookup table for the mapping
        maxkey = max(remapdict.keys())
        # +100 hack: make the LUT bigger just in case there are unknown labels
        remap_lut = np.zeros((maxkey + 100), dtype=np.int32)
        remap_lut[list(remapdict.keys())] = list(remapdict.values())
        sem_label_mapped = remap_lut[sem_label]  # remap semantics to train IDs
        logger.info("===> [Load] sem_label_mapped: {}".format(set(sem_label_mapped)))
        # shift back to 0-based train IDs ("unlabeled" becomes -1)
        sem_label_mapped -= 1
        return sem_label_mapped
def visualize(self, pc, label):
logger.info("===> visualize...")
cmap = np.array([
[245, 150, 100, 255],
[245, 230, 100, 255],
[150, 60, 30, 255],
[180, 30, 80, 255],
[255, 0, 0, 255],
[30, 30, 255, 255],
[200, 40, 255, 255],
[90, 30, 150, 255],
[255, 0, 255, 255],
[255, 150, 255, 255],
[75, 0, 75, 255],
[75, 0, 175, 255],
[0, 200, 255, 255],
[50, 120, 255, 255],
[0, 175, 0, 255],
[0, 60, 135, 255],
[80, 240, 150, 255],
[150, 240, 255, 255],
[0, 0, 255, 255],
])
cmap = cmap[:, [2, 1, 0, 3]] # convert bgra to rgba
color_dict = {}
for i in range(19):
color_dict[i] = cmap[i, :]
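        # draw points carrying the ignore label (255) in black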
color_dict[255] = [0,0,0,255]
pct = o3d.geometry.PointCloud()
pct.points = o3d.utility.Vector3dVector(pc[:,:-1])
cloud_color = [color_dict[i] for i in list(label)]
color = np.array(cloud_color).reshape((-1,4))[:,:3] / 255
pct.colors = o3d.utility.Vector3dVector(color)
o3d.visualization.draw_geometries([pct], top=0, left=0)
def loadConfig():
parser = argparse.ArgumentParser('Model')
parser.add_argument('--model', type=str, default='AF2S3_only_aff_Net')
parser.add_argument('--sequence', type=int, default=11)
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--outDIR', type=str, default='submission_dir')
args = parser.parse_args()
return args
def main():
    global device
    args = loadConfig()
    if args.device == 1:
        device = 'cuda:1'
    elif args.device == 0:
        device = 'cuda:0'
    else:
        device = 'cpu'
    root_path = '/mnt/software/suyunzheng/data/semantic-kitti/sequences'
    evaluator = kittiEval(root_path=root_path, configs=args)  # avoid shadowing the built-in eval
    # for sequence in tqdm(range(13, 22)):
    #     logger.info("===> current sequence: {}".format(sequence))
    #     evaluator.get_pred(sequence=sequence)
    logger.info("===> current sequence: {}".format(args.sequence))
    evaluator.get_pred(sequence=args.sequence)

if __name__ == '__main__':
    main()
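For reference, the .label encoding that loadBinLabel relies on (as documented in the semantic-kitti-api): each point is a single uint32, where the lower 16 bits hold the semantic label and the upper 16 bits the instance id. A quick round-trip check of a generated file (the filename is a placeholder):

import numpy as np

raw = np.fromfile('000000.label', dtype=np.uint32)
sem = raw & 0xFFFF  # semantic labels, original SemanticKITTI IDs
inst = raw >> 16    # instance ids; all zeros for this semantic-only submission
print(set(sem), set(inst))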
Package the predictions for submission (run inside the submission directory):
zip -r zip_name.zip ./
Validate the archive before uploading. Reference:
https://github.com/PRBonn/semantic-kitti-api/blob/master/validate_submission.py
./validate_submission.py \
/mnt/software/suyunzheng/data/semantic-kitti/submission/submission3.zip \
/mnt/software/suyunzheng/data/semantic-kitti/
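The official validator checks the contents thoroughly; for a quicker structural sanity check of the archive, something like this works. A minimal sketch, assuming the sequences/<seq>/predictions/*.label layout that get_pred produces (the zip path is a placeholder, matching the command above):

import zipfile

# count the .label entries per test sequence inside the archive
with zipfile.ZipFile('submission3.zip') as zf:
    names = zf.namelist()
    for seq in range(11, 22):
        prefix = f'sequences/{seq}/predictions/'
        n = sum(1 for name in names
                if name.startswith(prefix) and name.endswith('.label'))
        print(f'{prefix}: {n} label files')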
Finally, submit on CodaLab:
https://competitions.codalab.org/competitions