Semantic-Kitti Data Parsing

1. .bin files

Each file stores the LiDAR point cloud in binary format, four float values per point: x, y, z, intensity.

Loading the file:

def load_data_points(points_path):
    # Read the raw float32 stream; every 4 consecutive values are x, y, z, intensity.
    # cloud = np.fromfile(points_path, dtype=np.float32).reshape((-1, 4))
    cloud = np.fromfile(points_path, dtype=np.float32)
    return cloud
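If an N×4 view is more convenient, the flat buffer can simply be reshaped; a minimal sketch (the file name is a placeholder):

points = load_data_points("000000.bin")   # flat array of length 4*N
points = points.reshape(-1, 4)            # one row per point: x, y, z, intensity
xyz, intensity = points[:, :3], points[:, 3]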

2. .label files

Each file stores one label per point.

Loading the file:


def load_data_labels(label_path):
    labels = np.fromfile(label_path, dtype=np.uint32).reshape((-1, 1))
    return labels
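In SemanticKITTI each uint32 label packs the semantic class in the lower 16 bits and the instance id in the upper 16 bits, so the two parts can be separated with bit operations (the file name is a placeholder):

labels = load_data_labels("000000.label").reshape(-1)
semantic = labels & 0xFFFF   # lower 16 bits: semantic class id
instance = labels >> 16      # upper 16 bits: instance id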

3. While slicing my own dataset I ran into a coordinate-axis problem: the trained model could not be applied correctly. Investigation showed that the xyz values of the training data had been normalized and no longer matched the raw data, so feeding raw data into the model gave poor results.

To fix this, reload all training-set data and correct the offset (in my case the raw Z value had been shifted down by 1.34). The code is as follows:

import numpy as np
import argparse
import struct
import math
import os

EXTENSIONS_SCAN = ['.bin']
EXTENSIONS_LABEL = ['.label']


def is_scan(filename):
    return any(filename.endswith(ext) for ext in EXTENSIONS_SCAN)


def is_label(filename):
    return any(filename.endswith(ext) for ext in EXTENSIONS_LABEL)


def getitem(dataset_path, seq_id):
    root = os.path.join(dataset_path, "sequences")

    # zero-pad the sequence id, e.g. "0" -> "00"
    seq_id = '{0:02d}'.format(int(seq_id))
    print(seq_id)

    seq_path = os.path.join(root, seq_id)

    # collect and sort all scan / label files under the sequence directory,
    # so that source and destination lists line up index by index
    scan_files = sorted(os.path.join(dp, f) for dp, dn, fn in os.walk(
        os.path.expanduser(seq_path)) for f in fn if is_scan(f))
    label_files = sorted(os.path.join(dp, f) for dp, dn, fn in os.walk(
        os.path.expanduser(seq_path)) for f in fn if is_label(f))

    return scan_files, label_files


def load_data_points(points_path):
    # cloud = np.fromfile(points_path, dtype=np.float32).reshape((-1, 4))
    cloud = np.fromfile(points_path, dtype=np.float32)
    # print(cloud.shape)   
    return cloud


def load_data_labels(label_path):
    labels = np.fromfile(label_path, dtype=np.uint32).reshape((-1, 1))
    return labels


def transform(roll, pitch, yaw, x, y, z):
    # Build a 4x4 homogeneous transform from roll/pitch/yaw (radians)
    # and a translation (x, y, z); the rotation order is Rz * Ry * Rx.
    Rx = np.array([[1, 0, 0],
                [0, np.cos(roll), -(np.sin(roll))],
                [0, np.sin(roll), np.cos(roll)]])
    Ry = np.array([[np.cos(pitch), 0, np.sin(pitch)],
                [0, 1, 0],
                [-(np.sin(pitch)), 0 ,np.cos(pitch)]])
    Rz = np.array([[np.cos(yaw), -(np.sin(yaw)), 0],
                [np.sin(yaw), np.cos(yaw), 0],
                [0, 0, 1]])
    R = Rz.dot(Ry.dot(Rx))
    t = np.array([x,y,z])
    t = t.reshape(3,1)
    Rt = np.vstack((np.hstack([R, t]), [0, 0, 0, 1]))
    print(Rt)
    return Rt
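# Example (not part of the conversion flow): transform() returns a 4x4
# homogeneous matrix, so a point must be extended with a 1 before applying it.
# The roll/pitch/yaw/translation values here are placeholders.
# Rt = transform(0.0, 0.0, 0.0, 0.0, 0.0, 1.34)
# p = Rt.dot(np.array([1.0, 2.0, 3.0, 1.0]))   # transformed homogeneous point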


def rechange_points(points):
    # points is the flat float32 array: 4 values (x, y, z, intensity) per point
    points_num = int(points.size/4)
    for num in range(points_num):
        # undo the Z offset applied when the training set was built
        points[4*num+2] += 1.34
        # if the intensity looks scaled (value > 500), divide it back down by 1000
        if points[4*num+3] > 500:
            points[4*num+3] = points[4*num+3] / 1000

    return points


def WriteFile(points, file_save):
    binfile = open(file_save, 'wb')  # binary write mode
    # write every float back out, keeping the x, y, z, intensity interleaving
    for wf in range(int(points.size)):
        a = struct.pack('f', points[wf])
        binfile.write(a)
    binfile.close()

if __name__ == "__main__":

    parser = argparse.ArgumentParser("./main.py")
    parser.add_argument(
        '--dataset_path', '-d',
        type=str,
        required=True,
    )
    parser.add_argument(
        '--seq_id', '-s',
        type=str,
        required=True,
    )
    parser.add_argument(
        '--dataset_path_save', '-ds',
        type=str,
        required=True,
    )
    parser.add_argument(
        '--seq_id_save', '-ss',
        type=str,
        required=True,
    )
    FLAGS, unparsed = parser.parse_known_args()

    print(FLAGS.dataset_path_save)

    scan_files, label_files = getitem(FLAGS.dataset_path, FLAGS.seq_id)
    scan_files_save, label_files_save = getitem(FLAGS.dataset_path_save, FLAGS.seq_id_save)
    # print("scan_files:{}".format(scan_files))

    '''
    test
    print(scan_files[0])
    print(scan_files_save[0])
    points = load_data_points(scan_files[0])
    rechange_points(points)
    WriteFile(points,scan_files_save[0])

    '''    

    points_all = []
    for file in scan_files:
        points_ = load_data_points(file)
        rechange_points(points_)
        points_all.append(points_)
    
    # write bin
    for i, file_save in enumerate(scan_files_save):
        WriteFile(points_all[i], file_save)
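The per-point Python loop in rechange_points and the struct-based WriteFile are slow on full scans. A minimal vectorized sketch with the same behaviour (assuming the same 1.34 Z offset and the flat float32 layout) could look like this:

def rechange_points_vectorized(points):
    pts = points.reshape(-1, 4)     # one row per point: x, y, z, intensity
    pts[:, 2] += 1.34               # undo the Z offset
    mask = pts[:, 3] > 500          # intensities that look scaled by 1000
    pts[mask, 3] /= 1000
    return pts.reshape(-1)

def write_points(points, file_save):
    # same binary layout as struct.pack('f', ...) in a loop
    points.astype(np.float32).tofile(file_save)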

Usage; the dataset format is the same as the one rangenet uses:

python test.py -d /path/to/dataset -s 00 -ds /path/to/save -ss 00
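For reference, the script expects the usual SemanticKITTI directory layout (file names below are hypothetical), which getitem walks recursively:

/path/to/dataset/sequences/00/velodyne/000000.bin
/path/to/dataset/sequences/00/labels/000000.label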

If there are any errors, corrections are welcome.
