Problem: running the OpenPCDet visualization demo inside a Docker container produces the following output and then aborts with a GLX error:
[2021-08-06 19:19:07,489 demo.py 89 INFO] Visualized sample index: 1
(26969, 4)
(1, 25, 8)
libGL error: No matching fbConfigs or visuals found
libGL error: failed to load driver: swrast
libGL error: No matching fbConfigs or visuals found
libGL error: failed to load driver: swrast
ERROR: In /work/standalone-x64-build/VTK-source/Rendering/OpenGL2/vtkXOpenGLRenderWindow.cxx, line 606
vtkXOpenGLRenderWindow (0x55625691ad00): Cannot create GLX context. Aborting.
Reference solution:
https://github.com/SoonminHwang/dockers/issues/1
Below is the command I used to create my own Docker container; adjust /usr/lib/x86_64-linux-gnu, /usr/lib/i386-linux-gnu, -v /mnt/data:/mnt, --name openpcd_new, and the image for your own setup.
docker run -it -v /mnt/data:/mnt -v /tmp/.X11-unix:/tmp/.X11-unix:rw -v /usr/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu -v /usr/lib/i386-linux-gnu:/usr/lib/i386-linux-gnu --privileged -e DISPLAY=unix$DISPLAY -e GDK_SCALE -e GDK_DPI_SCALE --runtime=nvidia -e NVIDIA_DRIVER_CAPABILITIES=compute,utility -e NVIDIA_VISIBLE_DEVICES=all --name openpcd_new --device /dev/dri f330897258a1 /bin/bash
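Depending on how the host's X server is configured, you may additionally need to allow local clients (such as the container) to connect to the display. This step is an assumption about your environment, not part of the original command:
xhost +local: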
1. Run:
pip install mayavi
This will usually fail to build the wheel because the VTK version is wrong. For my setup (Ubuntu 18.04, Python 3.7), VTK 8.1.2 works; it is fairly universal, but try other versions if it does not work for you.
The error looks like this:
ERROR: Command errored out with exit status 1: /root/anaconda3/envs/openpcd/bin/python -u -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-install-t0e2a8ag/mayavi_fce703ecff17463d9019c2f59b8a78b4/setup.py'"'"'; __file__='"'"'/tmp/pip-install-t0e2a8ag/mayavi_fce703ecff17463d9019c2f59b8a78b4/setup.py'"'"';f = getattr(tokenize, '"'"'open'"'"', open)(__file__) if os.path.exists(__file__) else io.StringIO('"'"'from setuptools import setup; setup()'"'"');code = f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' install --record /tmp/pip-record-opq7bwbl/install-record.txt --single-version-externally-managed --compile --install-headers /root/anaconda3/envs/openpcd/include/python3.7m/mayavi Check the logs for full command output.
2. Install the required VTK version:
pip install vtk==8.1.2
3. Reinstall mayavi, which should now succeed:
pip install mayavi
Next, install PyQt; installing it directly with conda is recommended, but pip also works:
conda install pyqt
Under normal circumstances you can now run the demo directly and get the visualization.
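For reference, and matching the defaults in the script further below, the quick demo is usually launched from OpenPCDet's tools directory like this (the checkpoint and data paths are placeholders to replace with your own):
python demo.py --cfg_file cfgs/kitti_models/second.yaml --ckpt /path/to/your_checkpoint.pth --data_path /path/to/your_point_clouds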
If you instead get a Qt-related error, it is usually caused by missing library dependencies:
qt.qpa.plugin: Could not load the Qt platform plugin "xcb" in "" even though it was found. This application failed to start because no Qt platform plugin could be initialized. Reinstalling the application may fix this problem. Available platform plugins
Solution reference:
https://xiaopengzhen.blog.csdn.net/article/details/108509503
The concrete fix is to first check which libraries are missing and then install them; usually it is the one below:
sudo apt-get install libxcb-xinerama0
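To see exactly which shared library the xcb plugin fails to find before installing anything, you can turn on Qt's plugin-loading debug output and rerun whatever command produced the error (demo.py here just stands in for that command):
export QT_DEBUG_PLUGINS=1
python demo.py
The debug output names the .so files that cannot be loaded, which tells you which package (e.g. libxcb-xinerama0 above) is missing.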
Below is the modified demo.py I used: the DemoDataset class from the original demo is kept for reference, but main() builds a NuScenesDataset and visualizes the ground-truth boxes (the model inference part is commented out).

import argparse
import glob
from pathlib import Path

import mayavi.mlab as mlab
import numpy as np
import torch

from pcdet.config import cfg, cfg_from_yaml_file
from pcdet.datasets import DatasetTemplate
from pcdet.datasets.nuscenes.nuscenes_dataset import NuScenesDataset
from pcdet.models import build_network, load_data_to_gpu
from pcdet.utils import common_utils
from visual_utils import visualize_utils as V


class DemoDataset(DatasetTemplate):
    # Kept from the original demo.py; main() below uses NuScenesDataset instead.
    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'):
        """
        Args:
            root_path:
            dataset_cfg:
            class_names:
            training:
            logger:
            ext:
        """
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.root_path = root_path
        self.ext = ext
        data_file_list = glob.glob(str(root_path / f'*{self.ext}')) if self.root_path.is_dir() else [self.root_path]
        data_file_list.sort()
        self.sample_file_list = data_file_list

    def __len__(self):
        return len(self.sample_file_list)

    def __getitem__(self, index):
        if self.ext == '.bin':
            points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4)
        elif self.ext == '.npy':
            points = np.load(self.sample_file_list[index])
        else:
            raise NotImplementedError

        input_dict = {
            'points': points,
            'frame_id': index,
        }

        data_dict = self.prepare_data(data_dict=input_dict)
        return data_dict


def parse_config():
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default='cfgs/kitti_models/second.yaml',
                        help='specify the config for demo')
    parser.add_argument('--data_path', type=str, default='demo_data',
                        help='specify the point cloud data file or directory')
    parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model')
    parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file')

    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    return args, cfg


def main():
    args, cfg = parse_config()
    logger = common_utils.create_logger()
    logger.info('-----------------Quick Demo of OpenPCDet-------------------------')
    # Build the NuScenes dataset directly from the config instead of the simple DemoDataset.
    demo_dataset = NuScenesDataset(
        dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,
        root_path=Path(cfg.DATA_CONFIG.DATA_PATH), logger=logger
    )
    logger.info(f'Total number of samples: \t{len(demo_dataset)}')

    # Model inference is disabled here; only the ground-truth boxes are visualized.
    # model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    # model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
    # model.cuda()
    # model.eval()

    for idx, data_dict in enumerate(demo_dataset):
        logger.info(f'Visualized sample index: \t{idx + 1}')
        data_dict = demo_dataset.collate_batch([data_dict])
        print(data_dict['points'].shape)
        print(data_dict['gt_boxes'].shape)
        # load_data_to_gpu(data_dict)
        # pred_dicts, _ = model.forward(data_dict)

        # collate_batch prepends a batch index to each point, so drop the first column before drawing.
        V.draw_scenes(
            points=data_dict['points'][:, 1:], ref_boxes=data_dict['gt_boxes'][0],
            ref_scores=None, ref_labels=None
        )
        mlab.show(stop=True)

    logger.info('Demo done.')


if __name__ == '__main__':
    main()
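Because this modified main() builds a NuScenesDataset straight from cfg.DATA_CONFIG and the model code is commented out, it visualizes only the ground-truth boxes of each sample. Launch it with a NuScenes config; the file name below is just an example, use whichever NuScenes config your OpenPCDet checkout provides:
python demo.py --cfg_file cfgs/nuscenes_models/cbgs_second_multihead.yaml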