Point Cloud Rendering from an RGB-D Dataset

This post follows my previous article, which used the Chapter 8 depth images from 视觉SLAM十四讲 (14 Lectures on Visual SLAM) to draw a point cloud:

https://blog.csdn.net/unlimitedai/article/details/86556508

Download the dataset here: https://vision.in.tum.de/data/datasets/rgbd-dataset/download#freiburg1_xyz

Download the program and other files: https://pan.baidu.com/s/136gfwTVGNIMCOHSKeFEaUg (extraction code: pn6a)

Addendum: the generated .pcd files can be downloaded here: https://pan.baidu.com/s/1EDKOFnCrIuemSa5oyLkCHw (extraction code: bdgx). map1 is the one with more points and looks clearer.

****** If you downloaded my files, you can skip the next few steps ******

After downloading and extracting the dataset, run the following Python script on it to generate the associate.txt file.

#!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Juergen Sturm, TUM
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
#    with the distribution.
#  * Neither the name of TUM nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Requirements: 
# sudo apt-get install python-argparse

"""
The Kinect provides the color and depth images in an un-synchronized way. This means that the set of time stamps from the color images do not intersect with those of the depth images. Therefore, we need some way of associating color images to depth images.

For this purpose, you can use the ''associate.py'' script. It reads the time stamps from the rgb.txt file and the depth.txt file, and joins them by finding the best matches.
"""

import argparse
import sys
import os
import numpy


def read_file_list(filename):
    """
    Reads a trajectory from a text file. 
    
    File format:
    The file format is "stamp d1 d2 d3 ...", where stamp denotes the time stamp (to be matched)
    and "d1 d2 d3.." is arbitary data (e.g., a 3D position and 3D orientation) associated to this timestamp. 
    
    Input:
    filename -- File name
    
    Output:
    dict -- dictionary of (stamp,data) tuples
    
    """
    file = open(filename)
    data = file.read()
    lines = data.replace(","," ").replace("\t"," ").split("\n") 
    list = [[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"]
    list = [(float(l[0]),l[1:]) for l in list if len(l)>1]
    return dict(list)

def associate(first_list, second_list,offset,max_difference):
    """
    Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim 
    to find the closest match for every input tuple.
    
    Input:
    first_list -- first dictionary of (stamp,data) tuples
    second_list -- second dictionary of (stamp,data) tuples
    offset -- time offset between both dictionaries (e.g., to model the delay between the sensors)
    max_difference -- search radius for candidate generation

    Output:
    matches -- list of matched tuples ((stamp1,data1),(stamp2,data2))
    
    """
    first_keys = list(first_list.keys())    # copy into lists so matched stamps can be
    second_keys = list(second_list.keys())  # removed (dict views in Python 3 have no remove())
    potential_matches = [(abs(a - (b + offset)), a, b) 
                         for a in first_keys 
                         for b in second_keys 
                         if abs(a - (b + offset)) < max_difference]
    potential_matches.sort()
    matches = []
    for diff, a, b in potential_matches:
        if a in first_keys and b in second_keys:
            first_keys.remove(a)
            second_keys.remove(b)
            matches.append((a, b))
    
    matches.sort()
    return matches

if __name__ == '__main__':
    
    # parse command line
    parser = argparse.ArgumentParser(description='''
    This script takes two data files with timestamps and associates them   
    ''')
    parser.add_argument('first_file', help='first text file (format: timestamp data)')
    parser.add_argument('second_file', help='second text file (format: timestamp data)')
    parser.add_argument('--first_only', help='only output associated lines from first file', action='store_true')
    parser.add_argument('--offset', help='time offset added to the timestamps of the second file (default: 0.0)',default=0.0)
    parser.add_argument('--max_difference', help='maximally allowed time difference for matching entries (default: 0.02)',default=0.02)
    args = parser.parse_args()

    first_list = read_file_list(args.first_file)
    second_list = read_file_list(args.second_file)

    matches = associate(first_list, second_list,float(args.offset),float(args.max_difference))    

    if args.first_only:
        for a,b in matches:
            print("%f %s"%(a," ".join(first_list[a])))
    else:
        for a,b in matches:
            print("%f %s %f %s"%(a," ".join(first_list[a]),b-float(args.offset)," ".join(second_list[b])))
            

Run it with:

python associate.py rgb.txt depth.txt > associate.txt
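Each line of associate.txt pairs an RGB frame with its closest depth frame, four fields per line (shown schematically here; the real files contain numeric timestamps):

rgb_timestamp rgb/rgb_timestamp.png depth_timestamp depth/depth_timestamp.png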

Then delete the first three lines (the comment header) of groundtruth.txt.
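On Linux you can do that in one command, for example with sed:

sed -i '1,3d' groundtruth.txt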

*************************************************************

Afterwards your program directory should look like this:

[Screenshot: project directory layout]

And your data folder should look like this:

[Screenshot: data folder contents]

Your CMakeLists.txt at this point:

cmake_minimum_required( VERSION 2.8 )

project( test_read )
set( CMAKE_BUILD_TYPE Release )
set( CMAKE_CXX_FLAGS "-std=c++14 -O3" )

# opencv 
find_package( OpenCV REQUIRED )
include_directories( ${OpenCV_INCLUDE_DIRS} )

# eigen 
include_directories( "/usr/include/eigen3/" )

# pcl 
find_package( PCL 1.9 REQUIRED COMPONENTS common io )
include_directories( ${PCL_INCLUDE_DIRS} )
add_definitions( ${PCL_DEFINITIONS} )
# work around a vtkproj4 link error seen with some PCL/VTK packages
list( REMOVE_ITEM PCL_LIBRARIES "vtkproj4" )

add_executable( test_read main.cpp )

target_link_libraries( test_read ${OpenCV_LIBS} ${PCL_LIBRARIES} )

Your main.cpp (the listing below restores the includes and code that were eaten by the blog formatting):

#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <cmath>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

#include <Eigen/Core>
#include <Eigen/Geometry>

#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>

using namespace std;

// helper: convert a timestamp string read from associate.txt to a double
double string_to_double( string data_string )
{
    stringstream ss;
    double data_double;
    ss << data_string;
    ss >> data_double;
    return data_double;
}

int main( int argc, char** argv )
{
    if ( argc != 2 )
    {
        cout << "usage: test_read path_to_dataset" << endl;
        return 1;
    }
    string path_to_dataset = argv[1];
    ifstream fin1( path_to_dataset + "/associate.txt" );    // matched rgb/depth pairs
    ifstream fin2( path_to_dataset + "/groundtruth.txt" );  // camera trajectory
    if ( !fin1 || !fin2 )
    {
        cout << "cannot open associate.txt or groundtruth.txt under " << path_to_dataset << endl;
        return 1;
    }

    string time_rgb, rgb_file, time_depth, depth_file;
    cv::Mat color, depth;
    vector<cv::Mat> colorImgs, depthImgs;    // color and depth images
    vector<Eigen::Isometry3d, Eigen::aligned_allocator<Eigen::Isometry3d>> poses;  // camera poses
    // flag becomes 1 once the image timestamps match a trajectory timestamp
    int flag;
    for ( int index = 0; index < 700; index++ )
    {
        flag = 0;
        // read one associated rgb/depth pair
        fin1 >> time_rgb >> rgb_file >> time_depth >> depth_file;
        if ( index % 10 == 0 )   // how many frames to skip? let's keep every 10th
        {
            color = cv::imread( path_to_dataset + "/" + rgb_file );
            colorImgs.push_back( color );
            depth = cv::imread( path_to_dataset + "/" + depth_file, -1 );  // -1 loads the raw 16-bit depth
            depthImgs.push_back( depth );
            // scan groundtruth.txt until its timestamp matches this image pair,
            // then store the corresponding pose
            for ( ; flag == 0; )
            {
                double data[8] = {0};   // time tx ty tz qx qy qz qw
                for ( auto& d : data )
                    fin2 >> d;
                if ( !fin2 )
                {
                    cout << "reached the end of groundtruth.txt without a match" << endl;
                    return 1;
                }
                // treat the times as equal when the trajectory stamp is within
                // 0.01 s of the mean of the rgb and depth stamps
                if ( abs( 2*data[0] - string_to_double(time_rgb) - string_to_double(time_depth) ) < 0.01 )
                {
                    cout << "found pose for frame " << index << endl;
                    Eigen::Quaterniond q( data[7], data[4], data[5], data[6] );
                    Eigen::Isometry3d T( q );
                    T.pretranslate( Eigen::Vector3d( data[1], data[2], data[3] ) );
                    poses.push_back( T );
                    flag = 1;
                }
            }
        }
    }

    // camera intrinsics: these are the values from the book's example;
    // the calibrated TUM fr1 values are fx=517.3 fy=516.5 cx=318.6 cy=255.3
    // with a depth scale of 5000
    double cx = 325.5;
    double cy = 253.5;
    double fx = 518.0;
    double fy = 519.0;
    double depthScale = 1000.0;

    cout << "converting images into a point cloud..." << endl;

    // define the point type and cloud type used for the map
    typedef pcl::PointXYZRGB PointT;
    typedef pcl::PointCloud<PointT> PointCloud;

    // create a new point cloud
    PointCloud::Ptr pointCloud( new PointCloud );
    for ( int i = 0; i < 70; i++ )  // how many frames to use? must not exceed index/10
    {
        cout << "converting image: " << i+1 << endl;
        cv::Mat color = colorImgs[i];
        cv::Mat depth = depthImgs[i];
        Eigen::Isometry3d T = poses[i];
        for ( int v = 0; v < color.rows; v++ )
            for ( int u = 0; u < color.cols; u++ )
            {
                unsigned int d = depth.ptr<unsigned short>( v )[u];  // depth value
                if ( d == 0 ) continue;   // 0 means no measurement
                Eigen::Vector3d point;
                point[2] = double(d) / depthScale;
                point[0] = ( u - cx ) * point[2] / fx;
                point[1] = ( v - cy ) * point[2] / fy;
                Eigen::Vector3d pointWorld = T * point;

                PointT p;
                p.x = pointWorld[0];
                p.y = pointWorld[1];
                p.z = pointWorld[2];
                p.b = color.data[ v*color.step + u*color.channels() ];
                p.g = color.data[ v*color.step + u*color.channels() + 1 ];
                p.r = color.data[ v*color.step + u*color.channels() + 2 ];
                pointCloud->points.push_back( p );
            }
    }

    pointCloud->is_dense = false;
    cout << "the point cloud has " << pointCloud->size() << " points." << endl;
    pcl::io::savePCDFileBinary( "map.pcd", *pointCloud );
    return 0;
}
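For reference, the inner loop implements the standard pinhole back-projection: a pixel $(u,v)$ with raw depth $d$ is lifted into the camera frame and transformed into the world frame by the pose $T$ read from groundtruth.txt:

$$ z = \frac{d}{s}, \qquad x = \frac{(u-c_x)\,z}{f_x}, \qquad y = \frac{(v-c_y)\,z}{f_y}, \qquad P_{world} = T\begin{bmatrix} x \\ y \\ z \end{bmatrix} $$

where $s$ is the depthScale factor.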

Then run the usual CMake build.
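For example, from the project root (assuming the dataset sits in ./data as in the screenshot above):

mkdir build && cd build
cmake ..
make
./test_read ../data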

A map.pcd file will appear under your build directory.

Open it with:

pcl_viewer map.pcd

The result is shown in the screenshot. My budget machine cannot handle too many points, so feel free to tune the parameters: the (index%10) frame stride and the number of frames used later in the PCL loop. If that is still not enough, see the sketch below.
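Another option (not part of the original program) is to downsample the cloud with a voxel-grid filter before saving. A minimal sketch, assuming you add the filters component to find_package(PCL ...) in the CMakeLists; the 1 cm leaf size is just an arbitrary starting value:

#include <pcl/filters/voxel_grid.h>

// keep one point per 1 cm x 1 cm x 1 cm voxel
pcl::VoxelGrid<PointT> voxel;
voxel.setInputCloud( pointCloud );
voxel.setLeafSize( 0.01f, 0.01f, 0.01f );
PointCloud::Ptr filtered( new PointCloud );
voxel.filter( *filtered );
pcl::io::savePCDFileBinary( "map_filtered.pcd", *filtered );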

