视觉SLAM(3)_RGBD相机的点云数据生成

本节展示了如何将rgbd的深度及图像信息转换为点云数据的过程,转换的公式需要一定的相机模型的基础知识,可以参考高翔的《视觉SLAM十四讲》,这里以奥比的一款相机为例,理论都比较简单,理解起来并不困难,这里直接贴上代码:

// NOTE(review): the original header names were lost during extraction (bare
// "#include" lines). Reconstructed below from the APIs used in this file —
// verify against the original source.
#include <iostream>
#include <cmath>
#include <boost/bind.hpp>
#include <Eigen/Core>
#include <opencv2/opencv.hpp>
#include <ros/ros.h>
#include <ros/topic.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/image_encodings.h>
//pcl
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl_conversions/pcl_conversions.h>

#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
using namespace std;

// ---- Node-wide state -----------------------------------------------------
// Camera intrinsics, received once at startup in main().
sensor_msgs::CameraInfo cameraInfo;
// 3x3 intrinsic matrix K cached from cameraInfo (row-major copy).
Eigen::Matrix3f m_K;
// Topic / frame names, filled from private ROS parameters in main().
std::string depth_topic_;
std::string rgb_topic_;
std::string camera_info_topic_;
std::string rgbd_frame_id_;
// Publisher for the generated sensor_msgs::PointCloud2.
ros::Publisher pc_publisher;
// Depth scale factor: raw depth values are presumably millimetres,
// multiplied by 0.001 to obtain metres — confirm against the camera driver.
const float camera_factor = 0.001;
// Back-projects a depth image (CV_32FC1) into a pcl::PointXYZRGB cloud using
// the pinhole camera model and publishes it as a sensor_msgs::PointCloud2 on
// pc_publisher, stamped with rgbd_frame_id_.
//
// Parameters:
//   depth_pic           depth image, one float per pixel (scaled to metres by
//                       camera_factor).
//   _nonZeroCoordinates valid-depth pixel locations (currently unused; kept
//                       for interface compatibility).
//   rgb_pic             optional registered BGR8 image of the same size used
//                       to color the points. Defaults to empty for backward
//                       compatibility with existing 2-argument callers.
//
// BUG (fixed): the original code sampled point colors from depth_pic itself,
// which is a single-channel float image — the bytes read were raw float data,
// not BGR. Pass the RGB image via rgb_pic to get correct colors; when rgb_pic
// is empty the legacy (incorrect) sampling is preserved so existing callers
// see unchanged behavior.
void convertToDepthPointCloud(const cv::Mat &depth_pic, const cv::Mat &_nonZeroCoordinates,
                              const cv::Mat &rgb_pic = cv::Mat())
{
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>);
    sensor_msgs::PointCloud2 pub_pointcloud;
    // One allocation up front instead of repeated push_back growth.
    cloud->points.reserve(static_cast<size_t>(depth_pic.rows) * depth_pic.cols);
    for (int m = 0; m < depth_pic.rows; m++)
    {
        for (int n = 0; n < depth_pic.cols; n++)
        {
            const float d = depth_pic.ptr<float>(m)[n]; // depth at pixel (row m, col n)
            // Skip invalid measurements: zero, or NaN (REP 118 uses NaN for
            // unknown depth in float encodings — the original missed this).
            if (d == 0.0f || std::isnan(d))
            {
                continue;
            }
            // Pinhole back-projection:
            //   z = d * factor,  x = (u - cx) * z / fx,  y = (v - cy) * z / fy
            // K layout: K[0]=fx, K[2]=cx, K[4]=fy, K[5]=cy.
            pcl::PointXYZRGB point;
            point.z = d * camera_factor;
            point.x = (n - cameraInfo.K.at(2)) * point.z / cameraInfo.K.at(0);
            point.y = (m - cameraInfo.K.at(5)) * point.z / cameraInfo.K.at(4);
            if (!rgb_pic.empty())
            {
                // Correct path: sample color from the registered BGR image.
                const cv::Vec3b &bgr = rgb_pic.at<cv::Vec3b>(m, n);
                point.b = bgr[0];
                point.g = bgr[1];
                point.r = bgr[2];
            }
            else
            {
                // Legacy fallback (original behavior): reads raw bytes of the
                // float depth buffer as if they were BGR. Kept only so that
                // existing 2-argument callers are bit-identical; pass rgb_pic.
                point.b = depth_pic.data[m * depth_pic.step + n * depth_pic.channels() + 0];
                point.g = depth_pic.data[m * depth_pic.step + n * depth_pic.channels() + 1];
                point.r = depth_pic.data[m * depth_pic.step + n * depth_pic.channels() + 2];
            }
            cloud->points.push_back(point);
        }
    }
    // Unorganized cloud: height 1, width = point count.
    cloud->height = 1;
    cloud->width = cloud->points.size();
    cloud->is_dense = false;
    pcl::toROSMsg(*cloud, pub_pointcloud);
    pub_pointcloud.header.frame_id = rgbd_frame_id_;
    pub_pointcloud.header.stamp = ros::Time::now();
    pc_publisher.publish(pub_pointcloud);
}

// Time-synchronized depth + RGB callback. Converts both incoming ROS images
// with cv_bridge and hands the float depth image to the point-cloud builder.
// NOTE(review): the converted BGR image and the non-zero coordinate list are
// computed here but not consumed by convertToDepthPointCloud — confirm intent.
void PointCloudCallback(const sensor_msgs::ImageConstPtr& _depthImage, const sensor_msgs::ImageConstPtr& _rgbImage)
{
    cv_bridge::CvImageConstPtr bgr_ptr;
    cv_bridge::CvImageConstPtr depth_float_ptr;
    cv_bridge::CvImageConstPtr depth_byte_ptr;

    // Decode the color frame as 8-bit BGR (copies, since the encoding may differ).
    try
    {
        bgr_ptr = cv_bridge::toCvCopy(_rgbImage, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& ex)
    {
        ROS_ERROR("cv_bridge RGB exception: %s", ex.what());
        return;
    }

    // Decode the depth frame twice: 32-bit float for geometry, and a byte
    // view used only to locate valid (non-zero) pixels below.
    try
    {
        depth_float_ptr = cv_bridge::toCvShare(_depthImage, sensor_msgs::image_encodings::TYPE_32FC1);
        depth_byte_ptr = cv_bridge::toCvShare(_depthImage, sensor_msgs::image_encodings::TYPE_8UC1);
    }
    catch (cv_bridge::Exception& ex)
    {
        ROS_ERROR("cv_bridge DEPTH exception: %s", ex.what());
        return;
    }

    // Coordinates of pixels with a non-zero depth reading.
    cv::Mat valid_pixels;
    cv::findNonZero(depth_byte_ptr->image, valid_pixels);
    convertToDepthPointCloud(depth_float_ptr->image, valid_pixels);
}
int main(int argc,char** argv)
{
    ros::init(argc,argv,"image_listener");
    ros::NodeHandle nh("~");
    nh.param("depth_topic", depth_topic_, std::string("/depth/image_raw"));
    nh.param("rgb_topic", rgb_topic_, std::string("/rgb/image_raw"));
    nh.param("camera_info_topic", camera_info_topic_, std::string("/depth/camera_info"));
    nh.param("rgbd_frame_id",rgbd_frame_id_,std::string("camera_01_depth_optical_frame"));
    try
    {
        ROS_INFO("WAITING FOR CAMERA INFO");
        cameraInfo = *(ros::topic::waitForMessage<sensor_msgs::CameraInfo>(camera_info_topic_, nh));
        m_K <<  cameraInfo.K.at(0), cameraInfo.K.at(1), cameraInfo.K.at(2),
                cameraInfo.K.at(3), cameraInfo.K.at(4), cameraInfo.K.at(5),
                cameraInfo.K.at(6), cameraInfo.K.at(7), cameraInfo.K.at(8);
        std::cout << m_K << std::endl;        
        ROS_INFO("GOT CAMERA INFO");
    }
    catch(std::exception ex)
    {
        ROS_WARN("NO CAMERA INFO");
    }
    message_filters::Subscriber<sensor_msgs::Image> depth_sub(nh, depth_topic_, 1);
    message_filters::Subscriber<sensor_msgs::Image> rgb_sub(nh, rgb_topic_, 1);
    typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image,sensor_msgs::Image> syncPolicy;
    message_filters::Synchronizer<syncPolicy> sync(syncPolicy(10),depth_sub,rgb_sub);
    sync.registerCallback(boost::bind(&PointCloudCallback,_1,_2));
    pc_publisher=nh.advertise<sensor_msgs::PointCloud2>("rgbd_pointcloud",1);
    ros::spin();
}

对应的Launch文件:

<launch>
	<!-- Common topic namespace of the RGBD camera driver. -->
	<arg name="prefix" value="/camera" />
	<node name="image_listener" pkg="rgbd2pc" type="image_listener" output="screen">	
		<!-- Input topics: depth image, its intrinsics, and the registered RGB image. -->
		<param name="depth_topic" value="$(arg prefix)/depth/image_raw" />
		<param name="camera_info_topic" value="$(arg prefix)/depth/camera_info" />
		<param name="rgb_topic" value="$(arg prefix)/rgb/image_raw" />	
		<!-- frame_id stamped onto the published PointCloud2 header. -->
		<param name="rgbd_frame_id" value="camera_depth_optical_frame"/>
	</node>
</launch>

结果:
视觉SLAM(3)_RGBD相机的点云数据生成_第1张图片
学会了这里的方法,以后拿到realsense、奥比中光、乐视等RGBD相机,稍微理解一下,直接转就可以了。

你可能感兴趣的:(视觉SLAM,基础)