Stereo CSI camera calibration with ROS on the Jetson Nano

The platform is a Jetson Nano flashed with NVIDIA's official image, with a working ROS environment already installed.

My Jetson Nano has two CSI cameras attached, and I want to use them for stereo vision, which first requires calibration. Calibration is done with ROS's camera_calibration package (see: http://wiki.ros.org/camera_calibration/Tutorials/StereoCalibration). We first need to publish the left and right cameras' image_raw topics through ROS; the steps are as follows:

1. Create a package in the ROS workspace

cd ~/catkin_ws/src
catkin_create_pkg csi_cam image_transport cv_bridge
cd ..
catkin_make
source ./devel/setup.bash

This creates a package named csi_cam that depends on the image_transport and cv_bridge packages.
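The catkin_create_pkg command also generates a package.xml for the new package; it should already declare the two dependencies with entries roughly like the following (shown only for reference, your generated file may differ slightly):

<build_depend>image_transport</build_depend>
<build_depend>cv_bridge</build_depend>
<exec_depend>image_transport</exec_depend>
<exec_depend>cv_bridge</exec_depend>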

2. Enter the csi_cam/src directory and create the source file dual_csi_topic.cpp with the following code:

#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <cstdio>

using namespace cv;

std::string gstreamer_pipeline (int sensor_id, int sensor_mode, int capture_width, int capture_height, int display_width, int display_height, int framerate, int flip_method) {
    return "nvarguscamerasrc sensor_id=" + std::to_string(sensor_id) + " sensor_mode=" + std::to_string(sensor_mode) + " ! video/x-raw(memory:NVMM), width=(int)" + std::to_string(capture_width) + ", height=(int)" +
           std::to_string(capture_height) + ", format=(string)NV12, framerate=(fraction)" + std::to_string(framerate) +
           "/1 ! nvvidconv flip-method=" + std::to_string(flip_method) + " ! video/x-raw, width=(int)" + std::to_string(display_width) + ", height=(int)" +
           std::to_string(display_height) + ", format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink";
}

int main(int argc, char** argv) {
    int capture_width = 640 ;
    int capture_height = 480 ;
    int display_width = 640 ;
    int display_height = 480 ;
    int framerate = 60 ;
    int flip_method = 0 ;	

    ros::init(argc, argv, "image_publisher");
    ros::NodeHandle nh;
    image_transport::ImageTransport it(nh);
    image_transport::Publisher pub_left  = it.advertise("csi_stereo/left/image_raw", 1);
    image_transport::Publisher pub_right = it.advertise("csi_stereo/right/image_raw", 1);

    std::string pipeline_left = gstreamer_pipeline(0, 3, capture_width,
	capture_height,
	display_width,
	display_height,
	framerate,
	flip_method);
    std::cout << "Using pipeline_left: \n\t" << pipeline_left << "\n";
    std::string pipeline_right = gstreamer_pipeline(1, 3, capture_width,
	capture_height,
	display_width,
	display_height,
	framerate,
	flip_method);
    std::cout << "Using pipeline_right: \n\t" << pipeline_right << "\n";

    cv::VideoCapture cap_left(pipeline_left, cv::CAP_GSTREAMER);
    if (!cap_left.isOpened()) {
        ROS_INFO("cannot open video device0\n");
        return 1;
    }	    

    cv::VideoCapture cap_right(pipeline_right, cv::CAP_GSTREAMER);
    
    if (!cap_right.isOpened()) {
        ROS_INFO("cannot open video device1\n");
        return 1;
    }
 
    sensor_msgs::ImagePtr msg_left;
    sensor_msgs::ImagePtr msg_right;
    ros::Rate loop_rate(10);//hz
    cv::Mat frameL;
    cv::Mat frameR;
    int count = 0;
    char image_left[200];
    char image_right[200];
    while (nh.ok()) {
    	if (!cap_left.read(frameL)) {
		    std::cout << "Capture left read error" << std::endl;
		    break;
	    }
	    if (!cap_right.read(frameR)) {
		    std::cout << "Capture right read error" << std::endl;
		    break;
	    }

	    ros::Time time_L = ros::Time::now();
	    ros::Time time_R = ros::Time::now();

	    std::cout << "frameR.cols: " << frameR.cols << " frameR.rows: " << frameR.rows << std::endl;
	    if (!frameR.empty()) {
            msg_right = cv_bridge::CvImage(std_msgs::Header(), "bgr8", frameR).toImageMsg();
	        msg_right->header.stamp = time_R;
            pub_right.publish(msg_right);
        }
	    if (!frameL.empty()) {  
            msg_left = cv_bridge::CvImage(std_msgs::Header(), "bgr8", frameL).toImageMsg(); 
	        msg_left->header.stamp = time_L;
            pub_left.publish(msg_left);  
        }
	
	    ROS_INFO("Publishing csi_stereo/left/image_raw csi_stereo/right/image_raw ROS topic MSG!! ");
        ros::spinOnce();  
        loop_rate.sleep(); // sleep so the loop runs at the rate set by ros::Rate above

	    // cv::waitKey only receives key events while an OpenCV window has focus,
	    // so display the frames before polling the keyboard.
	    cv::imshow("left", frameL);
	    cv::imshow("right", frameR);
	    int keycode = cv::waitKey(30) & 0xff;
        if (keycode == 27) break;          // Esc: quit
	    if (keycode == 32) {               // Space: save a left/right image pair
		    count++;
		    snprintf(image_left, sizeof(image_left), "./img/left%02d.jpg", count);
		    cv::imwrite(image_left, frameL);
		    snprintf(image_right, sizeof(image_right), "./img/right%02d.jpg", count);
		    cv::imwrite(image_right, frameR);
	    }
    }
    cap_left.release();
    cap_right.release();	
}

The code also lets you save a left/right image pair manually with the space bar, so the pairs can alternatively be used to calibrate in MATLAB.
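Note that cv::imwrite does not create missing directories, and the relative path ./img is resolved against whatever directory you launch the node from, so create the folder first, for example:

cd ~/catkin_ws    # or wherever you will run rosrun from
mkdir -p img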

3. Edit CMakeLists.txt and add the following:

find_package(OpenCV REQUIRED)
include_directories(${catkin_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS})

add_executable(dual_csi_cam src/dual_csi_topic.cpp)
target_link_libraries(dual_csi_cam  ${OpenCV_LIBS})
target_link_libraries(dual_csi_cam  ${catkin_LIBRARIES})
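
For reference, assuming the file generated by catkin_create_pkg with the dependencies above, the relevant part of the finished CMakeLists.txt looks roughly like this (a sketch, not the complete generated file):

cmake_minimum_required(VERSION 2.8.3)
project(csi_cam)

# catkin components declared when the package was created
find_package(catkin REQUIRED COMPONENTS image_transport cv_bridge)
find_package(OpenCV REQUIRED)

catkin_package()

include_directories(${catkin_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS})

add_executable(dual_csi_cam src/dual_csi_topic.cpp)
target_link_libraries(dual_csi_cam ${OpenCV_LIBS} ${catkin_LIBRARIES})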

4. Build

cd ~/catkin_ws
catkin_make -DCATKIN_WHITELIST_PACKAGES="csi_cam"

At this point you will usually hit the following error:

CMake Error at /opt/ros/melodic/share/cv_bridge/cmake/cv_bridgeConfig.cmake:113 (message):
  Project 'cv_bridge' specifies '/usr/include/opencv' as an include dir,
  which is not found.  It does neither exist as an absolute directory nor in
  '${prefix}//usr/include/opencv'.  Check the issue tracker
  'https://github.com/ros-perception/vision_opencv/issues' and consider
  creating a ticket if the problem has not been reported yet.
Call Stack (most recent call first):
  /opt/ros/melodic/share/catkin/cmake/catkinConfig.cmake:76 (find_package)
  csi_cam/CMakeLists.txt:10 (find_package)

This happens because the NVIDIA image ships OpenCV 4, whose headers live under /usr/include/opencv4, while the prebuilt cv_bridge expects /usr/include/opencv. Simply edit /opt/ros/melodic/share/cv_bridge/cmake/cv_bridgeConfig.cmake, change every occurrence of /usr/include/opencv to /usr/include/opencv4, and rebuild.
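If you prefer a one-liner, something along these lines should work (the -i.bak flag keeps a backup of the original file, and the \b word boundary leaves any entries that already end in opencv4 untouched):

sudo sed -i.bak 's|/usr/include/opencv\b|/usr/include/opencv4|g' /opt/ros/melodic/share/cv_bridge/cmake/cv_bridgeConfig.cmake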

5. Run the node to publish the topics

First, open a terminal and run roscore.

Then open another terminal and run the node we just built:

rosrun csi_cam dual_csi_cam
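
If both cameras open successfully, the node first prints the two GStreamer pipeline strings ("Using pipeline_left: ..." / "Using pipeline_right: ...") and then keeps logging a line similar to:

[ INFO] [...]: Publishing csi_stereo/left/image_raw csi_stereo/right/image_raw ROS topic MSG!!

If one of the captures fails to open, it is worth sanity-checking that camera outside ROS first, e.g. with gst-launch-1.0 nvarguscamerasrc sensor_id=0 ! nvoverlaysink.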

List the topics:

rostopic list
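
Among the listed topics you should see the two image streams advertised by the node:

/csi_stereo/left/image_raw
/csi_stereo/right/image_raw

You can also verify that frames are actually flowing (at roughly the 10 Hz loop rate) with rostopic hz /csi_stereo/left/image_raw.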

6. Run camera_calibration to perform the calibration

rosrun camera_calibration cameracalibrator.py --approximate 0.1 --size 9x6 --square 0.0245 right:=/csi_stereo/right/image_raw left:=/csi_stereo/left/image_raw 
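
Here --size 9x6 is the number of interior corners of the checkerboard, --square 0.0245 is the side length of one square in meters, and --approximate 0.1 allows the left/right timestamps to differ by up to 0.1 s, since the two CSI cameras are not hardware-synchronized; adjust the board parameters to match your own target. Once enough samples have been collected, click CALIBRATE and then SAVE; the calibrator normally writes the captured images and the resulting parameters to a tarball such as /tmp/calibrationdata.tar.gz.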
