Orbbec (奥比中光) Astra Pro Gen 1 (MX400) RGBD Camera: Capturing Color (RGB) and Depth Images

Background

For my graduation project I needed to capture RGB and depth images myself, so I picked up a second-hand RGBD camera on Xianyu for 230 RMB. I took a few wrong turns while using it, so I'm sharing my experience here as a heads-up for whoever comes next. The complete C++ code is shared later in this post, and the Python approach is covered as well.


Approach

Data Capture

Because of a limitation of the camera hardware, even the official software cannot capture depth and color images at the same time, so you have to write your own capture program. Depth frames are captured through the OpenNI framework, and color frames through OpenCV.
If you only need to grab depth and RGB images, Python is enough: call the opencv and openni libraries respectively (the openni package is just a Python wrapper around the C++ library). You can refer to this article for a sample program; the openni package can be downloaded directly from the Orbbec website.
The Python wrapper is convenient enough, but whenever I tried to enable hardware registration through it, passing the value always raised an error; I don't know whether that is just the first-generation camera being flaky. When the capture program is written in C++, however, hardware registration works fine.
Below is the complete C++ capture program:

//test.cpp
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <cstdio>
#include <cstdlib>
#include "OpenNI.h"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <sys/types.h>
#include <sys/stat.h>

using namespace std;
using namespace cv;
using namespace openni;
 
void writeMatToXML(const std::string xmlName, const cv::Mat & mat)
{
    FileStorage fs(xmlName, FileStorage::WRITE);
    fs << "Mat" << mat;
    fs.release();
};

void CheckOpenNIError( Status result, string status )
{ 
	if( result != STATUS_OK ) 
		cerr << status << " Error: " << OpenNI::getExtendedError() << endl;
};
 
class ScenceNumber
{
    // use a file (number.txt) to record the index of the current scene
    public:

        int scNum;
        string fileName = "./data/number.txt";
        ScenceNumber()
        {
            ifstream f;
            f.open(this->fileName);
            if(f.good())
            {
                stringstream cvStr;
                string tmpStr;
                getline(f, tmpStr);
                cvStr << tmpStr;
                cvStr >> this->scNum;
                f.close();
            }
            else
            {
                ofstream f(this->fileName);
                f << "0" << endl;
                this->scNum = 0;
                f.close();
            }
        }
        string getNum()
        {
            // increment the scene counter, write it back to the file,
            // and return it as a string
            ofstream f(this->fileName);
            stringstream cvStr;
            string tmpStr;
            this->scNum ++;
            cvStr << this->scNum;
            cvStr >> tmpStr;
            f << tmpStr << endl;
            f.close();
            return tmpStr;
        }
};

int main( int argc, char** argv )
{
	Status result = STATUS_OK;  
    ScenceNumber ScN;
    string baseFilePath = "./data";
    string filePath;
    char autoFlag = 0;
	//OpenNI2 image
	VideoFrameRef oniDepthImg;
    //VideoFrameRef oniColorImg;
 
	//OpenCV image
	cv::Mat cvDepthImg;
	cv::Mat cvBGRImg;
	cv::Mat cvFusionImg;
	
	cv::namedWindow("depth");
	cv::namedWindow("image");
	cv::namedWindow("fusion");
	char key=0;
 
	//【1】
	// initialize OpenNI2
    result = OpenNI::initialize();
	CheckOpenNIError( result, "initialize context" );  
 
	// open device  
	Device device;
    result = device.open( openni::ANY_DEVICE );
    CheckOpenNIError( result, "open device" );
 
	//【2】
	// create depth stream 
    VideoStream oniDepthStream;
    result = oniDepthStream.create( device, openni::SENSOR_DEPTH );
 
	//【3】
	// set depth video mode
    VideoMode modeDepth;
    modeDepth.setResolution( 640, 480 );
    modeDepth.setFps( 30 );
    modeDepth.setPixelFormat( PIXEL_FORMAT_DEPTH_1_MM );
    oniDepthStream.setVideoMode(modeDepth);
	// start depth stream
    result = oniDepthStream.start();
 
	// create color stream (the RGB camera is read through OpenCV)
    VideoCapture capture;
    capture.open(0);
    capture.set(3, 640); // 3 = CV_CAP_PROP_FRAME_WIDTH, rgb frame width
    capture.set(4, 480); // 4 = CV_CAP_PROP_FRAME_HEIGHT, rgb frame height
	
	//【4】
	// set depth to color image registration mode (hardware registration)
	if( device.isImageRegistrationModeSupported( IMAGE_REGISTRATION_DEPTH_TO_COLOR ) )
	{
		device.setImageRegistrationMode( IMAGE_REGISTRATION_DEPTH_TO_COLOR );
	}
 

    long numInSc;
	while( key!=27 ) 
	{  
        if (key == 'g')
        {
            //generate the path
            if (not autoFlag)
            {
                filePath = baseFilePath + "/scence" + ScN.getNum();
                mkdir(filePath.c_str(), 0777);
                numInSc = 0;
                autoFlag = 1;
                cout << filePath << endl;
            }
        }
        if (key == 's')
        {
            //generate the path
            if (autoFlag)
            {
                numInSc = 0;
                autoFlag = 0;
                cout << "scence over" << endl;
            }
        }
		// read frame
		if( oniDepthStream.readFrame( &oniDepthImg ) == STATUS_OK )
		{
            capture >> cvBGRImg;
			cv::Mat cvRawImg16U( oniDepthImg.getHeight(), oniDepthImg.getWidth(), CV_16UC1, (void*)oniDepthImg.getData() );
            
			cvRawImg16U.convertTo(cvDepthImg, CV_8U, 255.0/(oniDepthStream.getMaxPixelValue()));
            cv::flip(cvDepthImg, cvDepthImg, 1);
			//【5】
			// convert depth image GRAY to BGR
			cv::cvtColor(cvDepthImg,cvFusionImg,COLOR_GRAY2BGR);
			cv::imshow( "depth", cvDepthImg );
            cv::imshow( "image", cvBGRImg );
            if(autoFlag) //auto take photos
            {
                stringstream cvt;
                string SNumInSc;
                cvt << numInSc;
                cvt >> SNumInSc;
                writeMatToXML(filePath + "/" + SNumInSc + ".xml", cvRawImg16U);
                cv::imwrite(filePath + "/" + SNumInSc + ".jpg", cvBGRImg);
                cout << SNumInSc << " " << numInSc << "  saved" << endl;
                numInSc ++;
            }
		}
		//【6】
		// blend color and depth for a quick visual check of the registration
		if( !cvBGRImg.empty() && !cvFusionImg.empty() )
		{
			cv::addWeighted(cvBGRImg, 0.5, cvFusionImg, 0.5, 0, cvFusionImg);
			cv::imshow( "fusion", cvFusionImg );
		}
		key = cv::waitKey(100);
	}
    //cv destroy
	cv::destroyWindow("depth");
	cv::destroyWindow("image");
	cv::destroyWindow("fusion");

    //OpenNI2 destroy
    oniDepthStream.destroy();
    capture.release();
    device.close();
    OpenNI::shutdown();

	return 0;
}

The CMakeLists.txt is as follows:

# cmake needs this line
cmake_minimum_required(VERSION 3.1)
 
# Enable C++11
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
 
# Define project name
project(data_capture)
 
# Find OpenCV, you may need to set OpenCV_DIR variable
# to the absolute path to the directory containing OpenCVConfig.cmake file
# via the command line or GUI
INCLUDE_DIRECTORIES($ENV{OPENNI2_INCLUDE})
link_directories($ENV{OPENNI2_REDIST})
find_package(OpenCV REQUIRED)
 
# If the package has been found, several variables will
# be set, you can find the full list with descriptions
# in the OpenCVConfig.cmake file.
# Print some message showing some of them
message(STATUS "OpenCV library status:")
message(STATUS "    config: ${OpenCV_DIR}")
message(STATUS "    version: ${OpenCV_VERSION}")
message(STATUS "    libraries: ${OpenCV_LIBS}")
message(STATUS "    include path: ${OpenCV_INCLUDE_DIRS}")
message(STATUS "    include path: $ENV{OPENNI2_INCLUDE}")
 
# Declare the executable target built from your sources
add_executable(data_capture test.cpp)
 
# Link your application with OpenCV libraries
target_link_libraries(data_capture LINK_PRIVATE ${OpenCV_LIBS} libOpenNI2.so)

A few things to note. The order of the RGB read and the depth read must not be swapped, otherwise the two streams go out of sync. The depth map is saved as .xml here so that Python can load it directly later; in fact it could be saved as a lossless 16-bit PNG instead, which makes the files much smaller (I did not know this at the beginning and wasted a lot of time because of it).
The code above is the complete capture program: it can continuously record multiple scenes at a fixed rate.
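If you want the smaller files, here is a rough sketch of saving and reloading the raw depth as 16-bit PNG. The helper names saveDepthPng / loadDepthPng and the surrounding structure are mine, not part of the program above; cvRawImg16U from the capture loop is the CV_16UC1 Mat you would pass in.

// Sketch: store the raw depth losslessly as 16-bit PNG instead of XML.
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"

// write a CV_16UC1 depth map (e.g. cvRawImg16U from the capture loop) to disk
void saveDepthPng(const std::string &path, const cv::Mat &raw16)
{
    CV_Assert(raw16.type() == CV_16UC1);
    cv::imwrite(path, raw16);   // PNG keeps the full 16-bit range, unlike JPEG
}

// read it back without any conversion; from Python the equivalent is
// cv2.imread(path, cv2.IMREAD_UNCHANGED)
cv::Mat loadDepthPng(const std::string &path)
{
    return cv::imread(path, cv::IMREAD_UNCHANGED);
}

The values stay in millimeters, so nothing else in the pipeline needs to change.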

Registration

There are two ways to register the streams: software registration and hardware registration. I am not that skilled and went in the wrong direction with software registration, so I never got it working. The call device.setImageRegistrationMode( IMAGE_REGISTRATION_DEPTH_TO_COLOR ); in the program above is the hardware registration step: with it enabled, the camera outputs already-registered frames directly. I won't show the results here.
Software registration is said to give better results, so if you are up for it, give it a try.
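If you want to attempt software registration yourself, the usual idea is to back-project each depth pixel into 3D with the depth intrinsics, move it into the color camera frame with the depth-to-color extrinsics, and re-project it with the color intrinsics. Below is a rough sketch of that idea (not something I got working on this camera): the function name is a placeholder, and Kd, Kc, R and t have to come from your own calibration.

#include <cstdint>
#include "opencv2/core/core.hpp"

// Reproject a raw depth frame (CV_16UC1, millimeters) into the color camera's
// image plane. Kd/Kc are the depth and color intrinsic matrices, R and t the
// rotation and translation from the depth frame to the color frame (t in mm).
cv::Mat registerDepthToColor(const cv::Mat &depth16U,
                             const cv::Matx33d &Kd,
                             const cv::Matx33d &Kc,
                             const cv::Matx33d &R,
                             const cv::Vec3d &t,
                             cv::Size colorSize)
{
    cv::Mat out = cv::Mat::zeros(colorSize, CV_16UC1);
    for (int v = 0; v < depth16U.rows; ++v)
    {
        for (int u = 0; u < depth16U.cols; ++u)
        {
            double z = depth16U.at<uint16_t>(v, u);
            if (z == 0) continue;                      // no depth measurement
            // back-project the depth pixel into the depth camera frame
            cv::Vec3d Pd((u - Kd(0, 2)) * z / Kd(0, 0),
                         (v - Kd(1, 2)) * z / Kd(1, 1),
                         z);
            // move the 3D point into the color camera frame
            cv::Vec3d Pc = R * Pd + t;
            if (Pc[2] <= 0) continue;
            // project it onto the color image plane
            int uc = cvRound(Kc(0, 0) * Pc[0] / Pc[2] + Kc(0, 2));
            int vc = cvRound(Kc(1, 1) * Pc[1] / Pc[2] + Kc(1, 2));
            if (uc < 0 || uc >= colorSize.width || vc < 0 || vc >= colorSize.height)
                continue;
            // keep the nearest point if several depth pixels hit the same spot
            uint16_t &dst = out.at<uint16_t>(vc, uc);
            if (dst == 0 || Pc[2] < dst)
                dst = static_cast<uint16_t>(Pc[2]);
        }
    }
    return out;
}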

Calibration

Although the first-generation camera does have its parameters written into internal storage, you simply cannot read them out through the SDK... so you have to calibrate it yourself. There are plenty of methods online, for example OpenCV or MATLAB. The method I recommend is the one in this article; it is not very general, but it is fast, and convenience is what counts.
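If you prefer calibrating with OpenCV directly, here is a rough sketch of the standard chessboard flow. The 9x6 board, the 25 mm squares, the ./calib/N.jpg paths and the frame count are all assumptions for illustration; swap in your own captures.

// Sketch: OpenCV chessboard calibration of the color camera.
#include <vector>
#include <string>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

int main()
{
    const cv::Size boardSize(9, 6);      // inner corners per row / column
    const float squareSize = 25.0f;      // square edge length in mm

    // ideal corner positions on the (planar) board, z = 0
    std::vector<cv::Point3f> board;
    for (int i = 0; i < boardSize.height; ++i)
        for (int j = 0; j < boardSize.width; ++j)
            board.push_back(cv::Point3f(j * squareSize, i * squareSize, 0.f));

    std::vector<std::vector<cv::Point2f>> imagePoints;
    std::vector<std::vector<cv::Point3f>> objectPoints;
    cv::Size imageSize;

    // e.g. ./calib/0.jpg ... ./calib/19.jpg captured with the program above
    for (int k = 0; k < 20; ++k)
    {
        cv::Mat img = cv::imread("./calib/" + std::to_string(k) + ".jpg", cv::IMREAD_GRAYSCALE);
        if (img.empty()) continue;
        imageSize = img.size();

        std::vector<cv::Point2f> corners;
        if (!cv::findChessboardCorners(img, boardSize, corners)) continue;
        cv::cornerSubPix(img, corners, cv::Size(11, 11), cv::Size(-1, -1),
                         cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 30, 0.001));
        imagePoints.push_back(corners);
        objectPoints.push_back(board);
    }

    if (imagePoints.size() < 3)
    {
        std::cout << "not enough valid chessboard views" << std::endl;
        return 1;
    }

    cv::Mat K, distCoeffs;
    std::vector<cv::Mat> rvecs, tvecs;
    double rms = cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                                     K, distCoeffs, rvecs, tvecs);
    std::cout << "RMS reprojection error: " << rms << std::endl;
    std::cout << "camera matrix:\n" << K << std::endl;
    std::cout << "distortion coefficients:\n" << distCoeffs << std::endl;
    return 0;
}

The resulting camera matrix and distortion coefficients are what you would feed into software registration or point-cloud generation.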


That's all I have to share. Criticism and corrections are welcome!
