When capturing data with a Kinect 2.0 device, aligning the depth image with the color image is a real problem. I went through many approaches while working on it and finally got it solved; my thanks to the authors of the following two articles for pointing me toward the solution.
First: I can no longer find this blogger's address, but I am still grateful for their generous write-up.
Second: https://zhuanlan.zhihu.com/p/33001167
Before running the code in this post, a few notes: in the spirit of helping others, I first walk through the main pieces of the code, so that anyone who needs it can follow what each part does.
IKinectSensor* m_pKinectSensor;
HRESULT hr;
hr = GetDefaultKinectSensor(&m_pKinectSensor);
IMultiSourceFrameReader* m_pMultiFrameReader;
The program reads color, depth, and infrared data. If you only need one specific stream, you can declare a dedicated reader instead, for example one of the following (a short sketch of opening such a reader appears after the list):
IDepthFrameReader;
IColorFrameReader;
IInfraredFrameReader;
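For completeness, here is a minimal sketch (not taken from the original program) of opening one of these dedicated readers, using the depth stream as an example; the variable names are only illustrative, and m_pKinectSensor and hr are the same as in the snippet above:
IDepthFrameSource* pDepthFrameSource = nullptr;
IDepthFrameReader* pDepthFrameReader = nullptr;
IDepthFrame* pDepthFrame = nullptr;
//get the depth source from the sensor and open a reader on it
hr = m_pKinectSensor->get_DepthFrameSource(&pDepthFrameSource);
if (SUCCEEDED(hr))
hr = pDepthFrameSource->OpenReader(&pDepthFrameReader);
//pull the most recent depth frame, just like AcquireLatestFrame on the multi-source reader
if (SUCCEEDED(hr))
hr = pDepthFrameReader->AcquireLatestFrame(&pDepthFrame);
//when done, release the frame, the reader and the source with SafeRelease, as in the full code below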
Mat i_rgb(1080, 1920, CV_8UC4);
Mat i_depth(424, 512, CV_8UC1);
Mat i_ir(424, 512, CV_16UC1);
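Note that i_rgb has to be CV_8UC4, because the Kinect color stream is only delivered as BGRA. If a plain 3-channel BGR image is preferred for later processing, it can be converted with OpenCV's cvtColor; this step is optional and not part of the pipeline below:
//drop the alpha channel to get a standard 3-channel BGR image
Mat i_bgr;
cvtColor(i_rgb, i_bgr, COLOR_BGRA2BGR);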
imwrite("E:\\CCode\\Line_Segmentation\\File\\img\\" + to_string(img_num) + "_bgra.png", i_rgb);
imwrite("E:\\CCode\\Line_Segmentation\\File\\img\\" + to_string(img_num) + "_depth.png", i_depth);
imwrite("E:\\CCode\\Line_Segmentation\\File\\img\\" + to_string(img_num) + "_ir.png", i_ir);
Here is the key point of this post; the crucial function is:
HRESULT STDMETHODCALLTYPE MapDepthFrameToColorSpace(
UINT depthPointCount,
const UINT16 *depthFrameData,
UINT colorPointCount,
ColorSpacePoint *colorSpacePoints);
The first parameter is the number of points in the depth frame, which for the Kinect v2 is 512x424.
The second parameter is the depth frame data itself.
The third parameter is the number of elements in the output array; since there is one ColorSpacePoint per depth pixel, it is also 512x424.
The fourth parameter is the output mapping: for each depth pixel it gives the corresponding coordinates in the color image.
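Putting the parameters together, the call looks roughly like this (a condensed sketch of what the full program below does; it assumes depthData already holds the 512x424 depth frame copied out with CopyFrameDataToArray):
//the coordinate mapper is obtained from the sensor itself
ICoordinateMapper* pCoordinateMapper = nullptr;
m_pKinectSensor->get_CoordinateMapper(&pCoordinateMapper);
//one ColorSpacePoint per depth pixel
vector<ColorSpacePoint> MappingMatrix(512 * 424);
pCoordinateMapper->MapDepthFrameToColorSpace(512 * 424, depthData, 512 * 424, &MappingMatrix[0]);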
Once this mapping is obtained, the aligned color image can be generated by looking up each depth pixel's mapped coordinates in the color image, as in the figure below:
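Concretely, for depth pixel i the mapped coordinates index into the 1920x1080 BGRA buffer as follows (a trimmed excerpt of the loop in the full program below; pixels that the mapper reports as -infinity or that fall outside the color image are skipped and stay black):
ColorSpacePoint point = MappingMatrix[i];
if (point.X != -INFINITY && point.Y != -INFINITY)
{
int colorX = static_cast<int>(ceil(point.X));
int colorY = static_cast<int>(ceil(point.Y));
if (colorX >= 0 && colorX < 1920 && colorY >= 0 && colorY < 1080)
for (int c = 0; c < 4; c++)
color_img.data[i * 4 + c] = i_rgb.data[(colorY * 1920 + colorX) * 4 + c];
}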
That covers the main modules of the code. The complete program is listed below; I hope it helps.
#include <Windows.h>
#include <Kinect.h>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
#include <vector>
using namespace cv;
using namespace std;
//Safely release a COM interface pointer
template<class Interface>
inline void SafeRelease(Interface*& pInterfaceToRelease)
{
if (pInterfaceToRelease != NULL)
{
pInterfaceToRelease->Release();
pInterfaceToRelease = NULL;
}
}
int main()
{
//Get the default Kinect sensor
IKinectSensor* m_pKinectSensor;
HRESULT hr;
hr = GetDefaultKinectSensor(&m_pKinectSensor);
if (FAILED(hr))
{
return hr;
}
//Open a multi-source frame reader on the sensor
IMultiSourceFrameReader* m_pMultiFrameReader; // IDepthFrameReader; IColorFrameReader;
if (m_pKinectSensor)
{
hr = m_pKinectSensor->Open();
if (SUCCEEDED(hr))
{
hr = m_pKinectSensor->OpenMultiSourceFrameReader(
FrameSourceTypes::FrameSourceTypes_Color |
FrameSourceTypes::FrameSourceTypes_Infrared |
FrameSourceTypes::FrameSourceTypes_Depth,
&m_pMultiFrameReader);
}
}
if (!m_pKinectSensor || FAILED(hr))
{
cout << "E_FAIL=" << E_FAIL << endl;
return E_FAIL;
}
//The three frames and their frame references
IDepthFrameReference* m_pDepthFrameReference; //depth
IColorFrameReference* m_pColorFrameReference; //RGB
IInfraredFrameReference* m_pInfraredFrameReference; //infrared
IDepthFrame* m_pDepthFrame;
IColorFrame* m_pColorFrame;
IInfraredFrame* m_pInfraredFrame;
//Image buffers for the three streams
Mat i_rgb(1080, 1920, CV_8UC4);//Note: this must be a 4-channel image; the Kinect color stream can only be delivered as BGRA
Mat i_depth(424, 512, CV_8UC1);
Mat i_ir(424, 512, CV_16UC1);
UINT16* depthData = new UINT16[424 * 512];
IMultiSourceFrame* m_pMultiFrame = nullptr;
int sample_id = 1;
int img_num = 1;
while (true)
{
//Acquire the latest multi-source frame
hr = m_pMultiFrameReader->AcquireLatestFrame(&m_pMultiFrame);
if (FAILED(hr) || !m_pMultiFrame)
{
continue;
}
//Separate the color, depth and infrared frames from the multi-source frame
if (SUCCEEDED(hr))
hr = m_pMultiFrame->get_ColorFrameReference(&m_pColorFrameReference);
if (SUCCEEDED(hr))
hr = m_pColorFrameReference->AcquireFrame(&m_pColorFrame);
if (SUCCEEDED(hr))
hr = m_pMultiFrame->get_DepthFrameReference(&m_pDepthFrameReference);
if (SUCCEEDED(hr))
hr = m_pDepthFrameReference->AcquireFrame(&m_pDepthFrame);
if (SUCCEEDED(hr))
hr = m_pMultiFrame->get_InfraredFrameReference(&m_pInfraredFrameReference);
if (SUCCEEDED(hr))
hr = m_pInfraredFrameReference->AcquireFrame(&m_pInfraredFrame);
UINT nColorBufferSize = 1920 * 1080 * 4;//size of the BGRA buffer to copy into the color image
if (SUCCEEDED(hr))
hr = m_pColorFrame->CopyConvertedFrameDataToArray(nColorBufferSize, reinterpret_cast<BYTE*>(i_rgb.data), ColorImageFormat::ColorImageFormat_Bgra);
if (SUCCEEDED(hr))//copy the depth data into the image
{
hr = m_pDepthFrame->CopyFrameDataToArray(424 * 512, depthData);
for (int i = 0; i < 512 * 424; i++)
{
//0-255 depth image: only the low 8 bits of each depth value are kept so the display is clearly visible
BYTE intensity = static_cast<BYTE>(depthData[i] % 256);
reinterpret_cast<BYTE*>(i_depth.data)[i] = intensity;
}
}
if (SUCCEEDED(hr))//copy the infrared data into the image
{
hr = m_pInfraredFrame->CopyFrameDataToArray(424 * 512, reinterpret_cast<UINT16*>(i_ir.data));
}
//Display the images
imshow("rgb", i_rgb);
if (waitKey(1) == VK_ESCAPE)
break;
imshow("depth", i_depth);
if (waitKey(1) == VK_ESCAPE)
break;
imshow("ir", i_ir);
if (waitKey(1) == VK_ESCAPE)
break;
sample_id += 1;
if (sample_id % 30 == 0)
{
//Save the images
imwrite("E:\\CCode\\Line_Segmentation\\File\\img\\" + to_string(img_num) + "_bgra.png", i_rgb);
imwrite("E:\\CCode\\Line_Segmentation\\File\\img\\" + to_string(img_num) + "_depth.png", i_depth);
imwrite("E:\\CCode\\Line_Segmentation\\File\\img\\" + to_string(img_num) + "_ir.png", i_ir);
cout << "img_num_" << img_num << " has been saved.\n";
img_num += 1;
}
Mat color_img = Mat::zeros(424, 512, CV_8UC4); //4-channel output image, zero-initialized so unmapped pixels stay black
vector<ColorSpacePoint> MappingMatrix(512 * 424); //one ColorSpacePoint per depth pixel, used to align the color image with the depth image
ICoordinateMapper* pCoordinateMapper;
m_pKinectSensor->get_CoordinateMapper(&pCoordinateMapper);
pCoordinateMapper->MapDepthFrameToColorSpace(512 * 424, depthData, 512 * 424, &MappingMatrix[0]);
for (int i = 0; i < 424 * 512; i++)
{
ColorSpacePoint point = MappingMatrix[i];
if (point.X != -INFINITY && point.Y != -INFINITY) //the mapper reports unmappable depth pixels as -infinity
{
int colorX = static_cast<int>(ceil(point.X));
int colorY = static_cast<int>(ceil(point.Y));
if ((colorX >= 0 && colorX < 1920) && (colorY >= 0 && colorY < 1080))
{
color_img.data[i * 4] = i_rgb.data[(colorY * 1920 + colorX) * 4];
color_img.data[i * 4 + 1] = i_rgb.data[(colorY * 1920 + colorX) * 4 + 1];
color_img.data[i * 4 + 2] = i_rgb.data[(colorY * 1920 + colorX) * 4 + 2];
color_img.data[i * 4 + 3] = i_rgb.data[(colorY * 1920 + colorX) * 4 + 3];
}
}
}
imshow("rgb_matched", color_img);
if (waitKey(1) == VK_ESCAPE)
break;
if (sample_id % 30 == 0) //same frame as the depth/RGB/IR images saved above
{
//save the aligned color image (img_num was already incremented, so img_num - 1 matches the files saved above)
imwrite("E:\\CCode\\Line_Segmentation\\File\\img\\" + to_string(img_num - 1) + "_rgb.png", color_img);
}
//Release the per-frame resources
SafeRelease(m_pColorFrame);
SafeRelease(m_pDepthFrame);
SafeRelease(m_pInfraredFrame);
SafeRelease(m_pColorFrameReference);
SafeRelease(m_pDepthFrameReference);
SafeRelease(m_pInfraredFrameReference);
SafeRelease(m_pMultiFrame);
SafeRelease(pCoordinateMapper);
}
//Close the windows, release remaining resources and close the sensor
cv::destroyAllWindows();
delete[] depthData;
SafeRelease(m_pMultiFrameReader);
m_pKinectSensor->Close();
SafeRelease(m_pKinectSensor);
return 0;
}