生成的点云一直不太正常,发现深度图和彩色图没有配准。
Kinect SDK 其实提供了配准接口(ICoordinateMapper,例如 MapDepthFrameToColorSpace / MapDepthFrameToCameraSpace,第二份代码里就在用),之前以为无用,需要再确认调用参数和时序是否正确。
以下为找到的几个配准代码记录:
1.链接:https://blog.csdn.net/shihz_fy/article/details/43602393#commentsedit
/***** Measurement of height by kinect ******/
/***** VisualStudio 2012 (开发工具)
OpenCV2.4.8 (显示界面库 vc11库)
KinectSDK-v2.0-PublicPreview1409-Setup (Kinect SDK驱动版本)
Windows 8.1 (操作系统) ******/
/***** shihz ******/
/***** 2015-2-7 ******/
#include "stdafx.h"
#include <climits>
#include <Kinect.h>
#include "opencv2/opencv.hpp"
#define Y 160
using namespace cv;
// Safe release for interfaces
// Safe release for COM interfaces: Release() the object (if any) and null
// the caller's pointer so it cannot be double-released.
// FIX: the paste lost the template header's angle brackets (HTML stripping);
// restored here -- without it the file does not compile.
template <class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease)
{
    if (pInterfaceToRelease != NULL)
    {
        pInterfaceToRelease->Release();
        pInterfaceToRelease = NULL;
    }
}
// Wrapper class around the Kinect v2 SDK: owns the sensor handle, the
// depth/color frame readers, and the RGBX pixel conversion buffers.
class Kinect
{
public:
static const int cDepthWidth = 512; // depth frame size (Kinect v2 depth stream)
static const int cDepthHeight = 424;
static const int cColorWidth = 1920; // color frame size
static const int cColorHeight = 1080;
Mat showImageDepth; // last rendered (scaled) depth image; read by ProcessColor for the overlay
HRESULT InitKinect();// open the default sensor and create both frame readers
void UpdateDepth();// acquire the latest depth frame and render it
void UpdateColor();// acquire the latest color frame and render it (original comment said "depth" -- copy/paste slip)
void ProcessDepth(const UINT16* pBuffer, int nWidth, int nHeight, USHORT nMinDepth, USHORT nMaxDepth); // convert the raw 16-bit depth buffer to a grayscale RGBX image and show it
void ProcessColor(RGBQUAD* pBuffer, int nWidth, int nHeight); // show the color image blended with the depth overlay
Kinect(); // constructor: null the SDK handles, allocate the pixel buffers
~Kinect(); // destructor: free buffers, release readers, close the sensor
private:
IKinectSensor* m_pKinectSensor;// Current Kinect
IDepthFrameReader* m_pDepthFrameReader;// Depth reader (further readers, e.g. body/IR, can be added the same way when needed)
RGBQUAD* m_pDepthRGBX; // cDepthWidth*cDepthHeight conversion buffer
IColorFrameReader* m_pColorFrameReader;// Color reader
RGBQUAD* m_pColorRGBX; // cColorWidth*cColorHeight conversion buffer
};
int main()
{
Kinect kinect;
Mat showImageColor;
kinect.InitKinect();
while(1)
{
kinect.UpdateColor(); //程序的中心内容,获取数据并显示
kinect.UpdateDepth();
if(waitKey(1) >= 0)//按下任意键退出
{
break;
}
}
return 0;
}
// Constructor: start with null SDK handles and pre-allocate the heap RGBX
// conversion buffers -- one depth frame and one color frame worth of pixels.
Kinect::Kinect()
    : m_pKinectSensor(NULL),
      m_pDepthFrameReader(NULL),
      m_pColorFrameReader(NULL)
{
    m_pDepthRGBX = new RGBQUAD[cDepthWidth * cDepthHeight];
    m_pColorRGBX = new RGBQUAD[cColorWidth * cColorHeight];
}
// Destructor: release everything acquired in the constructor / InitKinect.
// Order: free our own buffers and readers first, then close and release
// the sensor itself.
Kinect::~Kinect()
{
if (m_pDepthRGBX)
{
delete [] m_pDepthRGBX; // free the depth pixel conversion buffer
m_pDepthRGBX = NULL;
}
SafeRelease(m_pDepthFrameReader);// done with depth frame reader
if (m_pColorRGBX)
{
delete [] m_pColorRGBX; // free the color pixel conversion buffer
m_pColorRGBX = NULL;
}
SafeRelease(m_pColorFrameReader);// done with color frame reader
if (m_pKinectSensor)
{
m_pKinectSensor->Close();// close the Kinect Sensor
}
SafeRelease(m_pKinectSensor);
}
// Open the default Kinect sensor and create the color and depth frame
// readers. Returns S_OK on success, E_FAIL (after printing a message)
// when no ready sensor is found or any SDK call fails.
HRESULT Kinect::InitKinect()
{
    // There is normally only one Kinect v2 attached, so the default one is it.
    HRESULT hr = GetDefaultKinectSensor(&m_pKinectSensor);
    if (FAILED(hr))
    {
        return hr;
    }
    if (m_pKinectSensor)
    {
        IDepthFrameSource* pDepthFrameSource = NULL;
        IColorFrameSource* pColorFrameSource = NULL;
        hr = m_pKinectSensor->Open();
        // BUG FIX: the original assigned hr twice in a row inside each
        // SUCCEEDED block, so a failure from the color source / color
        // reader was overwritten by the depth call and silently ignored.
        if (SUCCEEDED(hr))
        {
            hr = m_pKinectSensor->get_ColorFrameSource(&pColorFrameSource);
        }
        if (SUCCEEDED(hr))
        {
            hr = m_pKinectSensor->get_DepthFrameSource(&pDepthFrameSource);
        }
        if (SUCCEEDED(hr))
        {
            hr = pColorFrameSource->OpenReader(&m_pColorFrameReader);
        }
        if (SUCCEEDED(hr))
        {
            hr = pDepthFrameSource->OpenReader(&m_pDepthFrameReader);
        }
        // The readers hold their own references; the sources can go.
        SafeRelease(pColorFrameSource);
        SafeRelease(pDepthFrameSource);
    }
    if (!m_pKinectSensor || FAILED(hr))
    {
        printf("No ready Kinect found! \n");
        return E_FAIL;
    }
    return hr;
}
// Acquire the latest depth frame from the reader, read its metadata
// (size and reliable depth range) and hand the raw 16-bit millimeter
// buffer to ProcessDepth for display. No-op until InitKinect succeeds.
void Kinect::UpdateDepth()
{
if (!m_pDepthFrameReader)
{
return;
}
IDepthFrame* pDepthFrame = NULL;
HRESULT hr = m_pDepthFrameReader->AcquireLatestFrame(&pDepthFrame);
if (SUCCEEDED(hr))
{
IFrameDescription* pFrameDescription = NULL;
int nWidth = 0;
int nHeight = 0;
USHORT nDepthMinReliableDistance = 0;
USHORT nDepthMaxDistance = 0;
UINT nBufferSize = 0;
UINT16 *pBuffer = NULL;
if (SUCCEEDED(hr))
{
hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Width(&nWidth);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Height(&nHeight);
}
if (SUCCEEDED(hr))
{
hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
}
if (SUCCEEDED(hr))
{
// In order to see the full range of depth (including the less reliable far field depth)
// we are setting nDepthMaxDistance to the extreme potential depth threshold
nDepthMaxDistance = USHRT_MAX;
// Note: If you wish to filter by reliable depth distance, uncomment the following line.
// hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxDistance);
}
if (SUCCEEDED(hr))
{
// Borrow the frame's own buffer (no copy); only valid until the frame is released below.
hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);
}
if (SUCCEEDED(hr))
{
ProcessDepth( pBuffer, nWidth, nHeight, nDepthMinReliableDistance, nDepthMaxDistance);
}
SafeRelease(pFrameDescription);
}
SafeRelease(pDepthFrame);
}
void Kinect::UpdateColor()
{
if (!m_pColorFrameReader)
{
return;
}
IColorFrame* pColorFrame = NULL;
HRESULT hr = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame);
if (SUCCEEDED(hr))
{
IFrameDescription* pFrameDescription = NULL;
int nWidth = 0;
int nHeight = 0;
ColorImageFormat imageFormat = ColorImageFormat_None;
UINT nBufferSize = 0;
RGBQUAD *pBuffer = NULL;
if (SUCCEEDED(hr))
{
hr = pColorFrame->get_FrameDescription(&pFrameDescription);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Width(&nWidth);
}
if (SUCCEEDED(hr))
{
hr = pFrameDescription->get_Height(&nHeight);
}
if (SUCCEEDED(hr))
{
hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
}
if (SUCCEEDED(hr))
{
if (imageFormat == ColorImageFormat_Bgra)
{
hr = pColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast(&pBuffer));
}
else if (m_pColorRGBX)
{
pBuffer = m_pColorRGBX;
nBufferSize = cColorWidth * cColorHeight * sizeof(RGBQUAD);
hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast(pBuffer), ColorImageFormat_Bgra);
}
else
{
hr = E_FAIL;
}
}
if (SUCCEEDED(hr))
{
ProcessColor(pBuffer, nWidth, nHeight);
}
SafeRelease(pFrameDescription);
}
SafeRelease(pColorFrame);
}
// Convert one raw depth frame (16-bit millimeters) into a grayscale RGBX
// image and display it; a scaled copy is kept in showImageDepth for the
// overlay done in ProcessColor.
void Kinect::ProcessDepth(const UINT16* pBuffer, int nWidth, int nHeight, USHORT nMinDepth, USHORT nMaxDepth )
{
    // Make sure we've received valid data
    if (m_pDepthRGBX && pBuffer && (nWidth == cDepthWidth) && (nHeight == cDepthHeight))
    {
        RGBQUAD* pRGBX = m_pDepthRGBX;
        // end pixel is start + width*height - 1
        const UINT16* pBufferEnd = pBuffer + (nWidth * nHeight);
        while (pBuffer < pBufferEnd)
        {
            USHORT depth = *pBuffer;
            // Map depth to 8 bits by keeping the LEAST-significant byte, so
            // intensity "wraps" from black to white every 256 mm -- this
            // preserves local detail. Out-of-range depths map to 0 (black).
            // Note: a lookup table would avoid the per-pixel branch in
            // production code.
            // FIX: the paste lost static_cast's <BYTE> argument.
            BYTE intensity = static_cast<BYTE>((depth >= nMinDepth) && (depth <= nMaxDepth) ? (depth % 256) : 0);
            pRGBX->rgbRed = intensity;
            pRGBX->rgbGreen = intensity;
            pRGBX->rgbBlue = intensity;
            // FIX: rgbReserved (alpha) came from new[] and was never written,
            // leaving indeterminate bytes in the displayed CV_8UC4 image.
            pRGBX->rgbReserved = 255;
            ++pRGBX;
            ++pBuffer;
        }
        // Draw the data with OpenCV. The 1.437 scale is a hand-tuned factor
        // to roughly match the half-size color preview in ProcessColor --
        // an eyeballed registration, not a calibrated one.
        Mat DepthImage(nHeight, nWidth, CV_8UC4, m_pDepthRGBX);
        Mat show = DepthImage.clone();
        resize(DepthImage, show, Size(cDepthWidth*1.437 , cDepthHeight*1.437));
        showImageDepth = show.clone();
        imshow("DepthImage", show);
    }
}
// Display a half-size color preview with the scaled depth image alpha-
// blended on top (60% depth / 40% color) inside a hand-tuned overlap region.
// NOTE(review): the (x, y) offsets and the rect are eyeballed, not a real
// depth-to-color registration -- use ICoordinateMapper for proper alignment.
void Kinect::ProcessColor(RGBQUAD* pBuffer, int nWidth, int nHeight)
{
    // Make sure we've received valid data
    if (pBuffer && (nWidth == cColorWidth) && (nHeight == cColorHeight))
    {
        // Draw the data with OpenCV
        Mat ColorImage(nHeight, nWidth, CV_8UC4, pBuffer);
        Mat showImage = ColorImage.clone();
        resize(ColorImage, showImage, Size(nWidth/2 , nHeight/2)); // 960x540 preview
        Rect rect(145,0,702,538);  // overlap region shown to the user
        int x = 33,y =-145;        // pixel offset of the depth overlay
        // Blend depth over color, pixel by pixel (CV_8UC4 -> Vec4b).
        // FIX: the paste lost at<>'s <Vec4b> template argument.
        for(int i = 0;i <540;i++)
            for(int j = 145;j < 960-114;j++)
                showImage.at<Vec4b>(i,j) = showImageDepth.at<Vec4b>(i+x,j+y)*0.6 + showImage.at<Vec4b>(i,j)*0.4;
        Mat image_roi = showImage(rect);
        //imshow("ColorImage", showImage);//imshow("ColorImage", ColorImage);
        imshow("Image_Roi", image_roi);
    }
}
另外一份直接生成点云的代码,
链接:https://blog.csdn.net/u010848251/article/details/70992345
//#define vtkRenderingCore_AUTOINIT 4(vtkInteractionStyle,vtkRenderingFreeType,vtkRenderingFreeType,vtkRenderingOpenGL)
//#define vtkRenderingVolume_AUTOINIT 1(vtkRenderingVolumeOpenGL)
// NOTE(review): the original paste lost every #include target -- the <...>
// spans were stripped as HTML tags. Reconstructed below from the code and
// the surviving comments; verify against the linked blog post.
#include <kinect.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/io/pcd_io.h> //PCL的PCD格式文件的输入输出头文件
#include <pcl/point_types.h> //PCL对各种格式的点的支持头文件
#include <pcl/filters/passthrough.h>
#include <pcl/filters/conditional_removal.h>
#include <pcl/filters/radius_outlier_removal.h>
#include <pcl/filters/statistical_outlier_removal.h>
#include <pcl/registration/icp.h> //ICP(iterative closest point)配准
#include <pcl/console/parse.h> //pcl控制台解析
//kd树
#include <pcl/kdtree/kdtree_flann.h>
//特征提取
#include <pcl/features/normal_3d_omp.h>
#include <pcl/features/normal_3d.h>
//重构
#include <pcl/surface/gp3.h>
#include <pcl/surface/mls.h>
#include <pcl/surface/poisson.h>
#include <pcl/surface/texture_mapping.h>
using namespace cv;
using namespace std;
typedef pcl::PointXYZRGBA MyPointDataType;
// 安全释放指针
// Safe release for COM interfaces: Release() the object (if any) and null
// the caller's pointer so it cannot be double-released.
// FIX: the paste lost the template header's angle brackets (HTML stripping);
// restored here -- without it the file does not compile.
template <class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease)
{
    if (pInterfaceToRelease != NULL)
    {
        pInterfaceToRelease->Release();
        pInterfaceToRelease = NULL;
    }
}
// Convert an integer to its decimal string representation (e.g. 7 -> "7").
std::string num2str(int i)
{
    return std::to_string(i);
}
int main()
{
// 获取Kinect设备
IKinectSensor* m_pKinectSensor;
HRESULT hr;
hr = GetDefaultKinectSensor(&m_pKinectSensor);
if (FAILED(hr))
{
return hr;
}
IMultiSourceFrameReader* m_pMultiFrameReader;
if (m_pKinectSensor)
{
hr = m_pKinectSensor->Open();
if (SUCCEEDED(hr))
{
// 获取多数据源到读取器
hr = m_pKinectSensor->OpenMultiSourceFrameReader(
FrameSourceTypes::FrameSourceTypes_Color |
FrameSourceTypes::FrameSourceTypes_Infrared |
FrameSourceTypes::FrameSourceTypes_Depth,
&m_pMultiFrameReader);
}
}
if (!m_pKinectSensor || FAILED(hr))
{
return E_FAIL;
}
UINT16 *depthData = new UINT16[424 * 512];//用于存储深度图数据
Mat i_rgb(1080, 1920, CV_8UC4);
Mat i_depthWrite(424, 512, CV_16UC1);
UINT nColorBufferSize = 1920 * 1080 * 4;
// 三个数据帧及引用
IDepthFrameReference* m_pDepthFrameReference = nullptr;
IColorFrameReference* m_pColorFrameReference = nullptr;
IDepthFrame* m_pDepthFrame = nullptr;
IColorFrame* m_pColorFrame = nullptr;
IMultiSourceFrame* m_pMultiFrame = nullptr;
ICoordinateMapper* m_pCoordinateMapper = nullptr;
int count = 0;
while (count <= 30)
{
//Sleep(5000);
while (true)
{
// 获取新的一个多源数据帧
hr = m_pMultiFrameReader->AcquireLatestFrame(&m_pMultiFrame);
if (FAILED(hr) || !m_pMultiFrame)
{
continue;
}
break;
}
// 从多源数据帧中分离出彩色数据,深度数据
if (SUCCEEDED(hr))
hr = m_pMultiFrame->get_DepthFrameReference(&m_pDepthFrameReference);
if (SUCCEEDED(hr))
hr = m_pDepthFrameReference->AcquireFrame(&m_pDepthFrame);
if (SUCCEEDED(hr))
hr = m_pMultiFrame->get_ColorFrameReference(&m_pColorFrameReference);
if (SUCCEEDED(hr))
hr = m_pColorFrameReference->AcquireFrame(&m_pColorFrame);
hr = m_pKinectSensor->get_CoordinateMapper(&m_pCoordinateMapper);
if (SUCCEEDED(hr))
hr = m_pColorFrame->CopyConvertedFrameDataToArray(nColorBufferSize, i_rgb.data, ColorImageFormat::ColorImageFormat_Bgra);
// 定义相关变量
pcl::PointCloud::Ptr cloud(new pcl::PointCloud);
pcl::PointCloud::Ptr cloud_filtered(new pcl::PointCloud);
//初始化点云数据PCD文件头
cloud->width = 512 * 424;
cloud->height = 1;
cloud->is_dense = false;
cloud->points.resize(cloud->width * cloud->height);
if (SUCCEEDED(hr))
{
hr = m_pDepthFrame->CopyFrameDataToArray(424 * 512, depthData);
hr = m_pDepthFrame->CopyFrameDataToArray(424 * 512, reinterpret_cast(i_depthWrite.data));
imwrite("depth_" + num2str(count) + ".png", i_depthWrite);
CameraSpacePoint* m_pCameraCoordinates = new CameraSpacePoint[512 * 424];
hr = m_pCoordinateMapper->MapDepthFrameToCameraSpace(512 * 424, depthData, 512 * 424, m_pCameraCoordinates);
ColorSpacePoint* m_pColorCoordinates = new ColorSpacePoint[512 * 424];
hr = m_pCoordinateMapper->MapDepthFrameToColorSpace(512 * 424, depthData, 512 * 424, m_pColorCoordinates);
for (int i = 0; i < 512 * 424; i++)
{
//------写入RGB------
ColorSpacePoint colorP = m_pColorCoordinates[i];
if (colorP.X != -std::numeric_limits::infinity() && colorP.Y != -std::numeric_limits::infinity())
{
int colorX = static_cast(colorP.X + 0.5f);
int colorY = static_cast(colorP.Y + 0.5f);
if ((colorX >= 0 && colorX < 1920) && (colorY >= 0 && colorY < 1080))
{
cloud->points[i].b = i_rgb.data[(colorY * 1920 + colorX) * 4];
cloud->points[i].g = i_rgb.data[(colorY * 1920 + colorX) * 4 + 1];
cloud->points[i].r = i_rgb.data[(colorY * 1920 + colorX) * 4 + 2];
}
}
//------写入XYZ------
CameraSpacePoint cameraP = m_pCameraCoordinates[i];
if (cameraP.X != -std::numeric_limits::infinity() && cameraP.Y != -std::numeric_limits::infinity() && cameraP.Z != -std::numeric_limits::infinity())
{
float cameraX = static_cast(cameraP.X);
float cameraY = static_cast(cameraP.Y);
float cameraZ = static_cast(cameraP.Z);
cloud->points[i].x = cameraX;
cloud->points[i].y = cameraY;
cloud->points[i].z = cameraZ;
}
}
}
//-----------------------提取范围内的点------------------------
pcl::ConditionAnd::Ptr range_cond(new pcl::ConditionAnd());
range_cond->addComparison(pcl::FieldComparison::ConstPtr(new pcl::FieldComparison("z", pcl::ComparisonOps::GT, 0.001)));
range_cond->addComparison(pcl::FieldComparison::ConstPtr(new pcl::FieldComparison("z", pcl::ComparisonOps::LT, 2.0)));
range_cond->addComparison(pcl::FieldComparison::ConstPtr(new pcl::FieldComparison("x", pcl::ComparisonOps::GT, -0.5)));
range_cond->addComparison(pcl::FieldComparison::ConstPtr(new pcl::FieldComparison("x", pcl::ComparisonOps::LT, 0.5)));
//range_cond->addComparison(pcl::FieldComparison::ConstPtr(new pcl::FieldComparison("y", pcl::ComparisonOps::GT, -0.85)));
pcl::ConditionalRemoval condrem(range_cond, false);
condrem.setInputCloud(cloud);
condrem.setKeepOrganized(false);
condrem.filter(*cloud_filtered);
//--------------------------------------------------------------
//-----------------------去除离群点------------------------
//pcl::RadiusOutlierRemoval outrem;
//outrem.setInputCloud(cloud_filtered);
//outrem.setRadiusSearch(0.03);
//outrem.setMinNeighborsInRadius(15);
//outrem.filter(*cloud_filtered);
//pcl::StatisticalOutlierRemoval sor;
//sor.setInputCloud(cloud_filtered);
//sor.setMeanK(10);
//sor.setStddevMulThresh(1.0);
//sor.filter(*cloud_filtered);
//--------------------------------------------------------------
string s = "cow";
s += num2str(count);
s += ".pcd";
pcl::io::savePCDFile(s, *cloud_filtered, false); //将点云保存到PCD文件中
std::cerr << "Saved " << cloud_filtered->points.size() << " data points." << std::endl;
s.clear();
//Beep(1046, 1000);
// 显示结果图
boost::shared_ptr viewer(new pcl::visualization::PCLVisualizer("3D viewer"));
viewer->addPointCloud(cloud_filtered);
viewer->resetCamera();
viewer->addCoordinateSystem(0.1);
viewer->initCameraParameters();
while (!viewer->wasStopped()){
viewer->spinOnce();
}
count++;
cout << "test" << endl;
// 释放资源
SafeRelease(m_pDepthFrame);
SafeRelease(m_pDepthFrameReference);
SafeRelease(m_pColorFrame);
SafeRelease(m_pColorFrameReference);
SafeRelease(m_pMultiFrame);
}
// 关闭窗口,设备
m_pKinectSensor->Close();
SafeRelease(m_pKinectSensor);
std::system("pause");
return 0;
}