Kinect 2.0: Foreground Segmentation Using Depth (C++ Implementation)

For setting up the Kinect 2.0 development environment and acquiring depth images, see this post:
http://blog.csdn.net/ktigerhero3/article/details/49930911
For acquiring color images, see this post:
http://blog.csdn.net/ktigerhero3/article/details/49934745
The code below uses OpenCV. If you want to build and configure OpenCV yourself, see my other post on rebuilding and configuring OpenCV with VS2012 and CMake.

Foreground segmentation approach

The main idea is to map the depth data acquired by the Kinect onto the color image, filter out the background by thresholding the depth values, and then use the depth-to-color correspondence to extract the color pixels of the foreground.
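
As a quick preview, the sketch below shows only the depth-thresholding step in isolation. The helper name DepthToForegroundMask and its arguments are illustrative, not part of the SDK; 650 mm is the same threshold used in the full program further down.

#include <cstdint>
#include <opencv2/core/core.hpp>

// Build a binary foreground mask from a raw 16-bit depth frame (values in millimetres).
// Pixels with a valid reading closer than maxDepthMm are treated as foreground (255).
cv::Mat DepthToForegroundMask(const uint16_t* depthBuffer, int width, int height,
                              uint16_t maxDepthMm = 650)
{
    cv::Mat mask(height, width, CV_8UC1, cv::Scalar(0));
    for (int i = 0; i < width * height; ++i)
    {
        if (depthBuffer[i] > 0 && depthBuffer[i] < maxDepthMm)   // 0 means "no reading"
            mask.data[i] = 255;   // mask was just allocated, so its data is contiguous
    }
    return mask;
}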

The official Kinect 2.0 SDK already provides MapDepthFrameToColorSpace(), which maps a depth frame into color space. The function takes four parameters:
UINT depthPointCount: the total number of depth values
const UINT16 *depthFrameData: the depth frame data
UINT colorPointCount: the total number of color-space points corresponding to the depth data (one per depth pixel)
ColorSpacePoint *colorSpacePoints: the color-space coordinates corresponding to the depth data
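
For reference, here is a minimal helper sketching how those four parameters line up. The function MapDepthToColor and its argument names are illustrative; mapper and depthBuffer are assumed to come from an already initialised sensor.

#include <Kinect.h>
#include <vector>

// Map every depth pixel to its coordinate in the color image.
std::vector<ColorSpacePoint> MapDepthToColor(ICoordinateMapper* mapper,
                                             const UINT16* depthBuffer,
                                             int depthWidth, int depthHeight)
{
    const UINT count = static_cast<UINT>(depthWidth * depthHeight);
    std::vector<ColorSpacePoint> colorPoints(count);
    // One ColorSpacePoint is produced per depth pixel, so both counts are equal.
    HRESULT hr = mapper->MapDepthFrameToColorSpace(
        count, depthBuffer,             // input: depth values, one per pixel
        count, colorPoints.data());     // output: color-space coordinates, one per depth pixel
    if (FAILED(hr))
        colorPoints.clear();            // mapping failed; return an empty vector
    // A returned X/Y can fall outside the color image when the depth value is invalid,
    // so callers must bounds-check before indexing the color image.
    return colorPoints;
}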

The full code is as follows:

#include "stdafx.h"
#include <Kinect.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <cstdio>
#include <cmath>

using namespace cv;
using namespace std;

// Convert a 16-bit depth frame to an 8-bit cv::Mat for display
Mat ConvertMat(const UINT16* pBuffer, int nWidth, int nHeight)
{
    Mat img(nHeight, nWidth, CV_8UC1);
    // pointer to the first pixel of the output image
    uchar* p_mat = img.data;
    // pointer one past the last depth value
    const UINT16* pBufferEnd = pBuffer + (nWidth * nHeight);
    while (pBuffer < pBufferEnd)
    {
        // scale the 16-bit depth value (max 65535) down to 8 bits
        *p_mat = *pBuffer / 65536.0 * 256;
        pBuffer++;
        p_mat++;
    }
    return img;
}

void getForeground()
{
    IKinectSensor*          m_pKinectSensor;
    IDepthFrameReader*      m_pDepthFrameReader;
    IDepthFrameSource*      pDepthFrameSource = NULL;
    IColorFrameSource*      pColorFrameSource;
    IColorFrameReader*      m_pColorFrameReader;
    IFrameDescription*      depthFrameDescription = NULL;
    IFrameDescription*      colorFrameDescription = NULL;
    ColorImageFormat        imageFormat = ColorImageFormat_None;
    ICoordinateMapper*      coordinateMapper = NULL;

    GetDefaultKinectSensor(&m_pKinectSensor);      // get the default Kinect sensor
    printf("Got the default Kinect sensor\n");
    // open the sensor
    m_pKinectSensor->Open();

    // get the depth frame source
    m_pKinectSensor->get_DepthFrameSource(&pDepthFrameSource);
    // get the color frame source
    m_pKinectSensor->get_ColorFrameSource(&pColorFrameSource);
    // open the depth frame reader
    pDepthFrameSource->OpenReader(&m_pDepthFrameReader);
    // open the color frame reader
    pColorFrameSource->OpenReader(&m_pColorFrameReader);

    while (true)
    {
         IColorFrame*       pColorFrame = NULL;
         IDepthFrame*       pDepthFrame = NULL;

         while (pDepthFrame == NULL){
             // AcquireLatestFrame occasionally returns no frame, so keep polling
             m_pDepthFrameReader->AcquireLatestFrame(&pDepthFrame);
         }
         pDepthFrame->get_FrameDescription(&depthFrameDescription);
         int depth_width, depth_height;
         // get the frame dimensions (width and height)
         depthFrameDescription->get_Width(&depth_width);
         depthFrameDescription->get_Height(&depth_height);
         printf("width=%d height=%d\n", depth_width, depth_height);
         UINT nBufferSize_depth = 0;
         UINT16 *pBuffer_depth = NULL;
         // get the number of depth pixels and a pointer to the underlying buffer
         pDepthFrame->AccessUnderlyingBuffer(&nBufferSize_depth, &pBuffer_depth);
         // convert to an 8-bit Mat
         Mat depthImg_show = ConvertMat(pBuffer_depth, depth_width, depth_height);
         // histogram equalization to improve the display
         equalizeHist(depthImg_show, depthImg_show);

         // acquire the color frame
         while (pColorFrame == NULL){
              // AcquireLatestFrame occasionally returns no frame, so keep polling
              m_pColorFrameReader->AcquireLatestFrame(&pColorFrame);
         }
         // get the frame description
         pColorFrame->get_FrameDescription(&colorFrameDescription);
         int nWidth, nHeight;
         uchar *pBuffer = NULL;
         UINT nBufferSize = 0;
         colorFrameDescription->get_Width(&nWidth);
         colorFrameDescription->get_Height(&nHeight);
         cout << "width=" << nWidth << endl;
         cout << "Height=" << nHeight << endl;
         pColorFrame->get_RawColorImageFormat(&imageFormat);
         // the raw format is ColorImageFormat_Yuy2 (= 5)
         cout << "imageformat is " << imageFormat << endl;
         // create a Mat for the converted image; note the constructor takes (height, width)
         Mat colorImg(nHeight, nWidth, CV_8UC4);
         pBuffer = colorImg.data;
         nBufferSize = colorImg.rows * colorImg.step;
         pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(pBuffer), ColorImageFormat_Bgra);


         // map the depth frame into color space
         m_pKinectSensor->get_CoordinateMapper(&coordinateMapper);
         ColorSpacePoint* colorSpacePoint = new ColorSpacePoint[depth_width * depth_height];
         coordinateMapper->MapDepthFrameToColorSpace(depth_width * depth_height, pBuffer_depth, depth_width * depth_height, colorSpacePoint);


          UINT16 *depthData = pBuffer_depth;
          // create an empty Mat (all black) to hold the resulting foreground image
          Mat resultImg(depth_height, depth_width, CV_8UC4, Scalar::all(0));
          for (int i = 0; i < depth_height; i++)
          {
              for (int j = 0; j < depth_width; j++)
              {
                  unsigned int index = i * depth_width + j;
                  ColorSpacePoint csp = colorSpacePoint[index];
                  int colorX = static_cast<int>(floor(csp.X + 0.5));
                  int colorY = static_cast<int>(floor(csp.Y + 0.5));
                  // keep only points that fall inside the color image and are closer
                  // than the depth threshold (650 mm); adjust the threshold as needed
                  if (colorX >= 0 && colorX < nWidth && colorY >= 0 && colorY < nHeight && *depthData < 650)
                  {
                      // copy the corresponding color pixel
                      resultImg.at<Vec4b>(i, j) = colorImg.at<Vec4b>(colorY, colorX);
                  }
                  depthData++;
              }
          }

          // show the images
          cv::imshow("colorImg", colorImg);
          cv::imshow("depthImg", depthImg_show);
          cv::imshow("result", resultImg);

          colorImg.release();
          depthImg_show.release();
          pDepthFrame->Release();
          pColorFrame->Release();
          delete[] colorSpacePoint;
          if (27 == waitKey(50))
                break;
    }
}

int main()
{
    getForeground();
}

The result is shown below.

[Figure 1: screenshot of the foreground segmentation result]
