Simple Kinect Foreground Extraction


This is the first step of doing 3D scanning with the Kinect: cutting out the foreground region we are interested in. As everyone knows, the Kinect's biggest strength is that it captures depth data, and with that depth data the foreground can be separated from the background.
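Before the full program, here is a minimal sketch of that idea, assuming the depth map has already been registered to the color image and is stored as a 16-bit single-channel IplImage in millimetres; the function name ExtractForeground and its parameters are my own illustration, not part of the program below.

#include <opencv/cv.h>

// Minimal sketch (illustrative only, not part of the program below):
// keep the color pixels whose depth lies in [minMM, maxMM) and black out the rest.
IplImage* ExtractForeground(const IplImage* image, const IplImage* depth,
                            double minMM, double maxMM)
{
	IplImage* mask = cvCreateImage(cvGetSize(depth), IPL_DEPTH_8U, 1);
	IplImage* fg   = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 3);
	cvInRangeS(depth, cvScalar(minMM), cvScalar(maxMM), mask);	// in-range pixels -> 255
	cvZero(fg);
	cvCopy(image, fg, mask);	// copy only the masked (foreground) pixels
	cvReleaseImage(&mask);
	return fg;
}

The full program below does the same thing by hand, pixel by pixel, so that the selected points, their colors and their depths can be kept in separate vectors for the later scanning steps.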

Long story short, here is the result:

[Figure 1: result of the Kinect foreground extraction]

 

And now the source code:

 

/***************************************
* Description: This program rebuilds
* models by scanning objects with a Kinect
*
* Methods of application:
* 
*
* Author: Lixam
* Time: 2012-11
****************************************/
#include "stdafx.h"

#include <XnCppWrapper.h> 
#include <opencv/highgui.h>
#include <opencv/cv.h>
#include <vector>
using namespace cv;
using namespace xn;
using namespace std;


#define IMAGEWIDTH 640
#define IMAGEHEIGTH 480

typedef struct 
{
	unsigned char R;
	unsigned char G;
	unsigned char B;
}RGBPIXEL;

typedef struct 
{
	unsigned char data_L;	//Low bits 
	unsigned char data_H;	//High bits
}DEPTHPIXEL;

//Generator  
ImageGenerator m_imageGenerator;
DepthGenerator m_depthGenerator;
Context m_context;

/*************************************************************************
 *
 * GetROIdata()
 *
 * Parameters:
 *
 * IplImage* image		- Source of color image data
 * IplImage* depth		- Source of depth image data
 * vector<RGBPIXEL>& ROIimage	- Color image data that we are interested in
 * vector<DEPTHPIXEL>& ROIdepth	- Depth data that we are interested in
 * vector<CvPoint>& ROIimagePoint	- Coordinates of ROI points 
 *
 * Return:
 *
 * void				
 * 
 * Description:
 *
 * This function extracts the data we are interested in from the image and depth sources
 * 
 ************************************************************************/
void GetROIdata(IplImage* image,IplImage* depth,
				vector<RGBPIXEL>& ROIimage,vector<DEPTHPIXEL>& ROIdepth,
				vector<CvPoint>& ROIimagePoint)
{
	RGBPIXEL rgbData;
	DEPTHPIXEL depthData;
	CvPoint point;
	for(int j = 0;j < IMAGEHEIGTH;j++)
	{
		for(int i = 0;i < IMAGEWIDTH;i++)
		{
			//Rebuild the 16-bit depth value (in mm) from its low and high bytes
			unsigned int depthMM = CV_IMAGE_ELEM(depth,unsigned char,j,i*2)
				+ (CV_IMAGE_ELEM(depth,unsigned char,j,i*2+1)<<8);
			//Keep only pixels whose depth lies between 1000mm and 1500mm
			if(depthMM >= 1000 && depthMM <= 1500)
			{
				rgbData.R = CV_IMAGE_ELEM(image,unsigned char,j,i*3);	//Get image data (OpenNI delivers RGB order)
				rgbData.G = CV_IMAGE_ELEM(image,unsigned char,j,i*3 + 1);
				rgbData.B = CV_IMAGE_ELEM(image,unsigned char,j,i*3 + 2);

				depthData.data_L = CV_IMAGE_ELEM(depth,unsigned char,j,i*2);	//Get depth data
				depthData.data_H = CV_IMAGE_ELEM(depth,unsigned char,j,i*2 + 1);

				point.x = i;
				point.y = j;
				ROIimagePoint.push_back(point);
				ROIimage.push_back(rgbData);
				ROIdepth.push_back(depthData);
			}
		}
	}
}

/**********Test function**************/
void ShowROIimage(vector<RGBPIXEL> ROIimage1,vector<RGBPIXEL> ROIimage2,
				  vector<CvPoint> ROIimagePoint1,vector<CvPoint> ROIimagePoint2)
{
	IplImage* image1 = cvCreateImage(cvSize(IMAGEWIDTH,IMAGEHEIGTH),IPL_DEPTH_8U,3);
	IplImage* image2 = cvCreateImage(cvSize(IMAGEWIDTH,IMAGEHEIGTH),IPL_DEPTH_8U,3);
	cvZero(image1);
	cvZero(image2);
	vector<CvPoint>::iterator pointItr;
	vector<RGBPIXEL>::iterator rgbItr;
	
	if(ROIimagePoint1.size()&&ROIimagePoint2.size())
	{
		/***Process first picture****/
		for(pointItr=ROIimagePoint1.begin(),rgbItr=ROIimage1.begin();
			pointItr != ROIimagePoint1.end() && rgbItr != ROIimage1.end();
			pointItr++,rgbItr++)
		{
			CV_IMAGE_ELEM(image1,unsigned char,(*pointItr).y,(*pointItr).x*3) = (*rgbItr).R;
			CV_IMAGE_ELEM(image1,unsigned char,(*pointItr).y,(*pointItr).x*3+1) = (*rgbItr).G;
			CV_IMAGE_ELEM(image1,unsigned char,(*pointItr).y,(*pointItr).x*3+2) = (*rgbItr).B;
		}//end for
		/***Process second picture****/
		for(pointItr=ROIimagePoint2.begin(),rgbItr=ROIimage2.begin();
			pointItr != ROIimagePoint2.end() && rgbItr != ROIimage2.end();
			pointItr++,rgbItr++)
		{
			CV_IMAGE_ELEM(image2,unsigned char,(*pointItr).y,(*pointItr).x*3) = (*rgbItr).R;
			CV_IMAGE_ELEM(image2,unsigned char,(*pointItr).y,(*pointItr).x*3+1) = (*rgbItr).G;
			CV_IMAGE_ELEM(image2,unsigned char,(*pointItr).y,(*pointItr).x*3+2) = (*rgbItr).B;
		}//end for
	}//end if
	/********show images (OpenNI delivers RGB, OpenCV windows expect BGR)********/
	cvCvtColor(image1,image1,CV_RGB2BGR);
	cvCvtColor(image2,image2,CV_RGB2BGR);
	cvShowImage("image1",image1);
	cvShowImage("image2",image2);
	cvReleaseImage(&image1);	//Release the temporary display buffers to avoid leaking on every call
	cvReleaseImage(&image2);
}

int main(int argc, _TCHAR* argv[])
{
	XnStatus res;
	vector<RGBPIXEL> ROIimage1,ROIimage2;
	vector<DEPTHPIXEL> ROIdepth1,ROIdepth2;
	vector<CvPoint> ROIimagePoint1,ROIimagePoint2;

	/**********Initialize the OpenNI context and generators********/
	res = m_context.Init();
	res = m_imageGenerator.Create(m_context);
	res = m_depthGenerator.Create(m_context);

	ImageMetaData ImageMap;
	DepthMetaData DepthMap;
	IplImage* depthImg = cvCreateImage(cvSize(IMAGEWIDTH,IMAGEHEIGTH),IPL_DEPTH_16U,1);
	IplImage* image = cvCreateImage(cvSize(IMAGEWIDTH,IMAGEHEIGTH),IPL_DEPTH_8U,3);


	//Set output mode
	XnMapOutputMode mapMode;
	mapMode.nXRes = IMAGEWIDTH;    
    mapMode.nYRes = IMAGEHEIGTH;   
    mapMode.nFPS = 30;   
    res = m_imageGenerator.SetMapOutputMode(mapMode);
	res = m_depthGenerator.SetMapOutputMode(mapMode);

	/*******Test codes*************/
	cvNamedWindow("image1",CV_WINDOW_AUTOSIZE);
	cvNamedWindow("image2",CV_WINDOW_AUTOSIZE);
	/*********/
	m_depthGenerator.GetAlternativeViewPointCap().SetViewPoint(m_imageGenerator);	//Register the depth map to the color camera's viewpoint

	//Generate data
	m_context.StartGeneratingAll();
	int imageCount = 0;	//Count frame's number
	while(XN_STATUS_OK == m_context.WaitAndUpdateAll())	//Wait for a new synchronized frame each iteration
	{
		m_imageGenerator.GetMetaData(ImageMap);
		m_depthGenerator.GetMetaData(DepthMap);
		memcpy(image->imageData,ImageMap.Data(),IMAGEWIDTH*IMAGEHEIGTH*3);
		memcpy(depthImg->imageData,DepthMap.Data(),IMAGEWIDTH*IMAGEHEIGTH*2);
		/*************Get two pictures captured at different times**********/
		if(0 == imageCount)
		{
			/*************Get the region of interest*************/
			GetROIdata(image,depthImg,ROIimage1,ROIdepth1,ROIimagePoint1);
			imageCount++;
		}
		else if(1 == imageCount)
		{
			GetROIdata(image,depthImg,ROIimage2,ROIdepth2,ROIimagePoint2);
			imageCount = 0;
			
			ShowROIimage(ROIimage1,ROIimage2,
				ROIimagePoint1,ROIimagePoint2);
			/***********Clear all vectors*******************/
			ROIimage1.clear();
			ROIimage2.clear();
			ROIdepth1.clear();
			ROIdepth2.clear();
			ROIimagePoint1.clear();
			ROIimagePoint2.clear();
		}
		else
			imageCount++;
		cvWaitKey(20);
    }//end while 
	m_context.StopGeneratingAll();  
    m_context.Shutdown(); 
    cvReleaseImage(&depthImg); 
	cvReleaseImage(&image); 

	return 0;
}


The output of the code above differs somewhat from the screenshot I posted: this is a version I have trimmed, without the edge detection and its display part, so it simply shows two RGB images captured at different moments. Only objects between 1 m and 1.5 m from the sensor are displayed.
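One detail worth spelling out: GetROIdata stores each depth sample as two separate bytes in a DEPTHPIXEL, so whenever the value in millimetres is needed later it has to be reassembled from the low and high bytes. A small helper along these lines (hypothetical, not part of the original program) would do it:

// Hypothetical helper (not in the original code): rebuild the 16-bit depth
// value in millimetres from the two bytes stored in a DEPTHPIXEL.
static inline unsigned short DepthInMM(const DEPTHPIXEL& p)
{
	return (unsigned short)(p.data_L | (p.data_H << 8));
}

To capture a different slice of the scene, change the 1000 and 1500 constants in GetROIdata accordingly.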

 

I won't walk through the code in detail; it is fairly basic, so please read it yourself. It should be a useful reference for anyone just getting started with the Kinect.
