kinect 学习笔记一

颜色深度图像的显示:初始化、绑定流、提取流。

1、提取颜色数据:

#include <iostream>
#include "Windows.h"
#include "MSR_NuiApi.h"
#include "cv.h"
#include "highgui.h"

using namespace std;

int main(int argc,char * argv[])
{
	IplImage *colorImage=NULL;
	colorImage = cvCreateImage(cvSize(640, 480), 8, 3);

	//初始化NUI
	HRESULT hr = NuiInitialize(NUI_INITIALIZE_FLAG_USES_COLOR);
	if( hr != S_OK )
	{
		cout<<"NuiInitialize failed"<pFrameTexture;
			KINECT_LOCKED_RECT LockedRect;
			pTexture->LockRect(0, &LockedRect, NULL, 0);//提取数据帧到LockedRect,它包括两个数据对象:pitch每行字节数,pBits第一个字节地址
			if( LockedRect.Pitch != 0 )
			{
				cvZero(colorImage);
				for (int i=0; i<480; i++)
				{
					uchar* ptr = (uchar*)(colorImage->imageData+i*colorImage->widthStep);
					BYTE * pBuffer = (BYTE*)(LockedRect.pBits)+i*LockedRect.Pitch;//每个字节代表一个颜色信息,直接使用BYTE
					for (int j=0; j<640; j++)
					{
						ptr[3*j] = pBuffer[4*j];//内部数据是4个字节,0-1-2是BGR,第4个现在未使用
						ptr[3*j+1] = pBuffer[4*j+1];
						ptr[3*j+2] = pBuffer[4*j+2];
					}
				}

				cvShowImage("colorImage", colorImage);//显示图像
				
			}
			else
			{
				cout<<"Buffer length of received texture is bogus\r\n"<


实验结果:


2、提取带有用户ID的深度数据

#include <iostream>
#include "Windows.h"
#include "MSR_NuiApi.h"
#include "cv.h"
#include "highgui.h"

using namespace std;

RGBQUAD Nui_ShortToQuad_Depth( USHORT s )// adapted from the SDK sample
{
	// Convert one 16-bit Kinect depth pixel into a display color.
	//
	// Pixel layout (depth-with-player-index format):
	//   bits 3..15 : depth in millimeters (13 bits)
	//   bits 0..2  : player index — 0 = no player, 1..7 = tracked player
	USHORT RealDepth = (s & 0xfff8) >> 3;// extract the distance information
	USHORT Player =  s & 7 ;// extract the player ID

	// Normalize depth to a 0..255 intensity, nearer = brighter.
	// 0x0fff (4095 mm) is the nominal maximum depth value, which is why the
	// SDK samples divide by it.
	// FIX: scale by 255 rather than 256 — with 256, a pixel at exactly
	// 0x0fff computed (BYTE)(256) == 0 and wrapped to full brightness (255)
	// instead of the darkest value (0).
	BYTE l = 255 - (BYTE)(255*RealDepth/0x0fff);

	RGBQUAD q;
	q.rgbRed = q.rgbBlue = q.rgbGreen = 0;

	// Tint the intensity by player index so each tracked user gets a
	// distinct color; index 0 (no player) is drawn as mid-level gray.
	switch( Player )
	{
	case 0:
		q.rgbRed = l / 2;
		q.rgbBlue = l / 2;
		q.rgbGreen = l / 2;
		break;
	case 1:
		q.rgbRed = l;
		break;
	case 2:
		q.rgbGreen = l;
		break;
	case 3:
		q.rgbRed = l / 4;
		q.rgbGreen = l;
		q.rgbBlue = l;
		break;
	case 4:
		q.rgbRed = l;
		q.rgbGreen = l;
		q.rgbBlue = l / 4;
		break;
	case 5:
		q.rgbRed = l;
		q.rgbGreen = l / 4;
		q.rgbBlue = l;
		break;
	case 6:
		q.rgbRed = l / 2;
		q.rgbGreen = l / 2;
		q.rgbBlue = l;
		break;
	case 7:
		q.rgbRed = 255 - ( l / 2 );
		q.rgbGreen = 255 - ( l / 2 );
		q.rgbBlue = 255 - ( l / 2 );
		break;
	}

	return q;
}

// Snippet 2: grab the Kinect depth stream WITH player index and display it
// colorized per player via Nui_ShortToQuad_Depth.
// NOTE(review): this listing was garbled by web extraction — the error
// print, stream setup and frame-wait calls after "<<" were swallowed
// (visible at the fused line below and the truncated tail). It will not
// compile as-is; restore from the original post.
int main(int argc,char * argv[])
{
	IplImage *depthIndexImage=NULL;
	depthIndexImage = cvCreateImage(cvSize(320, 240), 8, 3); // 320x240, 8-bit, 3-channel BGR

	// Initialize NUI with the depth-plus-player-index stream.
	HRESULT hr = NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX );
	if( hr != S_OK )
	{
		cout<<"NuiInitialize failed"<pFrameTexture; // [garbled: error print, stream setup and pTexture access fused here]
			KINECT_LOCKED_RECT LockedRect;
			pTexture->LockRect(0, &LockedRect, NULL, 0);
			if( LockedRect.Pitch != 0 )
			{
				cvZero(depthIndexImage);
				for (int i=0; i<240; i++)
				{
					uchar* ptr = (uchar*)(depthIndexImage->imageData+i*depthIndexImage->widthStep);
					BYTE * pBuffer = (BYTE *)(LockedRect.pBits)+i*LockedRect.Pitch;
					USHORT * pBufferRun = (USHORT*) pBuffer;// the cast matters: unlike the color stream, each depth sample is 2 bytes, so the row must be walked as USHORT, not BYTE
					for (int j=0; j<320; j++)
					{
						RGBQUAD rgb = Nui_ShortToQuad_Depth(pBufferRun[j]);// convert depth+ID to a display color
						ptr[3*j] = rgb.rgbBlue;
						ptr[3*j+1] = rgb.rgbGreen;
						ptr[3*j+2] = rgb.rgbRed;
					}
				}

				cvShowImage("depthIndexImage", depthIndexImage);
			}
			else
			{
				cout<<"Buffer length of received texture is bogus\r\n"<

实验结果:


3、不带ID的深度数据的提取

#include <iostream>
#include "Windows.h"
#include "MSR_NuiApi.h"
#include "cv.h"
#include "highgui.h"

using namespace std;


// Snippet 3: grab the plain Kinect depth stream (no player index) and show
// it as a grayscale image — farther pixels are darker.
// NOTE(review): this listing was garbled by web extraction — the error
// print, stream setup and frame-wait calls after "<<" were swallowed
// (visible at the fused line below and the truncated tail). It will not
// compile as-is; restore from the original post.
int main(int argc,char * argv[])
{
	IplImage *depthIndexImage=NULL;
	depthIndexImage = cvCreateImage(cvSize(320, 240), 8, 1);// grayscale image to express depth: the farther the data, the darker
	
	// Initialize NUI with the depth-only stream.
	HRESULT hr = NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH);
	if( hr != S_OK )
	{
		cout<<"NuiInitialize failed"<pFrameTexture; // [garbled: error print, stream setup and pTexture access fused here]
			KINECT_LOCKED_RECT LockedRect;
			pTexture->LockRect(0, &LockedRect, NULL, 0);
			if( LockedRect.Pitch != 0 )
			{
				cvZero(depthIndexImage);
				for (int i=0; i<240; i++)
				{
					uchar* ptr = (uchar*)(depthIndexImage->imageData+i*depthIndexImage->widthStep);
					BYTE * pBuffer = (BYTE *)(LockedRect.pBits)+i*LockedRect.Pitch;
					USHORT * pBufferRun = (USHORT*) pBuffer;// the cast matters: unlike the color stream, each depth sample is 2 bytes, so the row must be walked as USHORT, not BYTE
					for (int j=0; j<320; j++)
					{
						ptr[j] = 255 - (BYTE)(256*pBufferRun[j]/0x0fff);// normalize depth directly to 0-255 (nearer = brighter)
					}
				}

				cvShowImage("depthIndexImage", depthIndexImage);
			}
			else
			{
				cout<<"Buffer length of received texture is bogus\r\n"<

实验结果:


4、需要注意的地方

NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX与NUI_INITIALIZE_FLAG_USES_DEPTH不能同时创建数据流。这个我在试验中证实了。而且单纯的深度图像是左右倒置的。

文中归一化的地方除以0x0fff的原因是kinect的有效距离是1.2m到3.5m(官方文档),如果是3.5m那用十六进制表示是0x0DAC,我在实际测试中我的实验室能够测到的最大距离是0x0F87也就是3975mm。估计是官方他们直接使用极限距离0x0FFF来作为除数的。

文中的cv.h,highgui.h是我使用的opencv中的库,因为对这个比较熟悉。


5、骨骼数据的提取

#include <iostream>  
#include "Windows.h"  
#include "MSR_NuiApi.h"  
#include "cv.h"  
#include "highgui.h"  

using namespace std;  

// Draw one skeleton onto a 320x240 depth-sized image: every joint whose
// tracking state is not NOT_TRACKED is rendered as a small filled yellow
// circle. The second parameter is unused; it is kept for callers that want
// to distinguish multiple tracked players.
void Nui_DrawSkeleton(NUI_SKELETON_DATA * pSkel,int whichone, IplImage *SkeletonImage)
{
	for (int joint = 0; joint < NUI_SKELETON_POSITION_COUNT; joint++)
	{
		// Project the joint from skeleton space into normalized depth-image
		// coordinates, then scale to pixels with round-to-nearest.
		float nx = 0.0f;
		float ny = 0.0f;
		NuiTransformSkeletonToDepthImageF( pSkel->SkeletonPositions[joint], &nx, &ny );

		CvPoint px;
		px.x = (int)(nx*320+0.5f);
		px.y = (int)(ny*240+0.5f);

		// A joint is in one of three states: not tracked, tracked, or
		// inferred from other tracked joints. Draw all but the first.
		if (pSkel->eSkeletonPositionTrackingState[joint] != NUI_SKELETON_POSITION_NOT_TRACKED)
		{
			cvCircle(SkeletonImage, px, 3, cvScalar(0, 255, 255), -1, 8, 0);
		}
	}
}

// Snippet 4: initialize skeleton tracking and (in the original post) draw
// tracked skeletons via Nui_DrawSkeleton.
// NOTE(review): this listing was truncated by web extraction — everything
// after the "<<" on the error path (including the skeleton-tracking loop
// and the call to Nui_DrawSkeleton) was swallowed. It will not compile
// as-is; restore from the original post.
int main(int argc,char * argv[])  
{  
	IplImage *skeletonImage=NULL;  
	skeletonImage = cvCreateImage(cvSize(320, 240), 8, 3);  

	// Initialize NUI with skeleton tracking only.
	HRESULT hr = NuiInitialize(NUI_INITIALIZE_FLAG_USES_SKELETON );  
	if( hr != S_OK )  
	{  
		cout<<"NuiInitialize failed"<


实验结果:没有画出连线,大家如果想继续做可以对那个数组进行处理连线就可以了。



你可能感兴趣的:(模式识别项目总结)