Generating a point cloud from a Kinect with OpenNI 2.0

My Kinect model: PrimeSense Carmine

Using OpenNI 2.0

This is a fairly rough first pass: the goal is simply to convert the depth data captured by the camera into real-world 3D coordinates and then draw them with OpenGL.

There is no denoising or downsampling here, so it barely counts as a "point cloud".
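If you did want to thin the cloud a little, the cheapest trick is to keep only every Nth point. Here is a tiny sketch, using the data struct defined in the code below; the helper name and the idea of a fixed step are my own illustration, not something from this post:

#include <vector>

// Crude thinning: keep every 'step'-th point of an already-converted cloud.
std::vector<data> thinCloud(const std::vector<data>& in, size_t step)
{
	std::vector<data> out;
	for (size_t i = 0; i < in.size(); i += step)
		out.push_back(in[i]);
	return out;
}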

For the theory, see this post: http://blog.csdn.net/opensource07/article/details/7804246

He did it with OpenNI 1.x and his code is fairly well organized; mine is more casual, which should suit newcomers like me. A short sketch of the depth-to-world math follows below.
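For reference, convertDepthToWorld is essentially a pinhole-camera back-projection. A minimal sketch of that math, assuming made-up intrinsics (fx, fy, cx, cy here are placeholder numbers; OpenNI derives the real ones from the stream's field of view):

// Hypothetical intrinsics for a 640x480 depth image -- placeholder values,
// not numbers from this post; OpenNI computes the real ones from the stream's
// horizontal/vertical field of view.
const float fx = 570.0f, fy = 570.0f;	// focal lengths in pixels (assumed)
const float cx = 320.0f, cy = 240.0f;	// principal point (assumed: image center)

// Back-project pixel (u, v) with depth d (in mm) into camera-space coordinates (also in mm).
void depthToWorld(int u, int v, float d, float* x, float* y, float* z)
{
	*x = (u - cx) * d / fx;
	*y = (cy - v) * d / fy;	// flip v so +y points up, matching OpenNI's world frame
	*z = d;
}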


#include <GL/glut.h>
#include <iostream>
#include <vector>
#include <OpenNI.h>
#include "OniSampleUtilities.h"
#define SAMPLE_READ_WAIT_TIMEOUT 200 // 200 ms
using namespace openni;
using namespace std;

// A single 3D point, in meters
typedef struct data
{
	float x;
	float y;
	float z;
}data;

// Global OpenNI depth stream, the most recently read frame, and its size
VideoStream depth;
VideoFrameRef frame;
int width;
int height;

void OpenGL_init(void)
{
	glClearColor(0.0f, 0.0f, 0.0f, 1.0f);	// background color: black
	glShadeModel(GL_FLAT);
	glClear(GL_COLOR_BUFFER_BIT);

}

void display()
{
	vector<data> point;
	data mydata;
	float x = 0.0f, y = 0.0f, z = 0.0f;

	// Wait for a new depth frame, then read it
	int changedStreamDummy;
	VideoStream* pStream = &depth;
	OpenNI::waitForAnyStream(&pStream, 1, &changedStreamDummy, SAMPLE_READ_WAIT_TIMEOUT);

	depth.readFrame(&frame);
	width = frame.getWidth();
	height = frame.getHeight();

	DepthPixel *pDepth = (DepthPixel*)frame.getData();

	// Convert every depth pixel into real-world coordinates (returned in mm), then scale to meters
	for (int i = 0; i < height; i++)
	{
		for (int j = 0; j < width; j++)
		{
			DepthPixel d = pDepth[i * width + j];
			if (d == 0)		// 0 means "no valid measurement" -- skip it
				continue;
			// convertDepthToWorld takes pixel x (the column, j) before pixel y (the row, i)
			CoordinateConverter::convertDepthToWorld(depth, j, i, d, &x, &y, &z);
			mydata.x = x * 0.001f;
			mydata.y = y * 0.001f;
			mydata.z = z * 0.001f;
			point.push_back(mydata);
		}
	}

	glClear(GL_COLOR_BUFFER_BIT);
	glColor3f(1.0f, 1.0f, 1.0f);
	glPointSize(1.0f);		// sizes below 1.0 get clamped anyway

	glBegin(GL_POINTS);
	for (vector<data>::iterator iter = point.begin(); iter != point.end(); ++iter)
	{
		glVertex3f(iter->x, iter->y, iter->z);
	}
	glEnd();

	glutSwapBuffers();
}
void OpenGL_changeSize(int w, int h)
{
	glViewport(0, 0, GLsizei(w), GLsizei(h));
	glMatrixMode(GL_PROJECTION);
	glLoadIdentity();
	// 	if (w <= h)				// orthographic projection
	// 		glOrtho(-2.0, 2.0, -2.0*(GLfloat)h/(GLfloat)w, 2.0*(GLfloat)h/(GLfloat)w, -10.0, 10.0);
	// 	else
	// 		glOrtho(-2.0*(GLfloat)w/(GLfloat)h, 2.0*(GLfloat)w/(GLfloat)h, -2.0, 2.0, -10.0, 10.0);
	gluPerspective(60.0, (GLfloat)w/(GLfloat)h, 0.1f, 50.0f);
	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
	gluLookAt(0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.1f, 0.0f, 1.0f, 0.0f);
}

void OpenGL_Idel()
{
	glutPostRedisplay();	// request a redraw so display() grabs the next frame
}

int main(int argc, char **argv)
{
	Status rc = OpenNI::initialize();
	if (rc != STATUS_OK)
	{
		cout << "Initialize failed: " << OpenNI::getExtendedError() << endl;
		return 1;
	}

	Device device;
	rc = device.open(ANY_DEVICE);
	if (rc != STATUS_OK)
	{
		cout << "Couldn't open device: " << OpenNI::getExtendedError() << endl;
		return 2;
	}

	if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
	{
		rc = depth.create(device, SENSOR_DEPTH);
	}

	rc = depth.start();

	glutInit(&argc, argv);
	glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
	glutInitWindowPosition(100, 100);
	glutInitWindowSize(800, 600);
	glutCreateWindow("first blood");

	OpenGL_init();
	glutDisplayFunc(display);
	glutReshapeFunc(OpenGL_changeSize);	// without this the projection set up above is never applied
	glutIdleFunc(OpenGL_Idel);
	glutMainLoop();

	return 0;
}

The view from the sample that ships with OpenNI 2.0:

The background is a bookshelf, plus my hand.

[Image 1]


The next image is the reconstructed 3D scene:

[Image 2]


Next up I'll be learning PCL and will try to produce something like the result below (a minimal PCL sketch follows the image):

[Image 3]
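As a teaser, here is a minimal sketch of how the same points could be handed to PCL for display. This assumes PCL is installed and linked; the function name and window title are just illustrative, not something from this post:

#include <vector>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/visualization/cloud_viewer.h>

// Copy the x/y/z values (in meters) computed above into a PCL cloud and show it
// with PCL's simple CloudViewer window.
void showWithPcl(const std::vector<data>& points)
{
	pcl::PointCloud<pcl::PointXYZ> cloud;
	for (size_t i = 0; i < points.size(); ++i)
		cloud.push_back(pcl::PointXYZ(points[i].x, points[i].y, points[i].z));

	pcl::visualization::CloudViewer viewer("point cloud");
	viewer.showCloud(cloud.makeShared());
	while (!viewer.wasStopped())
	{
		// block until the viewer window is closed
	}
}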

