Kinect For Windows V2 Development Log 9: Detecting and Drawing the Human Skeleton

The previous post, Detecting and Tracking the Human Skeleton, covered how to use Kinect's joint-detection capability but did not draw anything. This post records how to combine it with OpenCV to draw the human skeleton and its joint points.

Basic Idea

A line segment is determined by two endpoints. Since Kinect can detect the positions of the joints, the joints can serve as those endpoints: draw a line between each connected pair, overlay the result on the color or depth image, and the skeleton appears. The coordinates stored in a Joint are in camera space, however, so they first have to be mapped into color space or depth space.
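
The conversion itself is a single call on ICoordinateMapper. Below is a minimal sketch (not part of the program that follows) of mapping one joint position into a color-space pixel; it assumes mySensor is an opened IKinectSensor and joint is a tracked Joint, both hypothetical names standing in for the full listing below.

//Sketch only: map one camera-space joint position to a pixel in the 1920x1080 color image.
//Assumes mySensor is an opened IKinectSensor and joint is a tracked Joint (hypothetical names).
ICoordinateMapper   * myMapper = nullptr;
mySensor->get_CoordinateMapper(&myMapper);

ColorSpacePoint colorPoint;
myMapper->MapCameraPointToColorSpace(joint.Position, &colorPoint);  //camera space -> color space
Point   pixel((int)colorPoint.X, (int)colorPoint.Y);                //now usable with line() / circle()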

Code

#include <iostream>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/highgui.hpp>
#include <Kinect.h>

using   namespace   std;
using   namespace   cv;

void    draw(Mat & img, Joint & r_1, Joint & r_2,ICoordinateMapper * myMapper);
int main(void)
{
    IKinectSensor   * mySensor = nullptr;
    GetDefaultKinectSensor(&mySensor);
    mySensor->Open();

    IColorFrameSource   * myColorSource = nullptr;
    mySensor->get_ColorFrameSource(&myColorSource);

    IColorFrameReader   * myColorReader = nullptr;
    myColorSource->OpenReader(&myColorReader);

    int colorHeight = 0, colorWidth = 0;
    IFrameDescription   * myDescription = nullptr;
    myColorSource->get_FrameDescription(&myDescription);
    myDescription->get_Height(&colorHeight);
    myDescription->get_Width(&colorWidth);

    IColorFrame * myColorFrame = nullptr;
    Mat original(colorHeight,colorWidth,CV_8UC4);

//********************** Above: setup for reading the ColorFrame **************************

    IBodyFrameSource    * myBodySource = nullptr;
    mySensor->get_BodyFrameSource(&myBodySource);

    IBodyFrameReader    * myBodyReader = nullptr;
    myBodySource->OpenReader(&myBodyReader);

    int myBodyCount = 0;
    myBodySource->get_BodyCount(&myBodyCount);

    IBodyFrame  * myBodyFrame = nullptr;

    ICoordinateMapper   * myMapper = nullptr;
    mySensor->get_CoordinateMapper(&myMapper);

//********************** Above: setup for the BodyFrame and the coordinate Mapper ***********************
    while (1)
    {

        while (myColorReader->AcquireLatestFrame(&myColorFrame) != S_OK);  //spin until a color frame is available
        myColorFrame->CopyConvertedFrameDataToArray(colorHeight * colorWidth * 4, original.data, ColorImageFormat_Bgra);
        Mat copy = original.clone();        //read the color frame into the matrix, then clone a copy to draw on

        while (myBodyReader->AcquireLatestFrame(&myBodyFrame) != S_OK); //spin until a body frame is available
        IBody   **  myBodyArr = new IBody *[myBodyCount];       //prepare the array that will hold the body data
        for (int i = 0; i < myBodyCount; i++)
            myBodyArr[i] = nullptr;

        if (myBodyFrame->GetAndRefreshBodyData(myBodyCount, myBodyArr) == S_OK)     //copy the body data into the array
            for (int i = 0; i < myBodyCount; i++)
            {
                BOOLEAN     result = false;
                if (myBodyArr[i]->get_IsTracked(&result) == S_OK && result) //first check whether this body is being tracked
                {
                    Joint   myJointArr[JointType_Count];            
                    if (myBodyArr[i]->GetJoints(JointType_Count, myJointArr) == S_OK)   //if tracked, copy the joint data into the array and draw
                    {
                        draw(copy,myJointArr[JointType_Head],myJointArr[JointType_Neck],myMapper);
                        draw(copy,myJointArr[JointType_Neck],myJointArr[JointType_SpineShoulder],myMapper);

                        draw(copy,myJointArr[JointType_SpineShoulder],myJointArr[JointType_ShoulderLeft],myMapper);
                        draw(copy,myJointArr[JointType_SpineShoulder],myJointArr[JointType_SpineMid],myMapper);
                        draw(copy,myJointArr[JointType_SpineShoulder],myJointArr[JointType_ShoulderRight],myMapper);

                        draw(copy,myJointArr[JointType_ShoulderLeft],myJointArr[JointType_ElbowLeft],myMapper);
                        draw(copy,myJointArr[JointType_SpineMid],myJointArr[JointType_SpineBase],myMapper);
                        draw(copy,myJointArr[JointType_ShoulderRight],myJointArr[JointType_ElbowRight],myMapper);

                        draw(copy,myJointArr[JointType_ElbowLeft],myJointArr[JointType_WristLeft],myMapper);
                        draw(copy,myJointArr[JointType_SpineBase],myJointArr[JointType_HipLeft],myMapper);
                        draw(copy,myJointArr[JointType_SpineBase],myJointArr[JointType_HipRight],myMapper);
                        draw(copy,myJointArr[JointType_ElbowRight],myJointArr[JointType_WristRight],myMapper);

                        draw(copy,myJointArr[JointType_WristLeft],myJointArr[JointType_ThumbLeft],myMapper);
                        draw(copy,myJointArr[JointType_WristLeft],myJointArr[JointType_HandLeft],myMapper);
                        draw(copy,myJointArr[JointType_HipLeft],myJointArr[JointType_KneeLeft],myMapper);
                        draw(copy,myJointArr[JointType_HipRight],myJointArr[JointType_KneeRight],myMapper);
                        draw(copy,myJointArr[JointType_WristRight],myJointArr[JointType_ThumbRight],myMapper);
                        draw(copy,myJointArr[JointType_WristRight],myJointArr[JointType_HandRight],myMapper);

                        draw(copy,myJointArr[JointType_HandLeft],myJointArr[JointType_HandTipLeft],myMapper);
                        draw(copy,myJointArr[JointType_KneeLeft],myJointArr[JointType_FootLeft],myMapper);
                        draw(copy,myJointArr[JointType_KneeRight],myJointArr[JointType_FootRight],myMapper);
                        draw(copy,myJointArr[JointType_HandRight],myJointArr[JointType_HandTipRight],myMapper);
                    }
                }
            }
        for (int i = 0; i < myBodyCount; i++)       //release the IBody interfaces created by GetAndRefreshBodyData
            if (myBodyArr[i] != nullptr)
                myBodyArr[i]->Release();
        delete[] myBodyArr;
        myBodyFrame->Release();
        myColorFrame->Release();

        imshow("TEST",copy);
        if (waitKey(30) == VK_ESCAPE)
            break;
    }
    myMapper->Release();

    myDescription->Release();
    myColorReader->Release();
    myColorSource->Release();

    myBodyReader->Release();
    myBodySource->Release();
    mySensor->Close();
    mySensor->Release();

    return  0;
}

void    draw(Mat & img, Joint & r_1, Joint & r_2,ICoordinateMapper * myMapper)
{
    //use the two joints as the endpoints of a segment, and filter out joints that are not tracked
    if (r_1.TrackingState == TrackingState_Tracked && r_2.TrackingState == TrackingState_Tracked)
    {
        ColorSpacePoint t_point;    //the joint positions are in camera space and must be mapped into color space
        Point   p_1, p_2;
        myMapper->MapCameraPointToColorSpace(r_1.Position,&t_point);
        p_1.x = t_point.X;
        p_1.y = t_point.Y;
        myMapper->MapCameraPointToColorSpace(r_2.Position,&t_point);
        p_2.x = t_point.X;
        p_2.y = t_point.Y;

        line(img,p_1,p_2,Vec3b(0,255,0),5);
        circle(img,p_1,10,Vec3b(255,0,0),-1);
        circle(img,p_2,10,Vec3b(255,0,0),-1);
    }
}

Detailed Explanation

The first part of the code is similar to the previous post; the main difference is the drawing. My approach is breadth-first: go through the joints layer by layer from top to bottom and connect each joint to the one directly below it.
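
For reference, that top-down connection order can also be written as a table of joint pairs drawn in a single loop instead of twenty separate calls. This is only a sketch of an alternative layout, assuming the variables copy, myJointArr and myMapper from main() above (std::pair requires <utility>):

//Hypothetical refactor: list the bones once and draw them in a loop.
const std::pair<JointType, JointType> bones[] = {
    { JointType_Head,          JointType_Neck },
    { JointType_Neck,          JointType_SpineShoulder },
    { JointType_SpineShoulder, JointType_ShoulderLeft },
    { JointType_SpineShoulder, JointType_SpineMid },
    { JointType_SpineShoulder, JointType_ShoulderRight },
    //...the remaining pairs follow the same top-down order as in main()
};

for (const auto & bone : bones)
    draw(copy, myJointArr[bone.first], myJointArr[bone.second], myMapper);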

The draw() function's first parameter is the image to draw on, the next two are the two joints, and the last is a Mapper used to convert the points' coordinates. Inside, it first checks whether both joints are in the Tracked state. If they are, it converts their positions into color-space coordinates and stores them in the cv::Point objects OpenCV expects. It then calls line(): the first parameter is the image to draw on, the second and third are the segment's endpoints, followed by the color and the line thickness. Finally, to make the joint positions visible, a filled circle is drawn at each joint; circle()'s last parameter is also a thickness, and passing a negative value fills the circle.
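
If you would rather overlay the skeleton on the depth image, only the mapping call and the target resolution change (the Kinect V2 depth frame is 512x424). Here is a sketch of a hypothetical depth-space variant of draw(); it is not part of the program above:

//Hypothetical variant: draw a bone on a 512x424 depth image instead of the color image.
void    drawOnDepth(Mat & img, Joint & r_1, Joint & r_2, ICoordinateMapper * myMapper)
{
    if (r_1.TrackingState == TrackingState_Tracked && r_2.TrackingState == TrackingState_Tracked)
    {
        DepthSpacePoint d_1, d_2;
        myMapper->MapCameraPointToDepthSpace(r_1.Position, &d_1);  //camera space -> depth space
        myMapper->MapCameraPointToDepthSpace(r_2.Position, &d_2);

        line(img, Point((int)d_1.X, (int)d_1.Y), Point((int)d_2.X, (int)d_2.Y), Vec3b(0, 255, 0), 2);
        circle(img, Point((int)d_1.X, (int)d_1.Y), 5, Vec3b(255, 0, 0), -1);
        circle(img, Point((int)d_2.X, (int)d_2.Y), 5, Vec3b(255, 0, 0), -1);
    }
}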

Result

[Figure 1: result screenshot]




- - -
Alright, at this point I have learned more or less everything I need; the next step is the actual development. This is going to be fun!





