深度相机Kinect2.0三维点云拼接实验(四)

文章目录

    • 前言
    • Kinect2.0相机可视化测试
    • 总结

前言

  Kinect2.0是微软推出的一款RGB-D相机,它既支持普通相机的拍摄,也支持脉冲测量深度信息。本系列文章基于该传感器给出基本的环境搭建、原理讲解、点云数据捕捉、三维点云拼接等开发源码,当然也会总结经验帮大家排雷避坑。本小节介绍点云图像的捕获。

本文测试代码依赖Kinect2.0驱动程序、OpenCV开发环境等(环境搭建与配置见实验二),可通过驱动程序API获取Kinect2.0相机的各种操作;

Kinect2.0相机可视化测试

// Safely release a COM interface pointer: calls Release() only when the
// pointer is non-null, then nulls it out so a later double-release or
// use-after-release is impossible. Safe to call repeatedly.
template<class Interface>
inline void SafeRelease(Interface*& pInterfaceToRelease)
{
    if (pInterfaceToRelease != nullptr)  // nullptr instead of the NULL macro
    {
        pInterfaceToRelease->Release();
        pInterfaceToRelease = nullptr;
    }
}

int kinect2_visualation()
{
    // 获取Kinect设备
    IKinectSensor* m_pKinectSensor; //Represents a Kinect sensor device.
    HRESULT hr; // 用于描述错误或警告,包含了严重级别等信息,32位整型变量
    hr = GetDefaultKinectSensor(&m_pKinectSensor);
    if (FAILED(hr))
    {
        return hr;
    }

    //IMultiSourceFrameReader: Represents a reader for multi source frames.
    IMultiSourceFrameReader* m_pMultiFrameReader = NULL;
    if (m_pKinectSensor)
    {
        hr = m_pKinectSensor->Open();
        if (SUCCEEDED(hr))
        {
            // 获取多数据源到读取器
            hr = m_pKinectSensor->OpenMultiSourceFrameReader(
                FrameSourceTypes::FrameSourceTypes_Color |
                FrameSourceTypes::FrameSourceTypes_Infrared |
                FrameSourceTypes::FrameSourceTypes_Depth,
                &m_pMultiFrameReader);
        }
    }

    if (!m_pKinectSensor || FAILED(hr))
    {
        return E_FAIL;
    }

    // 三个数据帧及引用
    IDepthFrameReference* m_pDepthFrameReference = NULL; //深度图引用
    IColorFrameReference* m_pColorFrameReference = NULL; //RGB图引用
    IInfraredFrameReference* m_pInfraredFrameReference = NULL; //红外图引用
    IInfraredFrame* m_pInfraredFrame = NULL; //红外图像
    IDepthFrame* m_pDepthFrame = NULL;
    IColorFrame* m_pColorFrame = NULL;

    // 三个图片格式
    Mat i_rgb(1080, 1920, CV_8UC4);      //注意:这里必须为4通道的图,Kinect的数据只能以Bgra格式传出
    Mat i_depth(424, 512, CV_8UC1);
    Mat i_depth2B(424, 512, CV_16UC1);
    Mat i_ir(424, 512, CV_16UC1);

    UINT16* depthData = new UINT16[424 * 512];
    IMultiSourceFrame* m_pMultiFrame = nullptr;

    bool save_flag = false; //默认不保存测试环节的图像
    int save_number = 300;  //保存的图像编号
    char name[81];          //保存的图像名
    Sleep(1000);

    while (true)
    {
        //按ESC键结束测试,按空格键获取一帧测试中的图像
        if (waitKey(1) == VK_ESCAPE)
            break;
        else if (waitKey(1) == VK_SPACE)
        {
            save_number++;
            save_flag = true;
            std::cout << "Start capture:" << save_number << std::endl;
        }

        // 循环体内需要延时,否则相机反应不过来
        Sleep(500);
        // 获取新的一个多源数据帧
        hr = m_pMultiFrameReader->AcquireLatestFrame(&m_pMultiFrame);
        if (FAILED(hr) || !m_pMultiFrame)
        {
            cout << "Can't get MultiFrame!" << endl;
            continue;
        }

        // 从多源数据帧中分离出RGB图像,深度图像和红外图像
        if (SUCCEEDED(hr))
            hr = m_pMultiFrame->get_ColorFrameReference(&m_pColorFrameReference);
        if (SUCCEEDED(hr))
            hr = m_pColorFrameReference->AcquireFrame(&m_pColorFrame);
        if (SUCCEEDED(hr))
            hr = m_pMultiFrame->get_DepthFrameReference(&m_pDepthFrameReference);
        if (SUCCEEDED(hr))
            hr = m_pDepthFrameReference->AcquireFrame(&m_pDepthFrame);
        if (SUCCEEDED(hr))
            hr = m_pMultiFrame->get_InfraredFrameReference(&m_pInfraredFrameReference);
        if (SUCCEEDED(hr))
            hr = m_pInfraredFrameReference->AcquireFrame(&m_pInfraredFrame);

        // color拷贝到图片中
        UINT nColorBufferSize = 1920 * 1080 * 4;
        if (SUCCEEDED(hr))
            hr = m_pColorFrame->CopyConvertedFrameDataToArray(nColorBufferSize, 
                 reinterpret_cast<BYTE*>(i_rgb.data), ColorImageFormat::ColorImageFormat_Bgra);
        if (save_flag)
        {
            std::cout << "Saving PNG file!" << std::endl;
            sprintf_s(name, "./opencv_save/rgba_%03d.png", save_number);
            cv::imwrite(name, i_rgb);
        }

        // depth拷贝到图片中
        if (SUCCEEDED(hr))
        {
            // UINT16* depthData = new UINT16[424 * 512];
            hr = m_pDepthFrame->CopyFrameDataToArray(424 * 512, depthData);
            for (int i = 0; i < 512 * 424; i++)
            {
                // 0-255深度图,为了显示明显,只取深度数据的低8位
                reinterpret_cast<UINT16*>(i_depth2B.data)[i] = depthData[i];
                BYTE intensity = static_cast<BYTE>(depthData[i] % 256);
                reinterpret_cast<BYTE*>(i_depth.data)[i] = intensity;
            }

            if (save_flag)
            {
                std::cout << "Saving 16bit PGM file!" << std::endl;
                sprintf_s(name, "./opencv_save/depth_16b_%03d.pgm", save_number);
                cv::imwrite(name, i_depth2B);
            }

            ICoordinateMapper* m_pCoordinateMapper = NULL;
            hr = m_pKinectSensor->get_CoordinateMapper(&m_pCoordinateMapper);
            ColorSpacePoint* m_pColorCoordinates = new ColorSpacePoint[512 * 424];
            HRESULT hr = m_pCoordinateMapper->MapDepthFrameToColorSpace(512 * 424, 
                depthData, 512 * 424, m_pColorCoordinates);

            Mat i_depthToRgb(424, 512, CV_8UC4);
            if (SUCCEEDED(hr))
            {
                for (int i = 0; i < 424 * 512; i++)
                {
                    ColorSpacePoint p = m_pColorCoordinates[i];
                    if (p.X != -std::numeric_limits<float>::infinity() && \
                    	p.Y != -std::numeric_limits<float>::infinity())
                    {
                        int colorX = static_cast<int>(p.X + 0.5f);
                        int colorY = static_cast<int>(p.Y + 0.5f);

                        if ((colorX >= 0 && colorX < 1920) && (colorY >= 0 && colorY < 1080))
                        {
                            i_depthToRgb.data[i * 4] = i_rgb.data[(colorY * 1920 + colorX) * 4];
                            i_depthToRgb.data[i * 4 + 1] = i_rgb.data[(colorY * 1920 + colorX) * 4 + 1];
                            i_depthToRgb.data[i * 4 + 2] = i_rgb.data[(colorY * 1920 + colorX) * 4 + 2];
                            i_depthToRgb.data[i * 4 + 3] = i_rgb.data[(colorY * 1920 + colorX) * 4 + 3];
                        }
                    }
                }
            }
            delete[]m_pColorCoordinates;
            imshow("rgb2depth", i_depthToRgb);
            if (save_flag)
            {
                std::cout << "Saving R2D-PNG!" << std::endl;
                sprintf_s(name, "./opencv_save/depth2rgb_%03d.png", save_number);
                cv::imwrite(name, i_depthToRgb);
            }

            CameraSpacePoint* m_pCameraCoordinates = new CameraSpacePoint[512 * 424];
            if (SUCCEEDED(hr))
            {
                HRESULT hr = m_pCoordinateMapper->MapDepthFrameToCameraSpace(512 * 424, 
                             depthData, 512 * 424, m_pCameraCoordinates);
            }
            if (SUCCEEDED(hr))
            {
                for (int i = 0; i < 512 * 424; i++)
                {
                    CameraSpacePoint p = m_pCameraCoordinates[i];
                    if (p.X != -std::numeric_limits<float>::infinity() && \
                        p.Y != -std::numeric_limits<float>::infinity() && \
                        p.Z != -std::numeric_limits<float>::infinity())
                    {
                        float cameraX = static_cast<float>(p.X);
                        float cameraY = static_cast<float>(p.Y);
                        float cameraZ = static_cast<float>(p.Z);
                    }
                }
            }

        }
        
        // 显示
        imshow("depth", i_depth);
        if (save_flag)
        {
            std::cout << "Saving PGM file!" << std::endl;
            sprintf_s(name, "./opencv_save/depth_L8b_%03d.pgm", save_number);
            cv::imwrite(name, i_depth);
            save_flag = false;
            std::cout << "Saved done!" << std::endl;
        }

        // 释放资源
        SafeRelease(m_pColorFrame);
        SafeRelease(m_pDepthFrame);
        SafeRelease(m_pInfraredFrame);
        SafeRelease(m_pColorFrameReference);
        SafeRelease(m_pDepthFrameReference);
        SafeRelease(m_pInfraredFrameReference);
        SafeRelease(m_pMultiFrame);
    }
    // 关闭窗口,设备
    cv::destroyAllWindows();
    m_pKinectSensor->Close();
    //std::system("pause");
    delete[] depthData;

    return 0;
}

总结

  后续开发笔者选择的是Windows系统,给出的源码也是VS2019下的,后续开源出来的源码有一定基础的小伙伴可以参考代码自行修改。笔者将搭建环境所需要的所有软件、脚本、项目模板全部开源至微信公众号 “ 24K纯学渣 ” ,回复关键字 “ 深度相机 ” 即可获取。

你可能感兴趣的:(经验分享,opencv,计算机视觉,人工智能)