My undergraduate thesis was about removing noise at object edges in depth images. When a depth image is turned into a point cloud, the most visually disruptive artifacts are flying pixels. For the spatial characteristics of flying pixels, see the paper "Identification and correction of flying pixels in range camera data". That paper also describes methods for recovering the correct depth of flying pixels, but the method its authors propose relies on plane detection, which may not work well for general curved objects, and it runs offline. The method recorded in this post uses object-edge information from a color image aligned to the depth image, and deletes the pixels lying between the depth-image edge and the color-image edge. Because I later found a better way to delete flying pixels and recover their correct depth, this method did not make it into my thesis, but it is fast and can be used to denoise the important subject in a depth image (a person, for example).
I originally implemented flying-pixel removal on a Kinect v2; yesterday I reimplemented it on an Intel RealSense D435i. This post covers three parts:
- Packaging the DLL file
- Calibration and registration: theory and implementation
- Deleting flying pixels via edge detection
Let me show the final result first. The two images below are visualizations of the raw depth image and of the processed depth image:
You can see that the black border around the person in the second image has widened; that is because the flying pixels along the edges were deleted.
Packaging the .dll file
Packaging the code into a .dll is not required; I only do it so I can use it in other projects. I am recording the process here so I don't forget it later.
1. Create a new C++ project (Win32 console application) named RealsenseFilter.
2. On the next page, set "Application type" to "DLL" and check "Empty project" under "Additional options".
3. Under "Header Files", add a new header named MyRealsenseFilter.h with the following content:
#pragma once
#ifndef LIBFREENECT_EXPORT
#define MYREALSENSE_API _declspec(dllexport)
#else
#define MYREALSENSE_API _declspec(dllimport)
#endif // LIBFREENECT_EXPORT
#include <iostream>
#include <opencv2/opencv.hpp>
#include <Eigen/Dense>
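// extern "C" disables C++ name mangling, so the DLL exports the plain
// symbol "DepthFilter" that GetProcAddress looks up in the test program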
extern "C"
{
namespace MyRealsense
{
MYREALSENSE_API unsigned short* DepthFilter(unsigned char* rgb_data, unsigned short* depth_data, int width_depth, int high_depth, int width_color, int high_color);
}
}
Then under "Source Files", add a new source file named MyRealsenseFilter.cpp with the following content:
#include "MyRealsenseFilter.h"
using namespace cv;
using namespace std;
#define N1 18           // max horizontal distance searched between depth edge and color edge
#define N2 8            // max vertical search distance
#define N3 2            // radius used to clean color edges around registration holes
#define winsize 3       // window radius for the outlier and statistical filters
#define threshold 19000 // gradient-sum threshold of the statistical depth filter
MYREALSENSE_API unsigned short* MyRealsense::DepthFilter(unsigned char* rgb_data, unsigned short* depth_data, int width_depth, int high_depth, int width_color, int high_color)
{
////Set the camera intrinsic and extrinsic parameters
float fx_rgb = 611.538;
float fy_rgb = 611.753;
float cx_rgb = 323.879;
float cy_rgb = 237.827;
float fx_ir = 382.913;
float fy_ir = 382.913;
float cx_ir = 319.329;
float cy_ir = 234.574;
Eigen::Matrix3f R_ir2rgb;
Eigen::Vector3f T_ir2rgb;
R_ir2rgb << 0.999879, -0.00836045, 0.0131368, 0.00840608, 0.999959, -0.00342227, -0.0131076, 0.00353228, 0.999908;
T_ir2rgb << 0.0150998, 0.000269174, 0.000144462;
//cout << "R_M is:" << R_ir2rgb << endl;
//cout << "T_M is :" << T_ir2rgb << endl;
////Wrap (and optionally display) the images passed in
Mat rgb_raw = Mat(high_color, width_color, CV_8UC3);
Mat depth_raw = Mat(high_depth, width_depth, CV_16UC1);
Mat depth_raw_low = Mat(high_depth, width_depth, CV_8UC1);
memcpy(rgb_raw.data, rgb_data, width_color*high_color * 3);
memcpy(depth_raw.data, depth_data, width_depth*high_depth * 2);
depth_raw.convertTo(depth_raw_low, CV_8UC1);
//imshow("raw_depth:", depth_raw_low);
//imshow("raw_rgb:", rgb_raw);
//waitKey(1);
////Set the camera intrinsic matrices (inhomogeneous form)
Eigen::Matrix3f K_ir; // IR intrinsic matrix
K_ir <<
fx_ir, 0, cx_ir,
0, fy_ir, cy_ir,
0, 0, 1;
Eigen::Matrix3f K_rgb; // RGB intrinsic matrix
K_rgb <<
fx_rgb, 0, cx_rgb,
0, fy_rgb, cy_rgb,
0, 0, 1;
////Compute the equivalent R and T; for details see my senior schoolmate's blog: https://blog.csdn.net/jiaojialulu/article/details/53154045
Eigen::Matrix3f R;
Eigen::Vector3f T;
R = K_rgb*R_ir2rgb*K_ir.inverse();
T = K_rgb*T_ir2rgb;
/*
cout << "K_rgb:\n" << K_rgb << endl;
cout << "K_ir:\n" << K_ir << endl;
cout << "R:\n" << R << endl;
cout << "T:\n" << T << endl;
*/
////Compute the projection
Mat result(high_color, width_color, CV_8UC3); // holds the color value found for every depth pixel
int i = 0;
for (int row = 0; row < high_depth; row++)
{
for (int col = 0; col < width_depth; col++)
{
unsigned short* p = (unsigned short*)depth_raw.data;
unsigned short depthValue = p[row * width_depth + col];
if (depthValue != 0 && depthValue != 65535) // skip invalid depth: holes (0) and saturated values (65535)
{
// coordinates projected onto the color image
Eigen::Vector3f uv_depth(col, row, 1.0f); // p_ir
Eigen::Vector3f uv_color = depthValue / 1000.f*R*uv_depth + T / 1000; // Z_rgb*p_rgb = R*Z_ir*p_ir + T (divide by 1000: mm -> m)
int X = static_cast<int>(uv_color[0] / uv_color[2]); // Z_rgb*p_rgb -> p_rgb
int Y = static_cast<int>(uv_color[1] / uv_color[2]); // Z acts as the scale factor (explained in my handwritten notes)
if ((X >= 0 && X < width_color) && (Y >= 0 && Y < high_color))
{
//cout << "X: " << X << " Y: " << Y << endl;
result.data[i * 3] = rgb_raw.data[3 * (Y * width_color + X)];
result.data[i * 3 + 1] = rgb_raw.data[3 * (Y * width_color + X) + 1];
result.data[i * 3 + 2] = rgb_raw.data[3 * (Y * width_color + X) + 2];
}
else // the corresponding color pixel lies outside the color image, so zero this point
{
result.data[i * 3] = 0;
result.data[i * 3 + 1] = 0;
result.data[i * 3 + 2] = 0;
}
}
else // the depth value is 0 or the maximum 65535
{
result.data[i * 3] = 0;
result.data[i * 3 + 1] = 0;
result.data[i * 3 + 2] = 0;
}
i++;
}
}
//cout << "result size is :" << result.size() << endl;
//imshow("reg_rgb:", result);
//waitKey(1);
////Edge detection
static unsigned short* new_depth_data = new unsigned short[width_depth * high_depth]; // static buffer returned to the caller, allocated once
Mat src2 = Mat(high_depth, width_depth, CV_8UC1);
Mat rgbMat = Mat(high_color, width_color, CV_8UC3);
Mat depthMat = Mat(high_depth, width_depth, CV_16UC1);
Mat src1, src3, out1, out2;
memcpy(rgbMat.data, result.data, width_color*high_color * 3);
memcpy(depthMat.data, depth_data, width_depth*high_depth * 2);
src1 = rgbMat.clone();
src3 = depthMat.clone();
src3.convertTo(src2, CV_8UC1);
blur(src2, src2, Size(3, 3));
Canny(src2, out2, 500, 350);
//imshow("depth_edge:", out2);
Canny(src1, out1, 480, 250);
//imshow("rgb_edge", out1);
//imshow("color", rgbMat);
//imshow("depth", src2);
//Obtain clean edges from the registered color image: suppress edge pixels within N3 of registration holes (black pixels in result)
for (int i = N3; i < high_depth - N3; i++)
{
for (int j = N3; j < width_depth - N3; j++)
{
if (*(unsigned short*)(result.data + i*result.step[0] + j *result.step[1]) == 0)
{
for (int n = 0; n < N3; n++)
{
if (*(out1.data + i*out1.step[0] + (j + n) *out1.step[1]) != 0)
*(out1.data + i*out1.step[0] + (j + n) *out1.step[1]) = 0;
if (*(out1.data + i*out1.step[0] + (j - n) *out1.step[1]) != 0)
*(out1.data + i*out1.step[0] + (j - n) *out1.step[1]) = 0;
if (*(out1.data + (i + n)*out1.step[0] + j *out1.step[1]) != 0)
*(out1.data + (i + n)*out1.step[0] + j *out1.step[1]) = 0;
if (*(out1.data + (i - n)*out1.step[0] + j *out1.step[1]) != 0)
*(out1.data + (i - n)*out1.step[0] + j *out1.step[1]) = 0;
}
}
}
}
////Remove isolated edge points: drop edge pixels with fewer than 4 edge pixels in their neighborhood
for (int i = winsize; i < high_depth - winsize; i++)
{
for (int j = winsize; j < width_depth - winsize; j++)
{
int num = 0;
for (int k = i - winsize; k <= i + winsize; k++)
{
for (int l = j - winsize; l <= j + winsize; l++)
{
if (*(out1.data + k*out1.step[0] + l *out1.step[1]) != 0)
num++;
}
}
if (num < 4)
*(out1.data + i*out1.step[0] + j *out1.step[1]) = 0;
}
}
//imshow("rgb_edge_processed:", out1);
for (int i = N2; i < high_depth - N2; i++)
{
for (int j = N1; j < width_depth - N1; j++)
{
int pro = 1;
if (*(out2.data + i*out2.step[0] + j *out2.step[1]) != 0 && *(out1.data + i*out1.step[0] + j *out1.step[1]) == 0)
{
for (int index1 = 1; index1 <= N1; index1++)
{
if (*(out1.data + i*out1.step[0] + (j + index1) *out1.step[1]) != 0 )
{
pro = 0;
if (*(out1.data + i*out1.step[0] + (j + index1) *out1.step[1]) != 0 && index1 !=1 )
{
for (int k = 0; k < index1-1 ; k++)
{
*((unsigned short*)(depthMat.data + i*depthMat.step[0] + (j + k)*depthMat.step[1])) = 0;
}
}
/*
if (*(out1.data + i*out1.step[0] + (j - index1) *out1.step[1]) != 0 )
{
for (int k = 0; k < index1 ; k++)
{
*((unsigned short*)(depthMat.data + i*depthMat.step[0] + (j - k)*depthMat.step[1])) = 0;
}
}
*/
break;
}
}
if (pro == 1)
{
for (int index2 = 1; index2 <= N2; index2++)
{
if (*(out1.data + (i + index2)*out1.step[0] + j *out1.step[1]) == 0 && *(out1.data + (i - index2)*out1.step[0] + j *out1.step[1]) == 0)
continue;
else
{
if (*(out1.data + (i + index2)*out1.step[0] + j *out1.step[1]) != 0 )
{
for (int k = 0; k < index2 ; k++)
{
*((unsigned short*)(depthMat.data + (i + k)*depthMat.step[0] + j*depthMat.step[1])) = 0;
}
}
if (*(out1.data + (i - index2)*out1.step[0] + j *out1.step[1]) != 0 )
{
for (int k = 0; k < index2 ; k++)
{
*((unsigned short*)(depthMat.data + (i - k)*depthMat.step[0] + j*depthMat.step[1])) = 0;
}
}
break;
}
}
}
}
}
}
////Statistical filtering to remove outlier points
Mat depth_threshold = Mat::zeros(high_depth, width_depth, CV_16UC1);
for (int i = winsize; i < high_depth - winsize; i++)
{
for (int j = winsize; j < width_depth - winsize; j++)
{
int sum_depth = 0; // int accumulator: an unsigned short would overflow when summing many gradients
for (int k = i - winsize + 1; k <= i + winsize - 1; k++)
{
for (int l = j - winsize + 1; l <= j + winsize - 1; l++)
{
sum_depth += abs((*((unsigned short*)(depthMat.data + (k - 1) *depthMat.step[0] + l*depthMat.step[1]))
- *((unsigned short*)(depthMat.data + k *depthMat.step[0] + l *depthMat.step[1]))));
sum_depth += abs((*((unsigned short*)(depthMat.data + (k + 1) *depthMat.step[0] + l*depthMat.step[1]))
- *((unsigned short*)(depthMat.data + k *depthMat.step[0] + l *depthMat.step[1]))));
sum_depth += abs((*((unsigned short*)(depthMat.data + k *depthMat.step[0] + (l + 1)*depthMat.step[1]))
- *((unsigned short*)(depthMat.data + k *depthMat.step[0] + l *depthMat.step[1]))));
sum_depth += abs((*((unsigned short*)(depthMat.data + k *depthMat.step[0] + (l - 1)*depthMat.step[1]))
- *((unsigned short*)(depthMat.data + k *depthMat.step[0] + l *depthMat.step[1]))));
}
}
*((unsigned short*)(depth_threshold.data + i *depth_threshold.step[0] + j * depth_threshold.step[1])) = (unsigned short)(sum_depth > 65535 ? 65535 : sum_depth); // saturate on store
}
}
//cout << depth_threshold << endl;
for (int i = winsize; i < high_depth - winsize; i++)
{
for (int j = winsize; j < width_depth - winsize; j++)
{
if (*((unsigned short*)(depth_threshold.data + i *depth_threshold.step[0] + j * depth_threshold.step[1])) > threshold)
*((unsigned short*)(depthMat.data + i *depthMat.step[0] + j * depthMat.step[1])) = 0;
}
}
memcpy(new_depth_data, depthMat.data, width_depth*high_depth * 2);
depthMat.convertTo(depthMat, CV_8UC1);
//imshow("depth after prosess:", depthMat);
//waitKey(1);
return new_depth_data;
}
At this point, building the project generates the .dll; you will find it under x64/Release/ in your project directory.
4. Test whether the generated .dll works. In the same solution, create a new project named DLLTest, configured like a normal C++ project: a Win32 console application with application type "Console application" and "Precompiled header" checked under additional options. Because the filter function is declared extern "C", its exported name is the unmangled "DepthFilter", which is exactly what GetProcAddress looks up below. Under "Source Files", add a DllTest.cpp with the following content:
// DllTest.cpp : defines the entry point for the console application.
//
#include "stdafx.h"
#include <iostream>
#include <Windows.h>
#include <tchar.h>
#include <librealsense2/rs.hpp> // Include RealSense Cross Platform API
#include <opencv2/opencv.hpp> // Include OpenCV API
#include "example.hpp" // Include short list of convenience functions for rendering
using namespace cv;
using namespace std;
// Define a pointer type for the function in the DLL
typedef unsigned short* (*DepthFilter)(unsigned char* , unsigned short* , int , int , int , int );
int _tmain(int argc, _TCHAR* argv[]) try
{
static unsigned short* depthData = new unsigned short[640 * 480 ];
static unsigned char* rgbData = new unsigned char[640 * 480 * 3];
static unsigned short* depthData1 = new unsigned short[640 * 480];
HINSTANCE hlib = LoadLibrary(_T("RealSenseFilter.dll")); // get the DLL instance (HINSTANCE is an instance handle)
if (!hlib)
{
std::cout << "获取DLL实例失败!" << std::endl;
FreeLibrary(hlib); // 调用函数FreeLibrary释放DLL获得的内存。
return -1;
}
DepthFilter MyDepthFilter = (DepthFilter)GetProcAddress(hlib, "DepthFilter");
if (!MyDepthFilter)
{
std::cout << "函数指针为空!" << std::endl;
FreeLibrary(hlib);
return -2;
}
float get_depth_scale(rs2::device dev); // gets the depth image's scale factor, 0.001 here
rs2::colorizer color_map1;
rs2::colorizer color_map2;
rs2::pipeline pipe;
rs2::config cfg;
cfg.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); // configure the camera's output streams
cfg.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGB8, 30);
rs2::pipeline_profile profile = pipe.start(cfg);
float depth_scale = get_depth_scale(profile.get_device());
cout << "depth_scale is:" << depth_scale << endl;
while (waitKey(1) < 0)
{
rs2::frameset data = pipe.wait_for_frames();
rs2::frame depth = data.get_depth_frame();
rs2::frame color = data.get_color_frame();
memcpy(depthData, (void*)depth.get_data(), 640 * 480 * 2);
memcpy(rgbData, (void*)color.get_data(), 640 * 480 * 3);
rs2::frame depth1 = depth.apply_filter(color_map1);
depthData1 = MyDepthFilter(rgbData, depthData, 640, 480, 640, 480);
memcpy((void*)depth.get_data(), depthData1, 640 * 480 * 2); // write the filtered depth back into the frame so the colorizer can visualize it
rs2::frame depth2 = depth.apply_filter(color_map2);
const int w = depth2.as<rs2::video_frame>().get_width();
const int h = depth2.as<rs2::video_frame>().get_height();
// Create OpenCV matrix of size (w,h) from the colorized depth data
Mat image1(Size(w, h), CV_8UC3, (void*)depth1.get_data(), Mat::AUTO_STEP);
Mat image2(Size(w, h), CV_8UC3, (void*)depth2.get_data(), Mat::AUTO_STEP);
imshow("color_depth_raw", image1);
imshow("color_depth_process", image2);
waitKey(1);
}
return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
return EXIT_FAILURE;
}
catch (const std::exception& e)
{
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
float get_depth_scale(rs2::device dev)
{
// Go over the device's sensors
for (rs2::sensor& sensor : dev.query_sensors())
{
// Check if the sensor is a depth sensor
if (rs2::depth_sensor dpt = sensor.as<rs2::depth_sensor>())
{
return dpt.get_depth_scale();
}
}
throw std::runtime_error("Device does not have a depth sensor");
}
If loading the .dll fails, look online for a fix; most likely the .dll depends on other .dlls that the program cannot find. The tool "Dependency Walker" can help track down the missing .dlls.
That concludes the DLL setup.
Camera calibration and registration
Typing out the derivation is a pain, so as usual it is handwritten and photographed (facepalm).
The Intel RealSense D435i's camera intrinsics, and the rotation and translation from the depth camera to the color camera, can all be found in the RealSense SDK 2.0 samples (project "sensor-control"). The intrinsics differ per stream profile; in my code the depth intrinsics correspond to the 640 x 480, Z16, 30 FPS stream and the color intrinsics to the 640 x 480, RGB8, 30 FPS stream. For depth-color alignment I align the color image to the depth image, i.e. I find a color value for every pixel of the depth image. Note, however, that the depth and color cameras are separated by a horizontal baseline and have different fields of view (the depth camera's is wider), so in reality not every depth pixel can be assigned a color value; I do it this way anyway because it keeps the depth-image processing simple. The code is based on my senior schoolmate's; his blog is linked in my code, and it explains the registration process in detail.
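For reference, this is the relation behind the equivalent R and T computed in the code (R = K_rgb * R_ir2rgb * K_ir.inverse() and T = K_rgb * T_ir2rgb). A depth pixel p_ir = (u, v, 1)^T with depth Z_ir back-projects to the 3D point Z_ir * K_ir^{-1} * p_ir; transforming that point into the color camera's frame and projecting it with K_rgb gives

Z_rgb * p_rgb = K_rgb * (R_ir2rgb * Z_ir * K_ir^{-1} * p_ir + T_ir2rgb)
              = Z_ir * (K_rgb * R_ir2rgb * K_ir^{-1}) * p_ir + K_rgb * T_ir2rgb
              = Z_ir * R * p_ir + T

Dividing the result by its third component Z_rgb yields the pixel coordinates in the color image, which is exactly what the projection loop in DepthFilter does.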
Removing flying pixels with edge detection
The idea is simple. Flying pixels sit at object edges in the depth image, so relative to the "true" edge, the object's edge in the depth image bulges outward slightly. How do we get the true edge? That is exactly why we aligned the color image to the depth image: running Canny edge detection on the aligned color image gives an image containing the object's true edges; combining it with the object edges from the depth image, we simply delete the pixels lying between the two. See the code for details.
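To make the idea concrete, here is a minimal sketch of the core scan, stripped down from the full DepthFilter above (horizontal search only, with no registration-hole cleanup or outlier filtering; the function name, the Canny thresholds, and the default search range N1 are illustrative placeholders, not the exact values tuned in the code above): for each pixel that lies on a depth edge but not on a color edge, search to the right for the nearest color-edge pixel and zero the depth values in between.

#include <opencv2/opencv.hpp>

// depth: CV_16UC1 depth image; colorAligned: CV_8UC3 color image already
// registered to the depth image; N1: maximal expected width of the
// flying-pixel band in pixels.
void removeFlyingPixelsSketch(cv::Mat& depth, const cv::Mat& colorAligned, int N1 = 18)
{
    cv::Mat depth8u, depthEdge, colorEdge;
    depth.convertTo(depth8u, CV_8UC1);
    cv::blur(depth8u, depth8u, cv::Size(3, 3));
    cv::Canny(depth8u, depthEdge, 350, 500);      // edges of the noisy depth image
    cv::Canny(colorAligned, colorEdge, 250, 480); // "true" object edges from color
    for (int i = 0; i < depth.rows; i++)
    {
        for (int j = 0; j < depth.cols - N1; j++)
        {
            // a depth edge with no color edge underneath marks the outer rim
            // of a flying-pixel band
            if (depthEdge.at<uchar>(i, j) != 0 && colorEdge.at<uchar>(i, j) == 0)
            {
                for (int d = 1; d <= N1; d++)
                {
                    if (colorEdge.at<uchar>(i, j + d) != 0) // found the true edge
                    {
                        for (int k = 0; k < d; k++)         // delete the band in between
                            depth.at<unsigned short>(i, j + k) = 0;
                        break;
                    }
                }
            }
        }
    }
}

The full version above does the same thing in both directions vertically as well, and additionally cleans the color-edge map (around registration holes and isolated points) before the scan, then runs a statistical filter on the result.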