Binocular Stereo 3D Reconstruction: Principles and Introduction

1. A Simplified Pipeline for a Binocular Stereo Depth Camera

The depth-measurement process of a binocular stereo depth camera can be summarized briefly as follows (a rough OpenCV sketch of these steps is given after the list):

1) First, calibrate the stereo pair to obtain the intrinsic and extrinsic parameters of the two cameras and the relative transformation between them.

2) Rectify the original images according to the calibration results, so that the two rectified images lie in the same plane and are parallel to each other (rows aligned).

3) Match pixels between the two rectified images.

4) Compute the depth of each pixel from the matching result (the disparity), thereby obtaining a depth map.
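As a rough illustration of steps 2) to 4), the sketch below assumes that a prior cv::stereoCalibrate() run has produced intrinsic matrices K1, K2, distortion coefficients D1, D2, and the inter-camera rotation and translation R, T (all hypothetical variable names), and that leftImg and rightImg are grayscale images. It rectifies the pair, computes a dense disparity map with SGBM, and reprojects it to per-pixel depth. It is only a sketch of the general pipeline, not the point-wise triangulation code used later in this article.

#include <opencv2/opencv.hpp>
#include <vector>

// Minimal sketch of steps 2)-4): rectify, match densely, reproject to depth.
cv::Mat depthFromStereo(const cv::Mat& leftImg, const cv::Mat& rightImg,
                        const cv::Mat& K1, const cv::Mat& D1,
                        const cv::Mat& K2, const cv::Mat& D2,
                        const cv::Mat& R,  const cv::Mat& T)
{
    // Step 2): compute rectifying transforms and remap both images
    cv::Mat R1, R2, P1, P2, Q;
    cv::stereoRectify(K1, D1, K2, D2, leftImg.size(), R, T, R1, R2, P1, P2, Q);
    cv::Mat map1x, map1y, map2x, map2y, rectL, rectR;
    cv::initUndistortRectifyMap(K1, D1, R1, P1, leftImg.size(), CV_32FC1, map1x, map1y);
    cv::initUndistortRectifyMap(K2, D2, R2, P2, rightImg.size(), CV_32FC1, map2x, map2y);
    cv::remap(leftImg,  rectL, map1x, map1y, cv::INTER_LINEAR);
    cv::remap(rightImg, rectR, map2x, map2y, cv::INTER_LINEAR);

    // Step 3): semi-global block matching produces a disparity map
    cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(0, 64, 9);
    cv::Mat disp16, disp;
    sgbm->compute(rectL, rectR, disp16);
    disp16.convertTo(disp, CV_32F, 1.0 / 16.0);   // SGBM returns fixed-point disparities

    // Step 4): reproject disparity to 3D; the Z channel is the depth map
    cv::Mat xyz;
    cv::reprojectImageTo3D(disp, xyz, Q, true);
    std::vector<cv::Mat> channels;
    cv::split(xyz, channels);
    return channels[2];   // per-pixel depth
}

The parameters passed to StereoSGBM::create() (0, 64, 9) are arbitrary example values and would need tuning for a real camera pair.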

2. Application Scenarios

The accuracy of binocular ranging is related to the baseline length (the distance between the two cameras): the farther apart the two cameras are placed, the higher the ranging accuracy.
The problem is that in practical applications the space available for mounting the cameras is limited, with baselines of at most a few meters or a few tens of meters, so the accuracy of binocular ranging drops sharply at long range.

Binocular ranging is therefore generally used for high-precision measurement at close range, while long-range distance measurement usually relies on pulsed laser rangefinders.
The advantage of image-based measurement is its high accuracy at close range, but image quality depends heavily on external conditions such as lighting, camera performance is often not stable enough, and the algorithms are comparatively complex; all of these limit its applications.
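To make the baseline argument concrete: for a rectified stereo pair the depth is Z = f·B/d, where f is the focal length in pixels, B the baseline and d the disparity in pixels, so a disparity error Δd causes a depth error of roughly ΔZ ≈ Z²·Δd / (f·B). The error grows quadratically with distance and shrinks as the baseline grows. A small numeric sketch with assumed example values (not measurements from this article):

#include <cstdio>

int main() {
    double f  = 800.0;   // focal length in pixels (assumed)
    double B  = 0.10;    // baseline in meters (assumed)
    double dd = 0.5;     // disparity error in pixels (assumed matching accuracy)
    for (double Z : {1.0, 5.0, 20.0}) {       // distances in meters
        double dZ = Z * Z * dd / (f * B);     // dZ ≈ Z^2 * Δd / (f * B)
        std::printf("Z = %5.1f m  ->  depth error ≈ %.3f m\n", Z, dZ);
    }
    return 0;
}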


After calibrating the cameras, we can use the resulting intrinsic matrices, distortion parameters, and the transformation between the two cameras to perform 3D reconstruction. With a simple spatial transformation it is easy to derive the relationship between image coordinates (the coordinates of corresponding points in the same frame captured by the two cameras) and 3D coordinates (modeled relative to one of the cameras); for the underlying principle see https://blog.csdn.net/tiemaxiaosu/article/details/51734667#commentsedit
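To spell out that relationship as it is used in the code below: each camera projects a 3D point through its 3×4 projection matrix M = K[R | t]. Eliminating the unknown depth factor from the projection equation gives two linear equations per camera, and stacking the four equations from the left and right views yields the 4×3 least-squares system that uv2xyz() solves:

$$ Z_c \begin{bmatrix} u \\ v \\ 1 \end{bmatrix} = M \begin{bmatrix} X \\ Y \\ Z \\ 1 \end{bmatrix}, \qquad M = K\,[R \mid t] $$

Writing $m_{ij}$ for the entries of M, subtracting u (or v) times the third row from the first (or second) row gives

$$ \begin{aligned} (u\,m_{31} - m_{11})X + (u\,m_{32} - m_{12})Y + (u\,m_{33} - m_{13})Z &= m_{14} - u\,m_{34},\\ (v\,m_{31} - m_{21})X + (v\,m_{32} - m_{22})Y + (v\,m_{33} - m_{23})Z &= m_{24} - v\,m_{34}. \end{aligned} $$

These two rows from the left camera and two from the right form the system A·[X Y Z]^T = B that solve(A, B, XYZ, DECOMP_SVD) solves in the least-squares sense.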

OpenCV's goodFeaturesToTrack() function is used to obtain corner coordinates, from which the pixel coordinates of the corresponding points A and B in the same frame of the left and right cameras are taken.
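A minimal sketch of that corner-detection step, assuming grayscale inputs leftGray and rightGray (hypothetical variable names). Note that goodFeaturesToTrack() only detects corners in each image independently; deciding which left and right corners correspond to the same physical point still has to be done by matching or, as here, by hand:

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

// Detect up to 50 strong corners in each image and print them; the
// corresponding pairs (such as points A and B below) are then picked out manually.
void detectCorners(const cv::Mat& leftGray, const cv::Mat& rightGray)
{
    std::vector<cv::Point2f> leftCorners, rightCorners;
    cv::goodFeaturesToTrack(leftGray,  leftCorners,  50, 0.01, 10);
    cv::goodFeaturesToTrack(rightGray, rightCorners, 50, 0.01, 10);
    for (const auto& p : leftCorners)  std::cout << "left corner:  " << p << std::endl;
    for (const auto& p : rightCorners) std::cout << "right corner: " << p << std::endl;
}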

The distance between points A and B is then computed both in the reconstructed 3D space and by direct physical measurement, giving the ranging error of the 3D reconstruction. The reconstruction code is as follows:

3. Code Implementation
#include <opencv2/opencv.hpp>
#include <iostream>
#include <cmath>

using namespace cv;
using namespace std;

Point2f xyz2uv(Point3f worldPoint, float intrinsic[3][3], float translation[1][3], float rotation[3][3]);
Point3f uv2xyz(Point2f uvLeft, Point2f uvRight);
 
//Left camera intrinsic matrix
float leftIntrinsic[3][3] = {294.0911635717881, 0,                 310.6171586702577,
                             0,                 295.3905526589596, 256.4320568139868,
                             0,                 0,                 1};
 
//Left camera rotation matrix (identity: the left camera defines the world frame)
float leftRotation[3][3] = {1, 0, 0,
                            0, 1, 0,
                            0, 0, 1};
//Left camera translation vector
float leftTranslation[1][3] = {0, 0, 0};
 
//Right camera intrinsic matrix
float rightIntrinsic[3][3] = {293.27225104276044, 0,                 335.4364278875495,
                              0,                  295.1891754871827, 263.677364491931,
                              0,                  0,                 1};
//Right camera rotation matrix (relative to the left camera)
float rightRotation[3][3] =
                {0.9997690293617348,   -0.015539793483491028,   0.014845967384545062,
                 0.01554105039759545,   0.99987923011213,       3.070686448984299e-05,
                 -0.014844651617061421, 0.00020002215521852068, 0.9998897920818591};
//Right camera translation vector (relative to the left camera)
float rightTranslation[1][3] = {0.05875655764200672, -0.0013795019307868321, -0.00044022562044059466};
 
int main(int argc, char *argv[]) {
 
    //Image coordinates of point A in the left camera
    Point2f left(236, 153);
    //Corresponding point in the right camera
    Point2f right(281, 162);
    Point3f worldPoint;
    worldPoint = uv2xyz(left, right);
    cout << "3D coordinates of point A:" << endl << worldPoint << endl;
    double distance = sqrt(worldPoint.x * worldPoint.x + worldPoint.y * worldPoint.y + worldPoint.z * worldPoint.z);
    cout << "Distance from A to the origin:" << endl << distance << endl;
 
    //Image coordinates of point B in the left camera
    Point2f left1(383, 151);
    //Corresponding point in the right camera
    Point2f right1(428, 158);
    Point3f worldPoint1;
    worldPoint1 = uv2xyz(left1, right1);
    cout << "3D coordinates of point B:" << endl << worldPoint1 << endl;
    double distance1 = sqrt(worldPoint1.x * worldPoint1.x + worldPoint1.y * worldPoint1.y + worldPoint1.z * worldPoint1.z);
    cout << "Distance from B to the origin:" << endl << distance1 << endl;
 
    //Euclidean distance between the reconstructed points A and B
    double ab_distance = sqrt((worldPoint.x - worldPoint1.x) * (worldPoint.x - worldPoint1.x) +
                              (worldPoint.y - worldPoint1.y) * (worldPoint.y - worldPoint1.y) +
                              (worldPoint.z - worldPoint1.z) * (worldPoint.z - worldPoint1.z));
    cout << "Distance between A and B: " << ab_distance << endl;
    //Ground-truth distance between A and B from direct measurement
    double AB_truth = 0.66;
    cout << "Relative error: " << abs(ab_distance - AB_truth) / AB_truth << endl;
 
    return 0;
}
 
// Description: recover the 3D coordinates of a point from its image coordinates in the left and right cameras
Point3f uv2xyz(Point2f uvLeft,Point2f uvRight)
{
    // Zc1 * [u1, v1, 1]^T = Ml * [X, Y, Z, 1]^T
    // Zc2 * [u2, v2, 1]^T = Mr * [X, Y, Z, 1]^T
    // where Ml and Mr are the 3x4 projection matrices of the left and right cameras
    Mat mLeftRotation = Mat(3, 3, CV_32F, leftRotation);
    Mat mLeftTranslation = Mat(3, 1, CV_32F, leftTranslation);
    Mat mLeftRT = Mat(3, 4, CV_32F);                  //left camera [R|T] matrix
    hconcat(mLeftRotation, mLeftTranslation, mLeftRT);
    Mat mLeftIntrinsic = Mat(3, 3, CV_32F, leftIntrinsic);
    Mat mLeftM = mLeftIntrinsic * mLeftRT;            //left camera projection matrix Ml = K*[R|T]
    //cout << "Left camera M matrix = " << endl << mLeftM << endl;
 
    Mat mRightRotation = Mat(3, 3, CV_32F, rightRotation);
    Mat mRightTranslation = Mat(3, 1, CV_32F, rightTranslation);
    Mat mRightRT = Mat(3, 4, CV_32F);                 //right camera [R|T] matrix
    hconcat(mRightRotation, mRightTranslation, mRightRT);
    Mat mRightIntrinsic = Mat(3, 3, CV_32F, rightIntrinsic);
    Mat mRightM = mRightIntrinsic * mRightRT;         //right camera projection matrix Mr = K*[R|T]
    //cout << "Right camera M matrix = " << endl << mRightM << endl;
 
    //A matrix of the least-squares system (two rows per camera, see the derivation above)
    Mat A = Mat(4,3,CV_32F);
    A.at<float>(0,0) = uvLeft.x * mLeftM.at<float>(2,0) - mLeftM.at<float>(0,0);
    A.at<float>(0,1) = uvLeft.x * mLeftM.at<float>(2,1) - mLeftM.at<float>(0,1);
    A.at<float>(0,2) = uvLeft.x * mLeftM.at<float>(2,2) - mLeftM.at<float>(0,2);
 
    A.at<float>(1,0) = uvLeft.y * mLeftM.at<float>(2,0) - mLeftM.at<float>(1,0);
    A.at<float>(1,1) = uvLeft.y * mLeftM.at<float>(2,1) - mLeftM.at<float>(1,1);
    A.at<float>(1,2) = uvLeft.y * mLeftM.at<float>(2,2) - mLeftM.at<float>(1,2);
 
    A.at<float>(2,0) = uvRight.x * mRightM.at<float>(2,0) - mRightM.at<float>(0,0);
    A.at<float>(2,1) = uvRight.x * mRightM.at<float>(2,1) - mRightM.at<float>(0,1);
    A.at<float>(2,2) = uvRight.x * mRightM.at<float>(2,2) - mRightM.at<float>(0,2);
 
    A.at<float>(3,0) = uvRight.y * mRightM.at<float>(2,0) - mRightM.at<float>(1,0);
    A.at<float>(3,1) = uvRight.y * mRightM.at<float>(2,1) - mRightM.at<float>(1,1);
    A.at<float>(3,2) = uvRight.y * mRightM.at<float>(2,2) - mRightM.at<float>(1,2);
 
    //B vector (right-hand side) of the least-squares system
    Mat B = Mat(4,1,CV_32F);
    B.at<float>(0,0) = mLeftM.at<float>(0,3) - uvLeft.x * mLeftM.at<float>(2,3);
    B.at<float>(1,0) = mLeftM.at<float>(1,3) - uvLeft.y * mLeftM.at<float>(2,3);
    B.at<float>(2,0) = mRightM.at<float>(0,3) - uvRight.x * mRightM.at<float>(2,3);
    B.at<float>(3,0) = mRightM.at<float>(1,3) - uvRight.y * mRightM.at<float>(2,3);
 
    Mat XYZ = Mat(3, 1, CV_32F);
    //solve for XYZ by SVD-based least squares
    solve(A, B, XYZ, DECOMP_SVD);

    //cout << "3D coordinates = " << endl << XYZ << endl;
 
    //coordinates in the world frame (here identical to the left-camera frame)
    Point3f world;
    world.x = XYZ.at<float>(0,0);
    world.y = XYZ.at<float>(1,0);
    world.z = XYZ.at<float>(2,0);
 
    return world;
}
// Description: project a point in the world frame into a camera's image plane, given that camera's intrinsics and extrinsics
Point2f xyz2uv(Point3f worldPoint,float intrinsic[3][3],float translation[1][3],float rotation[3][3])
{
    // K = [fx  s  x0;  0  fy  y0;  0  0  1]
    // [Xc, Yc, Zc]^T = [R | T] * [Xw, Yw, Zw, 1]^T
    // [u, v, 1]^T = (1/Zc) * K * [Xc, Yc, Zc]^T
    //world coordinates -> camera coordinates: Pc = R*Pw + T
    Point3f c;
    c.x = rotation[0][0]*worldPoint.x + rotation[0][1]*worldPoint.y + rotation[0][2]*worldPoint.z + translation[0][0];
    c.y = rotation[1][0]*worldPoint.x + rotation[1][1]*worldPoint.y + rotation[1][2]*worldPoint.z + translation[0][1];
    c.z = rotation[2][0]*worldPoint.x + rotation[2][1]*worldPoint.y + rotation[2][2]*worldPoint.z + translation[0][2];
 
    //camera coordinates -> pixel coordinates: apply K, then divide by Zc
    Point2f uv;
    uv.x = (intrinsic[0][0]*c.x + intrinsic[0][1]*c.y + intrinsic[0][2]*c.z)/c.z;
    uv.y = (intrinsic[1][0]*c.x + intrinsic[1][1]*c.y + intrinsic[1][2]*c.z)/c.z;
 
    return uv;
}
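As a sanity check, xyz2uv() can be used to reproject the triangulated point back into both images and compare it with the pixel coordinates that were fed into uv2xyz(); the residual (reprojection error) gives a quick indication of calibration and matching quality. A minimal sketch reusing the globals defined above, e.g. called as checkReprojection(worldPoint, left, right) from main():

// Reproject a triangulated point back into the left and right images and
// report how far the reprojection lands from the measured pixel coordinates.
void checkReprojection(Point3f worldPoint, Point2f left, Point2f right)
{
    Point2f lp = xyz2uv(worldPoint, leftIntrinsic,  leftTranslation,  leftRotation);
    Point2f rp = xyz2uv(worldPoint, rightIntrinsic, rightTranslation, rightRotation);
    double errLeft  = sqrt((lp.x - left.x)*(lp.x - left.x)   + (lp.y - left.y)*(lp.y - left.y));
    double errRight = sqrt((rp.x - right.x)*(rp.x - right.x) + (rp.y - right.y)*(rp.y - right.y));
    cout << "Left reprojection error:  " << errLeft  << " px" << endl;
    cout << "Right reprojection error: " << errRight << " px" << endl;
}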
