Application scenarios:
The accuracy of binocular (stereo) ranging depends on the baseline length, i.e. the distance between the two cameras: the farther apart the cameras are placed, the higher the ranging accuracy.
The problem is that in practice the space available for mounting the cameras is limited, allowing a baseline of at most a few meters or a few tens of meters, so the accuracy of stereo ranging drops sharply at long range.
Stereo ranging is therefore generally used for high-accuracy measurement at close range, while long-range distance measurement usually relies on pulsed laser rangefinders.
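This baseline effect is easy to quantify with a standard back-of-the-envelope relation for a rectified stereo pair (not derived in the original text): with focal length $f$, baseline $B$, and disparity $d$, the depth is

$$Z = \frac{fB}{d}, \qquad \Delta Z \approx \frac{Z^2}{fB}\,\Delta d,$$

so for a fixed disparity error $\Delta d$ the depth error grows quadratically with range and shrinks as the baseline lengthens.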
The advantage of image-based measurement is its high accuracy at close range, but image quality is heavily constrained by external conditions such as illumination, camera performance is often not stable enough, and the algorithms are relatively complex, all of which limit its application.
After camera calibration, we can use the resulting intrinsic matrices, distortion coefficients, and the rotation/translation between the two cameras to perform 3D reconstruction. A straightforward spatial transformation yields the relationship between image coordinates (the coordinates of corresponding points in the same frame captured by the two cameras) and 3D coordinates (in a coordinate system anchored at one of the cameras); for the derivation see https://blog.csdn.net/tiemaxiaosu/article/details/51734667#commentsedit
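Concretely, the relation being used is the standard pinhole projection (a textbook identity, stated here for convenience rather than quoted from the linked post): each camera contributes

$$Z_c\begin{bmatrix}u\\ v\\ 1\end{bmatrix} = K\,[R \mid T]\begin{bmatrix}X_w\\ Y_w\\ Z_w\\ 1\end{bmatrix}, \qquad K=\begin{bmatrix}f_x & s & x_0\\ 0 & f_y & y_0\\ 0 & 0 & 1\end{bmatrix},$$

so two cameras give four linear equations in the three unknowns $(X_w, Y_w, Z_w)$, which the code below solves by least squares.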
Use OpenCV's goodFeaturesToTrack() function to obtain corner coordinates, taking the pixel coordinates of the corresponding points A and B in the same frame from the left and right cameras, for example as sketched below.
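A minimal sketch of that step (the helper name detectCorners and the parameter values 100 / 0.01 / 10 are illustrative choices of mine, not from the original experiment):

#include <opencv2/opencv.hpp>
#include <vector>

// Detect up to maxCorners Shi-Tomasi corners in a grayscale image.
std::vector<cv::Point2f> detectCorners(const cv::Mat& gray, int maxCorners = 100)
{
    std::vector<cv::Point2f> corners;
    // quality level 0.01, at least 10 px between corners (illustrative values)
    cv::goodFeaturesToTrack(gray, corners, maxCorners, 0.01, 10);
    return corners;
}

Corners detected in the left and right frames still need to be matched into corresponding pairs before triangulation.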
Then compute the distance between A and B in the reconstructed 3D space, and compare it with the physically measured distance to obtain the 3D reconstruction ranging error. The 3D reconstruction code is as follows:
#include <iostream>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

Point2f xyz2uv(Point3f worldPoint, float intrinsic[3][3], float translation[1][3], float rotation[3][3]);
Point3f uv2xyz(Point2f uvLeft, Point2f uvRight);
// left camera intrinsic matrix
float leftIntrinsic[3][3] = { 294.0911635717881, 0,                 310.6171586702577,
                              0,                 295.3905526589596, 256.4320568139868,
                              0,                 0,                 1 };
// left camera rotation matrix (the left camera is the world reference, so identity)
float leftRotation[3][3] = { 1, 0, 0,
                             0, 1, 0,
                             0, 0, 1 };
// left camera translation vector
float leftTranslation[1][3] = { 0, 0, 0 };
// right camera intrinsic matrix
float rightIntrinsic[3][3] = { 293.27225104276044, 0,                 335.4364278875495,
                               0,                  295.1891754871827, 263.677364491931,
                               0,                  0,                 1 };
// right camera rotation matrix (relative to the left camera)
float rightRotation[3][3] = { 0.9997690293617348,    -0.015539793483491028,   0.014845967384545062,
                              0.01554105039759545,    0.99987923011213,       3.070686448984299e-05,
                              -0.014844651617061421,  0.00020002215521852068, 0.9998897920818591 };
// right camera translation vector (relative to the left camera)
float rightTranslation[1][3] = { 0.05875655764200672, -0.0013795019307868321, -0.00044022562044059466 };
int main(int argc, char *argv[])
{
    // pixel coordinates of point A in the left image
    Point2f left(236, 153);
    // corresponding pixel coordinates of point A in the right image
    Point2f right(281, 162);
    Point3f worldPoint;
    worldPoint = uv2xyz(left, right);
cout<<"A点空间坐标为:"<(0,0) = uvLeft.x * mLeftM.at(2,0) - mLeftM.at(0,0);
A.at(0,1) = uvLeft.x * mLeftM.at(2,1) - mLeftM.at(0,1);
A.at(0,2) = uvLeft.x * mLeftM.at(2,2) - mLeftM.at(0,2);
A.at(1,0) = uvLeft.y * mLeftM.at(2,0) - mLeftM.at(1,0);
A.at(1,1) = uvLeft.y * mLeftM.at(2,1) - mLeftM.at(1,1);
A.at(1,2) = uvLeft.y * mLeftM.at(2,2) - mLeftM.at(1,2);
A.at(2,0) = uvRight.x * mRightM.at(2,0) - mRightM.at(0,0);
A.at(2,1) = uvRight.x * mRightM.at(2,1) - mRightM.at(0,1);
A.at(2,2) = uvRight.x * mRightM.at(2,2) - mRightM.at(0,2);
A.at(3,0) = uvRight.y * mRightM.at(2,0) - mRightM.at(1,0);
A.at(3,1) = uvRight.y * mRightM.at(2,1) - mRightM.at(1,1);
A.at(3,2) = uvRight.y * mRightM.at(2,2) - mRightM.at(1,2);
    // least-squares B vector (right-hand side)
    Mat B = Mat(4, 1, CV_32F);
    B.at<float>(0, 0) = mLeftM.at<float>(0, 3) - uvLeft.x * mLeftM.at<float>(2, 3);
    B.at<float>(1, 0) = mLeftM.at<float>(1, 3) - uvLeft.y * mLeftM.at<float>(2, 3);
    B.at<float>(2, 0) = mRightM.at<float>(0, 3) - uvRight.x * mRightM.at<float>(2, 3);
    B.at<float>(3, 0) = mRightM.at<float>(1, 3) - uvRight.y * mRightM.at<float>(2, 3);
    Mat XYZ = Mat(3, 1, CV_32F);
    // solve A * XYZ = B for XYZ via SVD-based least squares
    solve(A, B, XYZ, DECOMP_SVD);
//cout<<"空间坐标为 = "<(0,0);
world.y = XYZ.at(1,0);
world.z = XYZ.at(2,0);
return world;
}
// Description: project a point in the world coordinate system into a camera's
// image (pixel) coordinate system
Point2f xyz2uv(Point3f worldPoint, float intrinsic[3][3], float translation[1][3], float rotation[3][3])
{
    //     [fx  s  x0]                   [Xc]          [Xw]     [u]    1       [Xc]
    // K = [ 0  fy y0]    TEMP = [R T]   [Yc] = TEMP * [Yw]     [v] = -- * K * [Yc]
    //     [ 0  0   1]                   [Zc]          [Zw]     [1]    Zc      [Zc]
    //                                                 [ 1]
    // world -> camera coordinates: Pc = R * Pw + T
    Point3f c;
    c.x = rotation[0][0] * worldPoint.x + rotation[0][1] * worldPoint.y + rotation[0][2] * worldPoint.z + translation[0][0];
    c.y = rotation[1][0] * worldPoint.x + rotation[1][1] * worldPoint.y + rotation[1][2] * worldPoint.z + translation[0][1];
    c.z = rotation[2][0] * worldPoint.x + rotation[2][1] * worldPoint.y + rotation[2][2] * worldPoint.z + translation[0][2];
    // camera -> pixel coordinates: (u, v) = (1/Zc) * K * Pc
    Point2f uv;
    uv.x = (intrinsic[0][0] * c.x + intrinsic[0][1] * c.y + intrinsic[0][2] * c.z) / c.z;
    uv.y = (intrinsic[1][0] * c.x + intrinsic[1][1] * c.y + intrinsic[1][2] * c.z) / c.z;
    return uv;
}
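The text above also calls for comparing the reconstructed A-B distance with the physically measured one. A sketch of that final step follows; it would replace the body of main() in the listing, and point B's pixel coordinates (320, 180) / (360, 188) and the 0.50 reference distance are placeholders invented for illustration (add #include <cmath> for std::sqrt and std::fabs):

    // reconstruct both corner points in the left-camera frame
    Point3f A = uv2xyz(Point2f(236, 153), Point2f(281, 162));
    Point3f B = uv2xyz(Point2f(320, 180), Point2f(360, 188));   // assumed coordinates for B
    // Euclidean distance between A and B in 3D space
    float dx = A.x - B.x, dy = A.y - B.y, dz = A.z - B.z;
    float computed = std::sqrt(dx * dx + dy * dy + dz * dz);
    float measured = 0.50f;                                     // assumed tape-measured distance
    cout << "computed = " << computed << ", measured = " << measured
         << ", error = " << std::fabs(computed - measured) << endl;

The units of the result follow whatever units the calibration translation vector uses; here the 0.0587 x-component of rightTranslation suggests meters, i.e. a baseline of roughly 5.9 cm.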