In the previous section we estimated the camera motion R, t from the epipolar constraint. In this section we use triangulation to recover the depth and obtain (an estimate of) the spatial position of the feature points.
Steps for triangulation with OpenCV (a minimal sketch follows this list; the full program appears later in this section):
1. Build the 3x4 augmented matrix [R|t] for each camera from the rotation matrix and translation vector
2. Compute the normalized coordinates of the matched feature points in the two camera frames (keeping the first two components)
3. Call triangulatePoints to obtain the homogeneous coordinates of the space points
4. Normalize the result and keep the first three components as the non-homogeneous coordinates of the space points
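Condensing these four steps into a minimal, self-contained sketch: the camera motion and the single matched point below are made-up values chosen so the result can be checked by hand, while the full program later in this section does the same thing with real matches.
#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>
using namespace cv;
int main()
{
//1. Augmented matrices [R|t]: the first camera is the reference (identity), the second is a made-up motion of -0.1 along x
Mat T1 = (Mat_<double>(3, 4) << 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0);
Mat T2 = (Mat_<double>(3, 4) << 1, 0, 0, -0.1, 0, 1, 0, 0, 0, 0, 1, 0);
//2. Normalized coordinates of one matched point, consistent with the space point (0.2, 0.4, 2)
std::vector<Point2d> p1{Point2d(0.1, 0.2)}, p2{Point2d(0.05, 0.2)};
//3. Triangulate: the output is a 4 x N matrix of homogeneous coordinates
Mat p4d;
triangulatePoints(T1, T2, p1, p2, p4d);
//4. Divide by the last component and keep the first three: prints roughly [0.2, 0.4, 2]
Mat x = p4d.col(0) / p4d.at<double>(3, 0);
std::cout << Point3d(x.at<double>(0, 0), x.at<double>(1, 0), x.at<double>(2, 0)) << std::endl;
return 0;
}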
Note that triangulation comes from translation: with pure rotation, triangulation cannot be used, because the epipolar constraint is always satisfied. When translation is present, we also have to care about the uncertainty of triangulation, which leads to a contradiction inherent in triangulation.
If a feature point moves by one pixel δx, the viewing angle changes by an angle δθ, and the measured depth changes by δd. From the geometry one can see that when t is large, δd becomes noticeably smaller, which means that the larger the translation, the more accurate the triangulation is at the same camera resolution.
One way to improve the accuracy of triangulation is to improve the accuracy of feature-point extraction, i.e. to increase the image resolution, but this makes the images larger and raises the computational cost.
The other way is to increase the translation. However, a larger translation causes obvious changes in the appearance of the images, for example a previously occluded side of a box becomes visible, or reflections change, and such appearance changes make feature extraction and matching difficult.
Hence the contradiction of triangulation: a larger translation may cause matching to fail, while a smaller translation gives insufficient triangulation accuracy.
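As a rough numerical illustration of this trade-off, here is a sketch under a simplified parallel-stereo model (depth z = f * b / disparity, so a one-pixel matching error dx gives a depth error of roughly z^2 * dx / (f * b)); the depth and baselines are made-up values, and the focal length is the one used in the intrinsics of the program below.
#include <cstdio>
int main()
{
//Simplified parallel-stereo model: z = f * b / disparity, hence dz is about z^2 * dx / (f * b) for a disparity error dx
double f = 520.9; //focal length in pixels (same value as the intrinsics used in the program below)
double z = 10.0; //assumed true depth, in meters
double dx = 1.0; //one pixel of matching error
double baselines[] = {0.05, 0.1, 0.5, 1.0}; //translation between the two cameras, in meters
for(double b : baselines)
printf("baseline %.2f m -> depth error ~ %.3f m\n", b, z * z * dx / (f * b)); //error shrinks as the translation grows
return 0;
}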
#include <iostream>
using namespace std;
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
void OrbFeaturesMatch(const Mat &img1, const Mat &img2,
vector<KeyPoint> &kp1, vector<KeyPoint> &kp2,
vector<DMatch> &matches)
{
Mat desc1, desc2;
Ptr<FeatureDetector> detector = ORB::create();
Ptr<DescriptorExtractor> desc = ORB::create();
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
//1. Detect keypoints
detector->detect(img1, kp1);
detector->detect(img2, kp2);
//2. Compute descriptors
desc->compute(img1, kp1, desc1);
desc->compute(img2, kp2, desc2);
//3. Match the descriptors of the two images
vector<DMatch> match;
matcher->match(desc1, desc2, match);
//4. Filter out bad matches
double minDist = 10000, maxDist = 0;
for(int i = 0; i < (int)match.size(); i++)
{
double dist = match[i].distance;
minDist = minDist > dist ? dist : minDist;
maxDist = maxDist < dist ? dist : maxDist;
}
cout << "minDist and maxDist = " << minDist << " and " << maxDist << endl;
double matchDist = max(30.0, minDist * 2); //empirical threshold: twice the minimum distance, with a lower bound of 30
for(int i = 0; i < (int)match.size(); i++)
if(match[i].distance <= matchDist)
matches.push_back(match[i]);
}
void estimateMoving(const vector<KeyPoint> &kp1, const vector<KeyPoint> &kp2, const vector<DMatch> &matches, Mat &R, Mat &t)
{
//Extract the pixel coordinates of the matched point pairs
vector<Point2f> p1, p2;
for(int i = 0; i < (int)matches.size(); i++)
{
//DMatch has 3 members: queryIdx, trainIdx and distance
//queryIdx is the index of the feature point in the first image, trainIdx its index in the second image
p1.push_back(kp1[matches[i].queryIdx].pt);
p2.push_back(kp2[matches[i].trainIdx].pt);
}
//1. Compute the fundamental, essential and homography matrices
Mat fundamentalMat = findFundamentalMat(p1, p2, FM_8POINT); //eight-point algorithm
cout << "fundamentalMat :\n" << fundamentalMat << endl;
double focalLength = 520.9;
Point2d principalPoint(325.1, 249.7);
Mat essentialMat = findEssentialMat(p1, p2, focalLength, principalPoint);
cout << "essentialMax :\n" << essentialMat << endl;
//When the scene is not planar, the homography matrix is of little use
Mat homographyMat = findHomography(p1, p2, RANSAC, 3); //RANSAC-based robust estimation with a maximum allowed reprojection error of 3 pixels
cout << "homographyMat :\n" << homographyMat << endl;
//2. Decompose the essential matrix to recover the camera motion R, t
recoverPose(essentialMat, p1, p2, R, t, focalLength, principalPoint);
cout << "R :\n" << R << endl;
cout << "t :" << t.t() << endl;
}
Point2d pixelToNor(const Point2d &p, const Mat &k)
{
//Pixel coordinates = intrinsic matrix * normalized coordinates: p = K * x (in homogeneous coordinates)
//Writing the pixel coordinates as (x, y) and the normalized coordinates as (p_x, p_y): x = fx * p_x + cx, y = fy * p_y + cy
//Hence p_x = (x - cx) / fx, p_y = (y - cy) / fy
return Point2d((p.x - k.at<double>(0, 2)) / k.at<double>(0, 0), (p.y - k.at<double>(1, 2)) / k.at<double>(1, 1));
}
void triangulation(const vector<KeyPoint> &kp1, const vector<KeyPoint> &kp2,
const vector<DMatch> &matches,
const Mat &R, const Mat &t,
vector<Point3d> &p)
{
//T is the 3x4 augmented matrix formed by concatenating the rotation matrix and the translation vector, not a 4x4 transform matrix
//img1 is taken as the reference frame, so T1 represents no rotation and no translation, while T2 is the rotation and translation from img1 to img2
Mat T1 = (Mat_<double>(3, 4) <<
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0);
Mat T2 = (Mat_<double>(3, 4) <<
R.at<double>(0, 0), R.at<double>(0, 1), R.at<double>(0, 2), t.at<double>(0, 0),
R.at<double>(1, 0), R.at<double>(1, 1), R.at<double>(1, 2), t.at<double>(1, 0),
R.at<double>(2, 0), R.at<double>(2, 1), R.at<double>(2, 2), t.at<double>(2, 0)
);
Mat k = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
//Convert the pixel coordinates to the (x, y) of the normalized coordinates
vector<Point2d> p1, p2;
for(auto m : matches)
{
p1.push_back(pixelToNor(kp1[m.queryIdx].pt, k));
p2.push_back(pixelToNor(kp2[m.trainIdx].pt, k));
}
Mat p4d; //homogeneous coordinates of the space points, 4 x N
triangulatePoints(T1, T2, p1, p2, p4d);
//Convert to non-homogeneous coordinates
for(int i = 0; i < p4d.cols; i++)
{
Mat x = p4d.col(i); //take one column, i.e. one point
x = x / x.at<double>(3, 0); //normalize by the homogeneous scale
Point3d pt(x.at<double>(0, 0), x.at<double>(1, 0), x.at<double>(2, 0));
p.push_back(pt);
}
}
inline cv::Scalar get_color(float depth) {
float up_th = 50, low_th = 10, th_range = up_th - low_th;
if (depth > up_th) depth = up_th;
if (depth < low_th) depth = low_th;
return cv::Scalar(255 * depth / th_range, 0, 255 * (1 - depth / th_range));
}
int main(int argc, char **argv)
{
if(argc != 3)
{
cout << "input 2 images" << endl;
return -1;
}
Mat img1 = imread(argv[1], IMREAD_COLOR);
Mat img2 = imread(argv[2], IMREAD_COLOR);
//1. Match feature points
vector<KeyPoint> kp1, kp2;
vector<DMatch> matches;
OrbFeaturesMatch(img1, img2, kp1, kp2, matches);
//2. Estimate the camera motion
Mat R, t;
estimateMoving(kp1, kp2, matches, R, t);
//3. Triangulate to obtain the spatial coordinates of the feature points
vector<Point3d> p;
triangulation(kp1, kp2, matches, R, t, p);
Mat k = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
//4. Verify the reprojection relation between the triangulated points and the feature points
for(int i = 0; i < (int)matches.size(); i++)
{
Point2d p1 = pixelToNor(kp1[matches[i].queryIdx].pt, k); //(x, y) of the normalized coordinates of the feature point in img1
double p1Depth = p[i].z; //depth in camera 1
cv::circle(img1, kp1[matches[i].queryIdx].pt, 2, get_color(p1Depth), 2); //draw a circle colored by depth
Mat p2_3d = R * (Mat_<double>(3, 1) << p[i].x, p[i].y, p[i].z) + t; //transform the space point into the frame of camera 2
double p2Depth = p2_3d.at<double>(2, 0);
cv::circle(img2, kp2[matches[i].trainIdx].pt, 2, get_color(p2Depth), 2);
Point2d p1Projected(p[i].x, p[i].y); //first two components of the triangulated point in camera frame 1
p1Projected = p1Projected / p[i].z; //divide by the depth to reproject onto the normalized plane of camera 1
printf("feature point %03d in img1: ", i);
cout << p1 << ", reprojected: " << p1Projected << ", depth: " << p1Depth << endl;
Point2d p2Projected(p2_3d.at<double>(0, 0), p2_3d.at<double>(1, 0)); //first two components of the point in camera frame 2
p2Projected = p2Projected / p2Depth; //divide by the depth to reproject onto the normalized plane of camera 2
Point2d p2 = pixelToNor(kp2[matches[i].trainIdx].pt, k); //(x, y) of the normalized coordinates of the feature point in img2
printf("feature point %03d in img2: ", i);
cout << p2 << ", reprojected: " << p2Projected << ", depth: " << p2Depth << endl;
}
imshow("img1", img1);
imshow("img2", img2);
waitKey(0);
return 0;
}
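For each matched pair the program prints the normalized coordinates of the measured feature point, the reprojection of the triangulated point onto the same normalized plane, and its depth; if the triangulation is correct, the measured and reprojected coordinates should agree closely. The circles drawn on the two images are colored by depth through get_color, roughly blue for far points and red for near ones.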