// 2D-2D pose estimation extended with triangulation to fix the depth/scale.
// Reference: "14 Lectures on Visual SLAM", section 7.6.
// (original note: 2D-2D位姿估计增加三角测量,确定尺度--参考SLAM十四讲7.6)

#include <iostream>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>

using namespace std;
using namespace cv;

void pose_estimation_2d2d
(std::vector keypoints1,
std::vector keypoints2,
std::vector matches,
Mat& R,Mat & t);

void find_feature_match
(Mat imag1,Mat imag2,
vector &keypoints1,
vector &keypoints2,
vector& matches);

Point2d pixel2cam ( const Point2d& p, const Mat& K );

void triangulation(const vector& keypoints1,
const vector& keypoints2,
const std::vector& matches,
const Mat& R,const Mat& t,
vector& points);//三角化函数

int main(int argc, char **argv)
{
/* if(argc !=3){
cout<<"usage:feature_extraction img1 img2"< return 1;
}*/
//读取图像
Mat imag1=imread("1.png");
Mat imag2=imread("2.png");

std::vector keypoints1,keypoints2;
vector matches;
//特征匹配
find_feature_match(imag1,imag2,keypoints1,keypoints2,matches);
cout<<"一共找到了"< //估计运动
Mat R,t;
pose_estimation_2d2d(keypoints1,keypoints2,matches,R,t);

//验证E=t^R*Scale
Mat t_x=(Mat_(3,3)<<0,-t.at(2,0),t.at(1,0),
t.at(2,0),0,-t.at(0,0),
-t.at(1,0),t.at(0,0),0);
cout<<"t^R="<
//验证对极几何
Mat K=(Mat_(3,3)<<520.9,0,325.1,0,521.0,249.7,0,0,1);
for(DMatch m:matches){
Point2d pt1=pixel2cam(keypoints1[m.queryIdx].pt,K);
Mat y1=(Mat_(3,1)< Point2d pt2=pixel2cam(keypoints2[m.trainIdx].pt,K);
Mat y2=(Mat_(3,1)< Mat d=y2.t()*t_x*R*y1;
cout<<"对极约束="< }

//三角化
vector points;
triangulation(keypoints1,keypoints2,matches,R,t,points);

//验证三角化与特征点的重投影关系
for(int i=0;i Point2d pt1_cam=pixel2cam(keypoints1[matches[i].queryIdx].pt,K);//归一化平面上的坐标,但不知深度
Point2d pt1_cam_3d(
points[i].x/points[i].z,
points[i].y/points[i].z
);//points是通过三角测量以后重投影的坐标,知道了深度
cout<<"point in the first camera frame:"< cout<<"point projected from 3D "<
Point2d pt2_cam=pixel2cam(keypoints2[matches[i].trainIdx].pt,K);
Mat pt2_trans=R*(Mat_(3,1)< pt2_trans /=pt2_trans.at(2,0);
cout<<"point in the second camera frame:"< cout<<"point projected from second frame: "< cout<
}
return 0;
}


// Convert a pixel coordinate p to a normalized camera-plane coordinate
// (z = 1) using intrinsics K: x = (u - cx) / fx, y = (v - cy) / fy.
// K is expected to be a 3x3 CV_64F matrix.
Point2d pixel2cam ( const Point2d& p, const Mat& K )
{
    return Point2d
    (
        ( p.x - K.at<double> ( 0,2 ) ) / K.at<double> ( 0,0 ),
        ( p.y - K.at<double> ( 1,2 ) ) / K.at<double> ( 1,1 )
    );
}
void pose_estimation_2d2d(std::vector keypoints1,
std::vector keypoints2,
std::vector matches,
Mat& R,Mat & t)
{
//相机内参
Mat K=(Mat_ (3,3)<<520.9,0,325.1,0,521.0,249.7,0,0,1);
//把匹配点转换为vector
vector points1;
vector points2;
for(int i=0;i<(int)matches.size();i++){
points1.push_back( keypoints1[matches[i].queryIdx].pt );
points2.push_back( keypoints2[matches[i].trainIdx].pt );
}
//计算基础矩阵F
Mat F;
F=findFundamentalMat(points1,points2,CV_FM_8POINT);
cout<<"F="< //计算本质矩阵F
Point2d principal_point(325.1,249.7);//光心
int focus_length=521;//焦距
Mat E=findEssentialMat(points1,points2,focus_length,principal_point,RANSAC);
cout<<"E="< //计算单位矩阵H
Mat H;
H=findHomography(points1,points2,RANSAC,3,noArray(),2000,0.99);
cout<<"H="< recoverPose(E,points1,points2,R,t,focus_length,principal_point);
cout<<"R="< cout<<"t="< }
void find_feature_match(Mat imag1,Mat imag2,
vector &keypoints1,
vector &keypoints2,
vector &matches)
{
Mat descriptors1,descriptors2;
Ptr orb=ORB::create();//(500,1.2f,8,31,0,2,ORB::HARRIS_SCORE,31,20);
//第一步:检测角点位置
orb->detect(imag1,keypoints1);
orb->detect(imag2,keypoints2);

//第二步:根据角点位置计算BRIEF描述子
orb->compute(imag1,keypoints1,descriptors1);
orb->compute(imag2,keypoints2,descriptors2);
/* //画出关键点的位置
Mat outimage1;
drawKeypoints(imag1,keypoints1,outimage1);//,Scalar::all(-1),DrawMatchesFlags::DEFAULT);
// imshow("ORB特征点",outimage1);*/

//第三步:对两幅图像中的BRIEF描述子进行匹配,使用Hamming距离
vector match1;
BFMatcher matcher (NORM_HAMMING);
matcher.match(descriptors1,descriptors2,match1);

//第四步:匹配点筛选
double min_dist=10000,max_dist=0;
//找出所有匹配之间的最小距离和最大距离
for(int i=0;i double dist=match1[i].distance;
if(dist min_dist=dist;
if(dist>max_dist)
max_dist=dist;
}
printf("max_dist: %f \n",max_dist);
printf("min_dist: %f\n",min_dist);
//认为当描述子之间的距离大于两倍最小距离时,为误匹配
//但有时最小距离会非常小,所以应设置下限(经验值)
// std::vector good_matches;
for(int i=0;i if(match1[i].distance<=max(2*min_dist,30.0)){//这个决定了匹配的点数
// good_matches.push_back(matches[i]);
matches.push_back(match1[i]);
}
}

/* //第五步:绘制匹配结果
Mat imag_match;
Mat imag_goodmatch;
drawMatches(imag1,keypoints1,imag2,keypoints2,matches,imag_match);
drawMatches(imag1,keypoints1,imag2,keypoints2,good_matches,imag_goodmatch);
imshow("所有匹配点对",imag_match);
imshow("筛选过后的点对",imag_goodmatch);*/

}

void triangulation(const vector& keypoints1,
const vector& keypoints2,
const std::vector& matches,
const Mat& R,const Mat& t,
vector& points)
{
Mat T1=(Mat_(3,4)<<
1,0,0,0,
0,1,0,0,
0,0,1,0);
Mat T2=(Mat_(3,4)<<
R.at(0,0),R.at(0,1),R.at(0,2),t.at(0,0),
R.at(1,0),R.at(1,1),R.at(1,2),t.at(1,0),
R.at(2,0),R.at(2,1),R.at(2,2),t.at(2,0));
Mat K=(Mat_ (3,3)<<520.9,0,325.1,0,521.0,249.7,0,0,1);
vector pts_1,pts_2;
for(DMatch m:matches)
{
//将像素坐标转为相机坐标
pts_1.push_back(pixel2cam(keypoints1[m.queryIdx].pt,K));
pts_2.push_back(pixel2cam(keypoints2[m.trainIdx].pt,K));
}
Mat pts_4d;
cv::triangulatePoints(T1,T2,pts_1,pts_2,pts_4d);
//转成非齐次坐标
for(int i=0;i Mat x=pts_4d.col(i);
x/=x.at(3,0);
Point3d p(
x.at(0,0),
x.at(1,0),
x.at(2,0)
);
points.push_back(p);
}
}

// (blog footer removed from code path) 你可能感兴趣的:(SLAM)