#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
using namespace std;
using namespace cv;
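// Estimate the relative camera motion (R,t) between two images from matched ORB features
// using 2D-2D epipolar geometry, then triangulate the matches to recover point depths.
// The images "1.png" and "2.png" are read from the current working directory.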
void pose_estimation_2d2d
(std::vector<KeyPoint> keypoints1,
std::vector<KeyPoint> keypoints2,
std::vector<DMatch> matches,
Mat& R,Mat & t);
void find_feature_match
(Mat imag1,Mat imag2,
vector<KeyPoint>& keypoints1,
vector<KeyPoint>& keypoints2,
vector<DMatch>& matches);
Point2d pixel2cam ( const Point2d& p, const Mat& K );
void triangulation(const vector<KeyPoint>& keypoints1,
const vector<KeyPoint>& keypoints2,
const std::vector<DMatch>& matches,
const Mat& R,const Mat& t,
vector<Point3d>& points);
int main(int argc, char **argv)
{
/* if(argc !=3){
cout<<"usage:feature_extraction img1 img2"<
}*/
// Read the images
Mat imag1=imread("1.png");
Mat imag2=imread("2.png");
std::vector<KeyPoint> keypoints1,keypoints2;
vector<DMatch> matches;
// Feature matching
find_feature_match(imag1,imag2,keypoints1,keypoints2,matches);
cout<<"一共找到了"<
Mat R,t;
pose_estimation_2d2d(keypoints1,keypoints2,matches,R,t);
// Verify E=t^R*scale
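// t_x below is the skew-symmetric ("hat") matrix of t, so t_x*y equals the cross product t x y.
// The essential matrix equals t^R only up to an unknown scale factor,
// so the matrix printed below should match E up to scale.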
Mat t_x=(Mat_<double>(3,3)<<
0,-t.at<double>(2,0),t.at<double>(1,0),
t.at<double>(2,0),0,-t.at<double>(0,0),
-t.at<double>(1,0),t.at<double>(0,0),0);
cout<<"t^R="<<endl<<t_x*R<<endl;
// Verify the epipolar constraint
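// For each match, the normalized image coordinates y1 and y2 should satisfy
// y2^T * t^ * R * y1 = 0, so every value printed in the loop below should be close to zero.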
Mat K=(Mat_<double>(3,3)<<520.9,0,325.1,0,521.0,249.7,0,0,1);
for(DMatch m:matches){
Point2d pt1=pixel2cam(keypoints1[m.queryIdx].pt,K);
Mat y1=(Mat_<double>(3,1)<<pt1.x,pt1.y,1);
Point2d pt2=pixel2cam(keypoints2[m.trainIdx].pt,K);
Mat y2=(Mat_<double>(3,1)<<pt2.x,pt2.y,1);
Mat d=y2.t()*t_x*R*y1;
cout<<"epipolar constraint="<<d<<endl;
}
// Triangulation
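// Note: the translation recovered from the essential matrix is only known up to scale,
// so the triangulated depths share that global scale ambiguity.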
vector<Point3d> points;
triangulation(keypoints1,keypoints2,matches,R,t,points);
// Verify the reprojection relation between the triangulated points and the feature points
for(int i=0;i<(int)matches.size();i++){
// Feature point of the first image in normalized camera coordinates
Point2d pt1_cam=pixel2cam(keypoints1[matches[i].queryIdx].pt,K);
Point2d pt1_cam_3d(
points[i].x/points[i].z,
points[i].y/points[i].z
);//points holds the triangulated coordinates, so the depth is now known
cout<<"point in the first camera frame:"<<pt1_cam<<" projected from 3D:"<<pt1_cam_3d<<" d="<<points[i].z<<endl;
// Feature point of the second image versus the triangulated point transformed by (R,t)
Point2d pt2_cam=pixel2cam(keypoints2[matches[i].trainIdx].pt,K);
Mat pt2_trans=R*(Mat_<double>(3,1)<<points[i].x,points[i].y,points[i].z)+t;
pt2_trans/=pt2_trans.at<double>(2,0);
cout<<"point in the second camera frame:"<<pt2_cam<<" reprojected:"<<pt2_trans.t()<<endl;
}
return 0;
}
Point2d pixel2cam ( const Point2d& p, const Mat& K )
{
return Point2d
(
( p.x - K.at<double>(0,2) ) / K.at<double>(0,0),
( p.y - K.at<double>(1,2) ) / K.at<double>(1,1)
);
}
void pose_estimation_2d2d(std::vector<KeyPoint> keypoints1,
std::vector<KeyPoint> keypoints2,
std::vector<DMatch> matches,
Mat& R,Mat & t)
{
// Camera intrinsics
Mat K=(Mat_<double>(3,3)<<520.9,0,325.1,0,521.0,249.7,0,0,1);
// Convert the matched feature points into vector<Point2f>
vector<Point2f> points1;
vector<Point2f> points2;
for(int i=0;i<(int)matches.size();i++){
points1.push_back( keypoints1[matches[i].queryIdx].pt );
points2.push_back( keypoints2[matches[i].trainIdx].pt );
}
// Compute the fundamental matrix F
Mat F;
F=findFundamentalMat(points1,points2,CV_FM_8POINT);
cout<<"F="<
// Compute the essential matrix E
Point2d principal_point(325.1,249.7);// optical center
int focus_length=521;// focal length
Mat E=findEssentialMat(points1,points2,focus_length,principal_point,RANSAC);
cout<<"E="<
Mat H;
H=findHomography(points1,points2,RANSAC,3,noArray(),2000,0.99);
cout<<"H="<
cout<<"R="<
void find_feature_match(Mat imag1,Mat imag2,
vector<KeyPoint>& keypoints1,
vector<KeyPoint>& keypoints2,
vector<DMatch>& matches)
{
Mat descriptors1,descriptors2;
Ptr<ORB> orb=ORB::create();
// Step 1: detect the corner locations
orb->detect(imag1,keypoints1);
orb->detect(imag2,keypoints2);
// Step 2: compute the BRIEF descriptors from the corner locations
orb->compute(imag1,keypoints1,descriptors1);
orb->compute(imag2,keypoints2,descriptors2);
/* // Draw the keypoint locations
Mat outimage1;
drawKeypoints(imag1,keypoints1,outimage1);//,Scalar::all(-1),DrawMatchesFlags::DEFAULT);
// imshow("ORB特征点",outimage1);*/
// Step 3: match the BRIEF descriptors of the two images using the Hamming distance
vector<DMatch> match1;
BFMatcher matcher (NORM_HAMMING);
matcher.match(descriptors1,descriptors2,match1);
// Step 4: filter the matches
double min_dist=10000,max_dist=0;
// Find the minimum and maximum distance among all matches
for(int i=0;i<descriptors1.rows;i++){
double dist=match1[i].distance;
if(dist<min_dist)
min_dist=dist;
if(dist>max_dist)
max_dist=dist;
}
printf("max_dist: %f \n",max_dist);
printf("min_dist: %f\n",min_dist);
// A match is considered wrong when the distance between descriptors is more than twice the minimum distance,
// but sometimes the minimum distance is very small, so set a lower bound (an empirical value)
// std::vector<DMatch> good_matches;
for(int i=0;i<descriptors1.rows;i++){
if(match1[i].distance<=max(2*min_dist,30.0)){
// good_matches.push_back(matches[i]);
matches.push_back(match1[i]);
}
}
}
/* // Step 5: draw the matching results
Mat imag_match;
Mat imag_goodmatch;
drawMatches(imag1,keypoints1,imag2,keypoints2,matches,imag_match);
drawMatches(imag1,keypoints1,imag2,keypoints2,good_matches,imag_goodmatch);
imshow("所有匹配点对",imag_match);
imshow("筛选过后的点对",imag_goodmatch);*/
}
void triangulation(const vector<KeyPoint>& keypoints1,
const vector<KeyPoint>& keypoints2,
const std::vector<DMatch>& matches,
const Mat& R,const Mat& t,
vector<Point3d>& points)
{
// Projection matrix of the first camera: [I|0]
Mat T1=(Mat_<float>(3,4)<<
1,0,0,0,
0,1,0,0,
0,0,1,0);
// Projection matrix of the second camera: [R|t]
Mat T2=(Mat_<float>(3,4)<<
R.at<double>(0,0),R.at<double>(0,1),R.at<double>(0,2),t.at<double>(0,0),
R.at<double>(1,0),R.at<double>(1,1),R.at<double>(1,2),t.at<double>(1,0),
R.at<double>(2,0),R.at<double>(2,1),R.at<double>(2,2),t.at<double>(2,0));
Mat K=(Mat_<double>(3,3)<<520.9,0,325.1,0,521.0,249.7,0,0,1);
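// pts_1 and pts_2 are filled with normalized camera coordinates (via pixel2cam)
// because the projection matrices T1 and T2 above do not include the intrinsics K.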
vector<Point2f> pts_1,pts_2;
for(DMatch m:matches)
{
// Convert pixel coordinates to normalized camera coordinates
pts_1.push_back(pixel2cam(keypoints1[m.queryIdx].pt,K));
pts_2.push_back(pixel2cam(keypoints2[m.trainIdx].pt,K));
}
Mat pts_4d;
cv::triangulatePoints(T1,T2,pts_1,pts_2,pts_4d);
// Convert homogeneous coordinates to inhomogeneous coordinates
for(int i=0;i<pts_4d.cols;i++){
Mat x=pts_4d.col(i);
x/=x.at<float>(3,0);// normalize the homogeneous coordinate
Point3d p(
x.at<float>(0,0),
x.at<float>(1,0),
x.at<float>(2,0)
);
points.push_back(p);
}
}