slambook2 ch7 pose_estimation_2d2d:估计多张图像之间的位姿

算法

  1. 计算第一张图和第二张图的关键点并匹配
  2. 以第一张图的相机坐标为世界坐标,计算第二张图相对第一张图的旋转矩阵、平移矩阵
  3. 不断更新第一张图,在进行第二次计算时,以第二张图为第一张图,以第二张图的相机坐标系为世界坐标系

代码

//
// Created by automobile on 2020/7/19.
//
#include <iostream>
#include <vector>
#include <chrono>
#include <cassert>
#include <cstdio>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

/*********************************
* This program demonstrates estimating camera motion
* from 2D-2D feature matches (epipolar geometry).
***********************************/

// Detect ORB keypoints in both images, compute descriptors,
// and return the distance-filtered matches.
void find_feature_matches(
        Mat &img_1,Mat &img_2,
        std::vector<KeyPoint> &keypoints_1,
        std::vector<KeyPoint> &keypoints_2,
        std::vector<DMatch> &matches
);

// Recover the relative rotation R and (unit-scale) translation t
// between the two views from the matched keypoints.
void pose_estimation_2d2d(
        std::vector<KeyPoint> &keypoints_1,
        std::vector<KeyPoint> &keypoints_2,
        std::vector<DMatch> &matches,
        Mat &R, Mat &t
);

// Convert a pixel coordinate to a normalized camera-plane coordinate.
Point2d pixel2cam(const Point2d &p,const Mat &K);

int main(){
    //--读取图像
   // cv::String path = "/home/automobile/wcm/database/1080img" ;
    cv::String path = "/home/automobile/wcm/database/img_data" ;
    std::vector<cv::String> filenames;
    //cv::glob()读取文件夹中数据
    cv::glob(path, filenames);

    if (filenames.size() == 0){
        cout << "NO image files [png]" << endl;
    }

    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();

    //--寻找关键点,计算描述子,匹配关键点
    vector<KeyPoint> keypoints_1, keypoints_2;
    vector<DMatch> matches;
    Mat img_1, img_2;
    Mat R, t;

    Mat K = (Mat_<double>(3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);//给相机内参数矩阵赋值


    //开始计算文件夹下的图像之间的位姿
   for (int size = filenames.size() - 1 ; size > 0; size-- ){
   //开始读取图像
   Mat img_1 = cv::imread(filenames[size]);
   Mat img_2 = cv::imread(filenames[size - 1]);
   assert(img_1.data && img_2.data && "Cannot load images");

    //匹配特征点
    find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);

    //估计两张图像间相机运动
   pose_estimation_2d2d(keypoints_1, keypoints_2, matches, R, t);
   cout <<  "位姿估计 i= " << size << endl;


    //--验证E=t^R*scale
    Mat t_x=(
            Mat_<double> (3,3) << 0, -t.at<double>(2, 0), t.at<double>(1, 0),
            t.at<double>(2,0), 0, -t.at<double>(0,0),
            -t.at<double>(1,0), t.at<double>(0,0), 0
        );
    cout << "对第 " << size << " t^R= " << t_x * R << endl;



    /*
    //验证对极约束
    cout << "对第 " << size << " 验证对极约束 " << endl;
    for(DMatch m:matches){
        Point2d pt1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
        Mat y1 = (Mat_(3,1) << pt1.x, pt1.y, 1);
        Point2d pt2 = pixel2cam(keypoints_2[m.trainIdx].pt, K);
        Mat y2 = (Mat_(3, 1) << pt2.x, pt2.y, 1);
        Mat d = y2.t() * t_x * R *y1;
        cout << " epipolar constraint = " << d <

    }


    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    chrono::duration<double> poseEstimation_2d2d = chrono::duration_cast<chrono::duration<double>>(t2 - t1);

    cout << "estimate 100 pose cost = " << poseEstimation_2d2d.count() << " seconds." <<endl;

   return 0;
}


// Map a pixel coordinate p to the normalized camera plane (z = 1)
// using the intrinsic matrix K: x = (u - cx) / fx, y = (v - cy) / fy.
Point2d pixel2cam(const Point2d &p, const Mat &K) {
    const double fx = K.at<double>(0, 0);
    const double fy = K.at<double>(1, 1);
    const double cx = K.at<double>(0, 2);
    const double cy = K.at<double>(1, 2);
    return {(p.x - cx) / fx, (p.y - cy) / fy};
}

/// Detect ORB keypoints in both images, compute their descriptors, match
/// them with brute-force Hamming distance, and keep only good matches.
///
/// @param img_1       query image (matches index into keypoints_1 via queryIdx)
/// @param img_2       train image (matches index into keypoints_2 via trainIdx)
/// @param keypoints_1 [out] keypoints detected in img_1
/// @param keypoints_2 [out] keypoints detected in img_2
/// @param matches     [out] filtered matches; cleared on entry so the caller
///                    can safely reuse the same vector across image pairs
void find_feature_matches(Mat &img_1, Mat &img_2,
                         std::vector<KeyPoint> &keypoints_1,
                         std::vector<KeyPoint> &keypoints_2,
                         std::vector<DMatch> &matches
                         ){
   // Start from an empty result — previously matches accumulated across calls.
   matches.clear();

   //-- Initialization (OpenCV3 API).
   Mat descriptors_1, descriptors_2;
   Ptr<FeatureDetector> detector = ORB::create();
   Ptr<DescriptorExtractor> descriptor = ORB::create();
   Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");

   //-- Step 1: detect Oriented FAST corner locations.
   detector->detect(img_1, keypoints_1);
   detector->detect(img_2, keypoints_2);

   //-- Step 2: compute BRIEF descriptors at the detected corners.
   descriptor->compute(img_1, keypoints_1, descriptors_1);
   descriptor->compute(img_2, keypoints_2, descriptors_2);

   //-- Step 3: match the descriptors using Hamming distance.
   vector<DMatch> match;
   matcher->match(descriptors_1, descriptors_2, match);

   //-- Step 4: filter the matches.
   // Find the min/max distance over all matches, i.e. the most and least
   // similar descriptor pairs.
   double min_dist = 10000, max_dist = 0;
   for (int num = 0; num < descriptors_1.rows; num++){
       double dist = match[num].distance;
       if (dist < min_dist) min_dist = dist;
       if (dist > max_dist) max_dist = dist;
   }

   printf(" --Max dist : %f \n", max_dist);
   printf(" --Min dist : %f \n", min_dist);

   // Reject matches farther than twice the minimum distance; since min_dist
   // can be tiny, use the empirical value 30 as a lower bound.
   for (int num1 = 0; num1 < descriptors_1.rows; num1++){
       if (match[num1].distance <= max(2 * min_dist, 30.0)){
           matches.push_back(match[num1]);
       }
   }
}

void pose_estimation_2d2d(
       std::vector<KeyPoint> &keypoints_1,
       std::vector<KeyPoint> &keypoints_2,
       std::vector<DMatch> &matches,
       Mat &R, Mat &t
){
    cout << "进入pose-estimation" << endl;

   //相机内参。TUM Freiburg2
   Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);

   //--把匹配点转换成vector形式
   vector<Point2f> points1;
   vector<Point2f> points2;

  // cout << "matches.size = " << matches.size() << endl;
   for (int i = 0; i < (int) matches.size(); i++){
       points1.push_back(keypoints_1[matches[i].queryIdx].pt);
       points2.push_back(keypoints_2[matches[i].queryIdx].pt);
   }

   //--计算基础矩阵
   //使用像素坐标计算基础矩阵
   //CV_FM_8POINT采用8点法
   Mat fundamental_matrix;
   fundamental_matrix = findFundamentalMat(points1, points2, CV_FM_RANSAC_ONLY);
   cout << " fundamental_matrix is " << endl << fundamental_matrix <<endl;

   //--计算本质矩阵
   //使用归一化坐标计算本质矩阵,需要知道相机的光心,焦距
   Point2d principal_point(325.1, 249.7); //相机光心,TUM dataset标定值
   double focal_length = 521; //相机焦距,TUM dataset标定值
   Mat essential_matrix;
   essential_matrix = findEssentialMat(points1, points2, focal_length, principal_point);
   cout << "essential_Matrix is " << endl <<essential_matrix <<endl;

   //--计算单应矩阵
   //--但是本例中场景不是平面,单应矩阵意义不大
   Mat homography_matrix;
   homography_matrix = findHomography(points1, points2, RANSAC, 3);
   cout << "homography_matrix is " << endl << homography_matrix << endl;

   //--从本质矩阵中恢复旋转和平移信息
   //SVD奇异值分解
   //此函数仅在OpenCV3中提供
   recoverPose(essential_matrix, points1, points2, R, t, focal_length, principal_point);
   cout << "R is " << endl << R << endl;
   cout << "t is " << endl << t <<endl;

}

运行结果

homography_matrix is 
[-0.6227391139971105, -2.886970200092988, 364.1872697938189;
 -0.2148405971333806, -0.972686679720263, 126.9249350232471;
 -0.001631390801007104, -0.007687557320241691, 0.9999999999999999]
R is 
[0.3194695470556352, 0.4729054474713082, -0.8211575039272485;
 0.4490655530003259, -0.8386377196567202, -0.3082643415601096;
 -0.8344335429574837, -0.2702724790298453, -0.4802847587280729]
t is 
[0.2296954847355427;
 -0.1516112670155564;
 0.9613813020888443]
位姿估计 i= 2
对第 2 t^R= [-0.3052136993786365, 0.8472269758895471, 0.3691761548756424;
 0.4987976662552955, 0.5167228229363109, -0.6791262298784655;
 0.1515835126764352, -0.1209335034643769, -0.1953036569510742]
 --Max dist : 85.000000 
 --Min dist : 5.000000 
进入pose-estimation
 fundamental_matrix is 
[-6.255652045940543e-06, 1.679691660890056e-05, -0.0005118221938695405;
 -5.358345603116775e-05, 0.0001784510571767766, -0.005452954314486946;
 0.01061051013232514, -0.03275577826825726, 1]
essential_Matrix is 
[0.004266503965144853, -0.6636712990779564, -0.1952643925552019;
 0.6959001651454869, -0.03315309018943863, 0.04331212815465878;
 0.1142026543150515, 0.1367396083384311, 0.04900965516181151]
homography_matrix is 
[-1.133399786845522, 0.2333755152279524, 382.6506100442739;
 -0.3823145696849643, 0.07505512543265744, 129.6483054540828;
 -0.002966733373018169, 0.0006222148878302129, 1]
R is 
[0.9606504274809822, -0.072697821483587, 0.2680779418977755;
 0.03923589585057277, 0.9909803233208437, 0.1281348635919174;
 -0.2749750909762716, -0.1125745332766574, 0.9548380353757032]
t is 
[0.2068527047159039;
 -0.159673994852563;
 0.9652544607094746]
位姿估计 i= 1
对第 1 t^R= [0.006033747771430509, -0.9385729521138152, -0.2761455522001097;
 0.9841514516064251, -0.04688554978048633, 0.06125259905155897;
 0.1615069425913533, 0.1933790086257947, 0.0693101190170626]
estimate 100 pose cost = 57.3466 seconds.

你可能感兴趣的:(slambook,CV,opencv,计算机视觉,slam)