OpenCV Feature Point Extraction and Matching (Various Methods)


1. The data structures involved

The KeyPoint data structure contains the following fields:

class KeyPoint
{
    Point2f pt;     // coordinates of the keypoint
    float size;     // diameter of the keypoint's neighborhood
    float angle;    // orientation of the keypoint, in [0, 360); a negative value means it is unused
    float response; // response strength of the keypoint
    int octave;     // pyramid octave (layer) the keypoint was extracted from
    int class_id;   // id that can be used to cluster keypoints
};

angle: the orientation of the keypoint. SIFT computes it by running a gradient computation on the neighborhood around the keypoint. Its initial value is -1.

class_id: when classifying images, class_id can be used to tell the feature points apart; it is -1 when unset.

octave: the pyramid octave (layer) from which the keypoint was extracted.

pt: the coordinates of the keypoint (pt.x, pt.y).

response: the response strength, i.e., how strong the keypoint is. It indicates how good the keypoint is; more precisely, how corner-like the point is.

size: the diameter of the keypoint's neighborhood.

Note: KeyPoint only stores some basic information about the feature points detected by OpenCV's SIFT implementation, namely the fields listed above. The feature vectors extracted by SIFT are not stored inside it: the descriptors are computed by SiftDescriptorExtractor and placed in a Mat. Newer versions of the SIFT class can also produce the descriptors directly.
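For instance, here is a minimal sketch of that workflow (assuming OpenCV 2.4.x with the nonfree module installed; the image path is a placeholder): detect the keypoints, then let SiftDescriptorExtractor write the descriptors into a Mat.

#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
using namespace cv;

int main()
{
    Mat img = imread("test.jpg", CV_LOAD_IMAGE_GRAYSCALE); // placeholder path

    SiftFeatureDetector detector;       // fills the KeyPoint vector only
    SiftDescriptorExtractor extractor;  // computes 128-dimensional float descriptors

    std::vector<KeyPoint> keypoints;
    Mat descriptors;                    // one row per keypoint, type CV_32F

    detector.detect(img, keypoints);
    extractor.compute(img, keypoints, descriptors);
    return 0;
}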

The DMatch data structure:

struct DMatch
{
    // three constructors
    DMatch() :
        queryIdx(-1), trainIdx(-1), imgIdx(-1),
        distance(std::numeric_limits<float>::max()) {}

    DMatch(int _queryIdx, int _trainIdx, float _distance) :
        queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(-1), distance(_distance) {}

    DMatch(int _queryIdx, int _trainIdx, int _imgIdx, float _distance) :
        queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(_imgIdx), distance(_distance) {}

    int queryIdx;   // index of this match's descriptor in the query image (input image 1)
    int trainIdx;   // index of this match's descriptor in the train (template) image (input image 2)
    int imgIdx;     // index of the train image (when matching against several images)
    float distance; // distance between the two feature vectors (L2 for float descriptors,
                    // Hamming for binary ones); the smaller, the better the match
    bool operator<(const DMatch &m) const;
};
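Because operator< compares by distance, a match list can be sorted directly so that the best (smallest-distance) matches come first. A minimal sketch, assuming matches has been filled by some matcher:

#include <algorithm>
#include <vector>
#include "opencv2/features2d/features2d.hpp"

std::vector<cv::DMatch> matches; // assumed filled by a DescriptorMatcher::match() call
std::sort(matches.begin(), matches.end()); // ascending by distance via DMatch::operator<
// matches.front() is now the best match, matches.back() the worst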

2. The various feature points

SIFT, SURF, FAST, ...

The descriptors produced by feature extraction come in different element types. Some are float, e.g., SURF (SurfDescriptorExtractor) and SIFT (SiftDescriptorExtractor); others are uchar, e.g., ORB and BriefDescriptorExtractor.

The matchers that go with float descriptors are FlannBasedMatcher, BruteForce<L2<float>>, and so on. The matchers that go with uchar descriptors are BruteForce<Hamming> and BruteForce<HammingLUT>. So ORB and BRIEF descriptors can only be matched by the brute-force method. The legacy brute-force matcher requires: #include <opencv2/legacy/legacy.hpp>
Also add the library to the linker input: opencv_legacy248d.lib (for the Release build: opencv_legacy248.lib; replace 248 with your own version number).
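As a hedged sketch of this pairing (OpenCV 2.4.x class names; the legacy BruteForceMatcher takes its distance functor as a template parameter):

#include "opencv2/features2d/features2d.hpp"
#include "opencv2/legacy/legacy.hpp"
using namespace cv;

// float descriptors (SIFT/SURF): L2 distance, FLANN-based or brute force
FlannBasedMatcher flannMatcher;
BruteForceMatcher< L2<float> > bfFloatMatcher;

// uchar (binary) descriptors (ORB/BRIEF): Hamming distance, brute force only
BruteForceMatcher<Hamming> bfHammingMatcher;
BruteForceMatcher<HammingLUT> bfHammingLutMatcher;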

The complete code is as follows:

#include <iostream>
#include <vector>
#include "opencv2/opencv.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/legacy/legacy.hpp"   // header for the brute-force matcher
#include "opencv2/nonfree/nonfree.hpp" // header for SIFT/SURF

#include "cv_import_static_lib.h"

using namespace std;
using namespace cv;

int main(){


    Mat img_1 = imread("E:\\3Dtestdata\\3.jpg");
    Mat img_2 = imread("E:\\3Dtestdata\\4.jpg");
    if (!img_1.data || !img_2.data)
    {
        cout << "error reading images " << endl;
        return -1;
    }

    vector<KeyPoint> keyPoints_1, keyPoints_2;
    Mat descriptors_1, descriptors_2;

    /*-----------------SIFT feature point----------------
    SIFT sift;
    sift(img_1, Mat(), keyPoints_1, descriptors_1);
    sift(img_2, Mat(), keyPoints_2, descriptors_2);
    */

    /*-----------------SURF feature point----------------
    SURF surf;
    surf(img_1, Mat(), keyPoints_1, descriptors_1);
    surf(img_2, Mat(), keyPoints_2, descriptors_2); 
    //SurfDescriptorExtractor extrator;           // alternative: compute the SURF descriptors separately
    //extrator.compute(img_1, keyPoints_1, descriptors_1);
    //extrator.compute(img_2, keyPoints_2, descriptors_2);
    */

    //-----------------ORB feature point----------------
    ORB orb;   // binary (uchar) descriptor; cannot be matched with FlannBasedMatcher
    orb(img_1, Mat(), keyPoints_1, descriptors_1);
    orb(img_2, Mat(), keyPoints_2, descriptors_2);


    /*-----------------MSER feature point----------------
    MSER mser;
     */

    /*-----------------FAST feature point----------------
    FastFeatureDetector fast1(100);   // detection threshold of 100
    FastFeatureDetector fast2(100);

    fast1.detect(img_1, keyPoints_1);
    fast2.detect(img_2, keyPoints_2);
    //SurfDescriptorExtractor extrator;           // alternative: describe the FAST keypoints with SURF instead
    //extrator.compute(img_1, keyPoints_1, descriptors_1);
    //extrator.compute(img_2, keyPoints_2, descriptors_2);

    OrbDescriptorExtractor extrator;
    extrator.compute(img_1, keyPoints_1, descriptors_1);
    extrator.compute(img_2, keyPoints_2, descriptors_2);
    */


    BruteForceMatcher<Hamming> matcher; // for binary descriptors such as ORB and BRIEF

    //FlannBasedMatcher matcher;   // only for float descriptors (SIFT/SURF)

    vector< DMatch > matches;

    matcher.match(descriptors_1, descriptors_2, matches);

    double max_dist = 0; double min_dist = 100;
    //-- Quick calculation of max and min distances between keypoints  
    for (int i = 0; i < descriptors_1.rows; i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    cout << "-- Max dist : " << max_dist << endl;
    cout << "-- Min dist : " << min_dist << endl;

    //-- Draw only "good" matches (i.e. whose distance is less than 0.6*max_dist)
    //-- PS.- radiusMatch can also be used here.
    vector< DMatch > good_matches;
    for (int i = 0; i < descriptors_1.rows; i++)
    {
        if (matches[i].distance < 0.6*max_dist)
        {
            good_matches.push_back(matches[i]);
        }
    }


    // vector<KeyPoint> m_LeftKey;
    // vector<KeyPoint> m_RightKey;
    // vector<DMatch> m_Matches;
    // The three variables above are assumed computed: the extracted keypoints and their matches; F is computed from them directly below.

    // Allocate space
    int ptCount = (int)matches.size();
    Mat p1(ptCount, 2, CV_32F);
    Mat p2(ptCount, 2, CV_32F);

    // Convert the matched KeyPoints to Mat
    Point2f pt;
    for (int i = 0; i < ptCount; i++)
    {
        pt = keyPoints_1[matches[i].queryIdx].pt;
        p1.at<float>(i, 0) = pt.x;
        p1.at<float>(i, 1) = pt.y;

        pt = keyPoints_2[matches[i].trainIdx].pt;
        p2.at<float>(i, 0) = pt.x;
        p2.at<float>(i, 1) = pt.y;
    }


    // Estimate the fundamental matrix F with RANSAC
    Mat m_Fundamental;
    vector<uchar> m_RANSACStatus;

    m_Fundamental = findFundamentalMat(p1, p2, m_RANSACStatus, FM_RANSAC);

    // Count the outliers
    int OutlinerCount = 0;
    for (int i = 0; i < ptCount; i++)
    {
        if (m_RANSACStatus[i] == 0) // a status of 0 marks an outlier
        {
            OutlinerCount++;
        }
    }

    // Collect the inliers
    vector<Point2f> m_LeftInlier;
    vector<Point2f> m_RightInlier;
    vector<DMatch> m_InlierMatches;
    // The three variables above hold the inlier points and their match relations
    int InlinerCount = ptCount - OutlinerCount;
    m_InlierMatches.resize(InlinerCount);
    m_LeftInlier.resize(InlinerCount);
    m_RightInlier.resize(InlinerCount);
    InlinerCount = 0;
    for (int i = 0; i < ptCount; i++)
    {
        if (m_RANSACStatus[i] != 0)
        {
            m_LeftInlier[InlinerCount].x = p1.at<float>(i, 0);
            m_LeftInlier[InlinerCount].y = p1.at<float>(i, 1);
            m_RightInlier[InlinerCount].x = p2.at<float>(i, 0);
            m_RightInlier[InlinerCount].y = p2.at<float>(i, 1);
            m_InlierMatches[InlinerCount].queryIdx = InlinerCount;
            m_InlierMatches[InlinerCount].trainIdx = InlinerCount;
            InlinerCount++;
        }
    }

    // Convert the inliers into a format drawMatches can use
    vector<KeyPoint> key1(InlinerCount);
    vector<KeyPoint> key2(InlinerCount);
    KeyPoint::convert(m_LeftInlier, key1);
    KeyPoint::convert(m_RightInlier, key2);

    // Show the inlier matches that survive the F estimation
    //Mat m_matLeftImage;
    //Mat m_matRightImage;
    // The two variables above would hold the left and right images
    Mat OutImage;
    drawMatches(img_1, key1, img_2, key2, m_InlierMatches, OutImage);

    //stereoRectifyUncalibrated();

    Mat img_matches;
    drawMatches(img_1, keyPoints_1, img_2, keyPoints_2,
    good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
    vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    imwrite("FASTResult.jpg", img_matches);
    imshow("Match", img_matches);

    imwrite("FmatrixResult.jpg", OutImage);
    imshow("Match2", OutImage);
    waitKey(0);

    return 0;
}

[Figure 1: the feature matching result]

Once good matches have been obtained, they are very helpful for later 3D reconstruction.
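For example (a hedged sketch, reusing m_Fundamental and m_LeftInlier from the code above), the estimated F directly yields the epipolar line in the other image for every inlier point, which is the usual first step toward rectification and dense reconstruction:

// Assumes m_Fundamental and m_LeftInlier computed as in the code above
vector<Vec3f> epilines; // each line stored as (a, b, c), meaning a*x + b*y + c = 0
computeCorrespondEpilines(m_LeftInlier, 1, m_Fundamental, epilines);
// epilines[i] is the epipolar line in the right image for point m_LeftInlier[i]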
