特征描述子
即对图像中特征点(关键点)及其邻域的数学描述,通过比较此描述去判断另一张图像是否含有相同特征。
暴力匹配:Brute-Force
图像匹配本质上是特征匹配。因为我们总可以将图像表示成多个特征向量的组成,因此如果两
幅图片具有相同的特征向量越多,则可以认为两幅图片的相似程度越高。而特征向量的相似程度通常是用它们之间的欧氏距离来衡量,欧式距离越小,则可以认为越相似。
代码:
#include
#include
#include
#include
#include
#include
using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
int main() {
Mat src1, src2;
src1 = imread("C:/Users/Administrator/Desktop/pic/04.jpg");
src2 = imread("C:/Users/Administrator/Desktop/pic/03.jpg");
imshow("src1", src1);
imshow("src2", src2);
int minHessian = 400;
Ptr detector = SURF::create(minHessian);
vector keypoints1;
vector keypoints2;
Mat descriptor1, descriptor2;
detector->detectAndCompute(src1,Mat(), keypoints1, descriptor1);
detector->detectAndCompute(src2,Mat(), keypoints2, descriptor2);
cout << "keypoint1.size=" << keypoints1.size() << endl;
cout << "keypoint2.size=" << keypoints2.size() << endl;
cout << "descriptor1 depth" << descriptor1.depth()<<",type=" <cout << "descriptor2 depth" << descriptor2.depth() << ",type=" << descriptor2.type() << endl;
BFMatcher matcher(NORM_L2); //Brute - Force 匹配,参数表示匹配的方式,默认NORM_L2(欧几里得) ,NORM_L1(绝对值的和)
vector matches;// 保存匹配的结果
matcher.match(descriptor1, descriptor2, matches); //在descriptor_2中暴力匹配descriptor_1中含有的特征描述子匹配
cout << "matches.size=" << matches.size() << endl;
Mat dst;
drawMatches(src1, keypoints1, src2, keypoints2, matches, dst);
imshow("dst", dst);
waitKey(0);
}
FLANN特征匹配
算法速度特别快
特征匹配记录下目标图像与待匹配图像的特征点(KeyPoint),并根据特征点集合构造特征量(descriptor),对这个特征量进行比较、筛选,最终得到一个匹配点的映射集合。我们也可以根据这个集合的大小来衡量两幅图片的匹配程度。
代码:
#include
#include
#include
#include
#include
#include
using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
int main() {
Mat src1, src2;
src1 = imread("C:/Users/Administrator/Desktop/pic/test3.jpg");
src2 = imread("C:/Users/Administrator/Desktop/pic/test4.jpg");
imshow("src1", src1);
imshow("src2", src2);
int minHessian = 300;
Ptr detector = SURF::create(minHessian);// 也可以用 SIFT 特征
vector keypoints1; // 保存特征点
vector keypoints2;
Mat descriptor1, descriptor2;// 特征描述子
detector->detectAndCompute(src1, Mat(), keypoints1, descriptor1);
detector->detectAndCompute(src2, Mat(), keypoints2, descriptor2);
cout << "keypoint1.size=" << keypoints1.size() << endl;
cout << "keypoint2.size=" << keypoints2.size() << endl;
cout << "descriptor1 depth" << descriptor1.depth() << ",type=" << descriptor1.type() << endl;
cout << "descriptor2 depth" << descriptor2.depth() << ",type=" << descriptor2.type() << endl;
FlannBasedMatcher matcher; //Flann匹配
vector matches;// 保存匹配的结果
matcher.match(descriptor1, descriptor2, matches); //在descriptor_2中匹配descriptor_1中含有的特征描述子匹配
cout << "matches.size=" << matches.size() << endl;
//找到好的匹配点
double minDist = 1000;
double maxDist = 0;
for (int i = 0; i < descriptor1.rows; i++) {
cout << "matches[" << i << "].queryIdx" << matches[i].queryIdx << ","<cout << "matches[" << i << "].trainIdx" << matches[i].trainIdx << ","<cout << "matches[" << i << "].distanIdx" << matches[i].distance<< ","<double dist = matches[i].distance;
if (dist > maxDist) {
maxDist = dist;
}
if (dist < minDist) {
minDist = dist;
}
}
cout << "maxdistance=" << maxDist << endl;
cout << "mindistance=" << minDist << endl;
vector goodMatches;
for (int i = 0; i < descriptor1.rows; i++) {
double dist = matches[i].distance;
if (dist < max(3 * minDist, 0.02)) {
goodMatches.push_back(matches[i]);
}
}
Mat dst;
drawMatches(src1, keypoints1, src2, keypoints2, goodMatches, dst, Scalar::all(-1),
Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
imshow("dst", dst);
waitKey(0);
}
平面对象识别
即使对象发生形变或位置变换,也能在场景图像中定位出该平面对象。
API:
// Finds the perspective transform between two planes and returns the
// transform matrix, i.e. the homography that maps the srcPoints vertices
// onto the dstPoints vertices.
Mat findHomography(
InputArray srcPoints, // srcPoints and dstPoints must have the same size
InputArray dstPoints,
int method = 0, // algorithm used to estimate the transform, e.g. RANSAC
double ransacReprojThreshold = 3,
OutputArray mask=noArray(),
const int maxIters = 2000,
const double confidence = 0.995
);
void perspectiveTransform( // applies a perspective transform to a point set
InputArray src,
OutputArray dst,
InputArray m // transform matrix
);
代码:
#include
#include
#include
#include
#include
#include
using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
int main()
{
Mat img1 = imread("C:/Users/Administrator/Desktop/pic/test3.jpg",0);
Mat img2 = imread("C:/Users/Administrator/Desktop/pic/test4.jpg",0);
imshow("object image", img1);
imshow("object in scene", img2);
// surf featurs extraction
int minHessian = 500;
Ptr detector = SURF::create(minHessian); // 也可以用 SIFT 特征
vector keypoints_obj; // 保存特征点
vector keypoints_scene;
Mat descriptor_obj, descriptor_scene; // 特征描述子
detector->detectAndCompute(img1, Mat(), keypoints_obj, descriptor_obj, false); // SURF特征检测,同时计算生成对应描述子
detector->detectAndCompute(img2, Mat(), keypoints_scene, descriptor_scene, false);
cout << "keypoints_obj.size=" << keypoints_obj.size() << endl; // keypoints_obj.size=786
cout << "keypoints_scene.size=" << keypoints_scene.size() << endl; // keypoints_scene.size = 1040
// descriptor_obj depth=5, type=5, size=[64 x 786] CV_32F单通道 每个关键点用64来描述?
cout << "descriptor_obj depth=" << descriptor_obj.depth() << ", type=" << descriptor_obj.type() << ", size=" << descriptor_obj.size() << endl;
// descriptor_scene depth = 5, type = 5, size = [64 x 1040]
cout << "descriptor_scene depth=" << descriptor_scene.depth() << ", type=" << descriptor_scene.type() << ", size=" << descriptor_scene.size() << endl;
// matching
FlannBasedMatcher matcher; // Flann 匹配类
vector matches; // 保存匹配结果
// 在descriptor_scene中匹配descriptor_obj的特征描述子,结果放到matches中, matches的长度与descriptor_obj的行数一致
matcher.match(descriptor_obj, descriptor_scene, matches); // descriptor_obj中的特征描述子都会在descriptor_scene找到一个匹配点(不管是否真的准确)
//descriptor_obj rows=786, cols=64
cout << "descriptor_obj rows=" << descriptor_obj.rows << ", cols=" << descriptor_obj.cols << endl;
cout << "matches.size=" << matches.size() << endl; // matches.size = 786
// find good matched points
double minDist = 1000;
double maxDist = 0;
for (int i = 0; i < descriptor_obj.rows; i++) {
cout << "matches[" << i << "].queryIdx=" << matches[i].queryIdx << ", "; // matches[i].queryIdx 要查询的特征描述子的下标
cout << "matches[" << i << "].trainIdx=" << matches[i].trainIdx << ", "; // matches[i].trainIdx 匹配到的要训练的特征描述子的下标
cout << "matches[" << i << "].distance=" << matches[i].distance << endl; // 这两个描述特征子的距离 匹配度?
double dist = matches[i].distance; // 要查询的特征描述子与匹配到的要训练的特征描述子之间的距离, 距离越小,匹配的越准确
if (dist > maxDist) {
maxDist = dist;
}
if (dist < minDist) {
minDist = dist;
}
}
printf("max distance=%f\n", maxDist);
printf("min distance=%f\n", minDist);
vector goodMatches; // 保存匹配度高的匹配点
for (int i = 0; i < descriptor_obj.rows; i++) {
double dist = matches[i].distance;
if (dist < max(3 * minDist, 0.02)) { // 阈值的选取 实际情况实际分析?
goodMatches.push_back(matches[i]);
}
}
Mat matchesImg; // 显示匹配的结果
drawMatches(img1, keypoints_obj, img2, keypoints_scene, goodMatches, matchesImg, Scalar::all(-1),
Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS); // Single keypoints will not be drawn.
imshow("Flann Matching Result", matchesImg);
// 发现查询描述子的特征所在的顶点与训练描述子的特征所在的顶点 之间的变换矩阵
vector obj;
vector objInScene;
for (size_t t = 0; t < goodMatches.size(); t++) {
obj.push_back(keypoints_obj[goodMatches[t].queryIdx].pt); // 获取要查询的描述子的关键点的位置
objInScene.push_back(keypoints_scene[goodMatches[t].trainIdx].pt); // 获取要训练的描述子的关键点的位置
}
Mat H = findHomography(obj, objInScene, RANSAC); // 发现 透视变换矩阵
cout << "goodMatches.size=" << goodMatches.size() << endl;
cout << "H depth=" << H.depth() << ", type=" << H.type() << ", size=" << H.size() << endl;
vector obj_corners(4); // 书本的四个顶点
vector scene_corners(4); // 保存训练图像中书本的顶点位置
obj_corners[0] = Point(0, 0); // 因为图1中书本的顶点就是图像的四个角,所以就这么设置以下的值了
obj_corners[1] = Point(img1.cols, 0);
obj_corners[2] = Point(img1.cols, img1.rows);
obj_corners[3] = Point(0, img1.rows);
perspectiveTransform(obj_corners, scene_corners, H); // 透视变换,通过变换矩阵找到训练图像中书本的顶点位置
// draw line 因为matchesImg是两张图像的合成,所以若要在matchesImg上显示找到的书本的位置,x坐标需要偏移img1.cols
line(matchesImg, scene_corners[0] + Point2f(img1.cols, 0), scene_corners[1] + Point2f(img1.cols, 0), Scalar(0, 0, 255), 2, 8, 0);
line(matchesImg, scene_corners[1] + Point2f(img1.cols, 0), scene_corners[2] + Point2f(img1.cols, 0), Scalar(0, 0, 255), 2, 8, 0);
line(matchesImg, scene_corners[2] + Point2f(img1.cols, 0), scene_corners[3] + Point2f(img1.cols, 0), Scalar(0, 0, 255), 2, 8, 0);
line(matchesImg, scene_corners[3] + Point2f(img1.cols, 0), scene_corners[0] + Point2f(img1.cols, 0), Scalar(0, 0, 255), 2, 8, 0);
Mat dst;
cvtColor(img2, dst, COLOR_GRAY2BGR);
line(dst, scene_corners[0], scene_corners[1], Scalar(0, 0, 255), 2, 8, 0); // 在训练图像上绘制找到的书本
line(dst, scene_corners[1], scene_corners[2], Scalar(0, 0, 255), 2, 8, 0);
line(dst, scene_corners[2], scene_corners[3], Scalar(0, 0, 255), 2, 8, 0);
line(dst, scene_corners[3], scene_corners[0], Scalar(0, 0, 255), 2, 8, 0);
imshow("find known object demo", matchesImg); // 在合成的matchesImg上显示找到的书本
imshow("Draw object", dst); // 在原训练图上显示找到的书本
waitKey(0);
}
AKAZE局部特征检测与匹配
局部特征相关算法在过去二十年期间风靡一时,其中代表的有SIFT、SURF算法等(广泛应用于目标检测、识别、匹配定位中),这两种算法是用金字塔策略构建高斯尺度空间(SURF算法采用框滤波来近似高斯函数)。不论SIFT还是SURF算法在构造尺度空间时候存在一个重要的缺点:高斯模糊不保留对象边界信息并且在所有尺度上平滑到相同程度的细节与噪声,影响定位的准确性和独特性。
针对高斯核函数构建尺度空间的缺陷,有学者提出了非线性滤波构建尺度空间:双边滤波、非线性扩散滤波方式。非线性滤波策略构建尺度空间主要能够局部自适应进行滤除小细节同时保留目标的边界使其尺度空间保留更多的特征信息。例如:BFSIFT采取双边滤波与双向匹配方式改善SIFT算法在SAR图像上匹配性能低下的问题(主要由于SAR图像斑点噪声严重),但是付出更高的计算复杂度。AKAZE作者之前提出的KAZE算法采取非线性扩散滤波相比于SIFT与SURF算法提高了可重复性和独特性。但是KAZE算法缺点在于计算密集,通过AOS数值逼近的策略来求解非线性扩散方程,虽然AOS求解稳定并且可并行化,但是需要求解大型线性方程组,在移动端实时性要求难以满足。
与SIFT、SURF算法相比,AKAZE算法更快同时与ORB、BRISK算法相比,可重复性与鲁棒性提升很大。
KAZE一词源自日语"風"(kaze,意为"风")的音译,用以致敬尺度空间分析的先驱Iijima。
步骤:
与SIFT、 SUFR比较:
更加稳定
非线性尺度空间
AKAZE速度更加快
代码:(特征检测)
#include
#include
#include
#include
#include
#include
using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
int main() {
Mat src = imread("C:/Users/Administrator/Desktop/pic/test3.jpg");
imshow("src", src);
Ptr kaze = KAZE::create();// KAZE局部特征检测类
vector keypoints; // 保存检测到的特征点
double t1 = getTickCount();
kaze->detect(src, keypoints);
double t2 = getTickCount();
double tkaze = 1000 * (t2 - t1) / getTickFrequency();
cout << "kazetime=" << tkaze << endl;
Mat dst;
drawKeypoints(src, keypoints, dst);
imshow("kaze", dst);
Ptrakeze = AKAZE::create();// AKAZE局部特征检测类
vector keypoints1;
t1 = getTickCount();
akeze->detect(src, keypoints1);
t2 = getTickCount();
double takaze = 1000 * (t2 - t1) / getTickFrequency();
cout <<"akezetime=" << takaze << endl;
Mat dst1;
drawKeypoints(src, keypoints1, dst1);
imshow("Akeze", dst1);
waitKey(0);
}
代码:(特征匹配)
#include
#include
#include
#include
#include
#include
using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
int main()
{
Mat img1 = imread("C:/Users/Administrator/Desktop/pic/test3.jpg", 0);
Mat img2 = imread("C:/Users/Administrator/Desktop/pic/test4.jpg", 0);
imshow("box image", img1);
imshow("scene image", img2);
// extract akaze features
Ptr detector = AKAZE::create(); // AKAZE局部特征检测类
vector keypoints_obj; // 保存检测到的特征点
vector keypoints_scene;
Mat descriptor_obj, descriptor_scene; // 保存生成的特征描述子
double t1 = getTickCount();
detector->detectAndCompute(img1, Mat(), keypoints_obj, descriptor_obj); // AKAZE特征检测,并且计算生成特征描述子
detector->detectAndCompute(img2, Mat(), keypoints_scene, descriptor_scene);
cout << "keypoints_obj.size=" << keypoints_obj.size() << endl;
cout << "keypoints_scene.size=" << keypoints_scene.size() << endl;
cout << "descriptor_obj depth=" << descriptor_obj.depth() << ", type=" << descriptor_obj.type() << ", size=" << descriptor_obj.size() << endl;
cout << "descriptor_scene depth=" << descriptor_scene.depth() << ", type=" << descriptor_scene.type() << ", size=" << descriptor_scene.size() << endl;
double t2 = getTickCount();
double tkaze = 1000 * (t2 - t1) / getTickFrequency(); // 乘以1000,转换成毫秒
printf("AKAZE Time consume(ms) : %f\n", tkaze); // AKAZE Time consume(ms) : 2205.513233
// matching 还可以使用BFMatcher matcher(NORM_L2);
// 如果直接使用 FlannBasedMatcher matcher; 会报错,因为其默认方式支持的特征描述子的数据类型是float型,而AKAZE的特征描述子的数据类型为uchar
FlannBasedMatcher matcher(new flann::LshIndexParams(20, 10, 2)); // new flann::LshIndexParams(20, 10, 2) 使Flann支持uchar类型的特征匹配
//BFMatcher matcher(NORM_L2);
vector matches;
matcher.match(descriptor_obj, descriptor_scene, matches);
cout << "matches.size=" << matches.size() << endl; // matches.size=460 长度与要查询的特征描述子一致
// draw matches(key points)
Mat akazeMatchesImg;
drawMatches(img1, keypoints_obj, img2, keypoints_scene, matches, akazeMatchesImg);
imshow("akaze match result", akazeMatchesImg);
// 筛选匹配都高的匹配值
vector goodMatches;
double minDist = 100000, maxDist = 0;
for (int i = 0; i < descriptor_obj.rows; i++) {
double dist = matches[i].distance;
if (dist < minDist) {
minDist = dist;
}
if (dist > maxDist) {
maxDist = dist;
}
}
printf("min distance : %f\n", minDist);
printf("max distance : %f\n", maxDist);
for (int i = 0; i < descriptor_obj.rows; i++) {
double dist = matches[i].distance;
if (dist < max(1.5*minDist, 0.02)) {
goodMatches.push_back(matches[i]);
}
}
cout << "goodMatches.size=" << goodMatches.size() << endl; // goodMatches.size=11
drawMatches(img1, keypoints_obj, img2, keypoints_scene, goodMatches, akazeMatchesImg, Scalar::all(-1),
Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS); // 不绘制单独的点
imshow("good match result", akazeMatchesImg);
Mat drawSinglePoints;
drawMatches(img1, keypoints_obj, img2, keypoints_scene, goodMatches, akazeMatchesImg);
imshow("drawSinglePoints", akazeMatchesImg);
waitKey(0);
}
BRISK特征提取:一种二进制特征描述算子
具有较好的旋转不变性,尺度不变性,鲁棒性等.
在图像配准应用中,速度比较:ORB>FREAK>BRISK>SURF>SIFT,在对有较大模糊的图像配准时,BRISK算法在其中表现最为出色。
Brisk(Binary Robust Invariant Scalable Keypoints)特征相比于SURF SIFT 有些步骤是相同的:
代码:
#include
#include
#include
#include
#include
#include
using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
int main() {
Mat src1 = imread("C:/Users/Administrator/Desktop/pic/test3.jpg", 0);
Mat src2 = imread("C:/Users/Administrator/Desktop/pic/test4.jpg", 0);
Ptr detector = BRISK::create();
vector keypoints1;
vector keypoints2;
Mat descriptor1, descriptor2;
detector->detectAndCompute(src1, Mat(), keypoints1, descriptor1);
detector->detectAndCompute(src2, Mat(), keypoints2, descriptor2);
cout << "keypoints1.size=" << keypoints1.size() << endl;
cout << "keypoints12.size=" << keypoints2.size() << endl;
cout << "descriptor1.depth=" << descriptor1.depth() << " descriptor1.size=" << descriptor1.size()<< endl;
cout << "descriptor2.depth=" << descriptor1.depth() << " descriptor2.size=" << descriptor2.size() << endl;
//匹配
BFMatcher matcher(NORM_L2);
vector matches;
matcher.match(descriptor1,descriptor2, matches);
//绘制
Mat dst;
drawMatches(src1, keypoints1, src2, keypoints2, matches,dst);
imshow("dst", dst);
//BRISK检测是比较准确的
Mat img = imread("C:/Users/Administrator/Desktop/pic/test3.jpg");
Ptrdetec = BRISK::create();
vector keypoints;
detec->detect(img, keypoints);
Mat result;
drawKeypoints(img, keypoints, result);
imshow("keypoint", result);
waitKey(0);
}
ORB特征提检测与匹配
ORB(Oriented FAST and Rotated BRIEF)是一种快速特征点提取和描述的算法。这个算法是由Ethan Rublee, Vincent Rabaud, Kurt Konolige以及Gary R. Bradski在2011年一篇名为"ORB: An Efficient Alternative to SIFT or SURF"的文章中提出。ORB算法分为两部分,分别是特征点提取和特征点描述。特征提取是由FAST(Features from Accelerated Segment Test)算法发展来的,特征点描述是根据BRIEF(Binary Robust Independent Elementary Features)特征描述算法改进的。ORB特征是将FAST特征点的检测方法与BRIEF特征描述子结合起来,并在它们原来的基础上做了改进与优化。据说,ORB算法的速度是SIFT的100倍,是SURF的10倍。
ORB是一种快速的特征提取和匹配的算法。它的速度非常快,但是相应的算法的质量较差。和sift相比,ORB使用二进制串作为特征描述,这就造成了高的误匹配率。
代码:
#include
#include
#include
#include
#include
#include
using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
int main()
{
//读取要匹配的两张图像
Mat img_1 = imread("C:/Users/Administrator/Desktop/pic/test3.jpg");
Mat img_2 = imread("C:/Users/Administrator/Desktop/pic/test4.jpg");
//初始化
//首先创建两个关键点数组,用于存放两张图像的关键点,数组元素是KeyPoint类型
vector keypoints_1, keypoints_2;
//创建两张图像的描述子,类型是Mat类型
Mat descriptors_1, descriptors_2;
//创建一个ORB类型指针orb,ORB类是继承自Feature2D类
//class CV_EXPORTS_W ORB : public Feature2D
//这里看一下create()源码:参数较多,不介绍。
//所以这里的语句就是创建一个Ptr类型的orb,用于接收ORB类中create()函数的返回值
Ptr orb = ORB::create();
//第一步:检测Oriented FAST角点位置.
//detect是Feature2D中的方法,orb是子类指针,可以调用
//看一下detect()方法的原型参数:需要检测的图像,关键点数组,第三个参数为默认值
/*
CV_WRAP virtual void detect( InputArray image,
CV_OUT std::vector& keypoints,
InputArray mask=noArray() );
*/
orb->detect(img_1, keypoints_1);
orb->detect(img_2, keypoints_2);
//第二步:根据角点位置计算BRIEF描述子
orb->compute(img_1, keypoints_1, descriptors_1);
orb->compute(img_2, keypoints_2, descriptors_2);
//定义输出检测特征点的图片。
Mat outimg1;
//注意看,这里并没有用到描述子,描述子的作用是用于后面的关键点筛选。
drawKeypoints(img_1, keypoints_1, outimg1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
imshow("ORB特征点", outimg1);
//第三步:对两幅图像中的BRIEF描述子进行匹配,使用 Hamming 距离
//创建一个匹配点数组,用于承接匹配出的DMatch,其实叫match_points_array更为贴切。matches类型为数组,元素类型为DMatch
vector matches;
//创建一个BFMatcher匹配器,BFMatcher类构造函数如下:两个参数都有默认值,但是第一个距离类型下面使用的并不是默认值,而是汉明距离
//CV_WRAP BFMatcher( int normType=NORM_L2, bool crossCheck=false );
BFMatcher matcher(NORM_HAMMING);
//调用matcher的match方法进行匹配,这里用到了描述子,没有用关键点。
//匹配出来的结果写入上方定义的matches[]数组中
matcher.match(descriptors_1, descriptors_2, matches);
//第四步:遍历matches[]数组,找出匹配点的最大距离和最小距离,用于后面的匹配点筛选。
//这里的距离是上方求出的汉明距离数组,汉明距离表征了两个匹配的相似程度,所以也就找出了最相似和最不相似的两组点之间的距离。
double min_dist = 0, max_dist = 0;//定义距离
for (int i = 0; i < descriptors_1.rows; ++i)//遍历
{
double dist = matches[i].distance;
if (distif (dist>max_dist) max_dist = dist;
}
printf("Max dist: %f\n", max_dist);
printf("Min dist: %f\n", min_dist);
//第五步:根据最小距离,对匹配点进行筛选,
//当描述自之间的距离大于两倍的min_dist,即认为匹配有误,舍弃掉。
//但是有时最小距离非常小,比如趋近于0了,所以这样就会导致min_dist到2*min_dist之间没有几个匹配。
// 所以,在2*min_dist小于30的时候,就取30当上限值,小于30即可,不用2*min_dist这个值了
vector good_matches;
for (int j = 0; j < descriptors_1.rows; ++j)
{
if (matches[j].distance <= max(2 * min_dist, 30.0))
good_matches.push_back(matches[j]);
}
//第六步:绘制匹配结果
Mat img_match;//所有匹配点图
drawMatches(img_1, keypoints_1, img_2, keypoints_2, matches, img_match);
imshow("所有匹配点对", img_match);
Mat img_goodmatch;//筛选后的匹配点图
drawMatches(img_1, keypoints_1, img_2, keypoints_2, good_matches, img_goodmatch);
imshow("筛选后的匹配点对", img_goodmatch);
waitKey(0);
}