OpenCV2 特征检测与匹配

由于OpenCV3 中SIFT、SURF等特征检测算子(属于nonfree模块)的稳定版代码已经从官方主仓库转移到opencv_contrib的xfeatures2d模块中,所以我重新安装了OpenCV2.4.9版本。

特征检测算子,可以用来检测两幅图的稳定特征,来完成配准,也可以来进行三维重建,图像拼接的任务。

具有尺度、旋转不变性的特征检测算子SIFT,检测的特征很稳定,且精度较高,但计算速度不快。因此有了SURF算子(速度约为SIFT的3倍,精度有所牺牲),还有ORB算子:速度非常快,约为SIFT的100倍、SURF的10倍,但特征不具有尺度和旋转不变性(由于速度快,所以三维重建中用得多)。

SURF特征检测算子:
代码如下:

#include "highgui/highgui.hpp"    
#include "opencv2/nonfree/nonfree.hpp"    
#include "opencv2/legacy/legacy.hpp"   
#include <iostream>
using namespace cv;
using namespace std;

// SURF feature detection + FLANN nearest-neighbour matching (OpenCV 2.4.x).
// Loads two images, detects SURF keypoints, matches descriptors and shows
// the raw (unfiltered) matches.
int main()
{
	Mat image01 = imread("4.jpg", 1);    // right image
	Mat image02 = imread("3.jpg", 1);    // left image
	if (image01.empty() || image02.empty())
	{
		cout << "failed to load input images" << endl;
		return -1;
	}
	namedWindow("p2", 0);
	namedWindow("p1", 0);
	imshow("p2", image01);
	imshow("p1", image02);

	// Convert to grayscale. imread returns BGR, so CV_BGR2GRAY is the
	// correct code (the original CV_RGB2GRAY swapped the R/B weights).
	Mat image1, image2;
	cvtColor(image01, image1, CV_BGR2GRAY);
	cvtColor(image02, image2, CV_BGR2GRAY);

	// Detect keypoints. 800 is the Hessian threshold: larger values keep
	// fewer but more stable keypoints.
	SurfFeatureDetector surfDetector(800);
	vector<KeyPoint> keyPoint1, keyPoint2;
	surfDetector.detect(image1, keyPoint1);
	surfDetector.detect(image2, keyPoint2);

	// Compute SURF descriptors for the detected keypoints.
	SurfDescriptorExtractor SurfDescriptor;
	Mat imageDesc1, imageDesc2;
	SurfDescriptor.compute(image1, keyPoint1, imageDesc1);
	SurfDescriptor.compute(image2, keyPoint2, imageDesc2);

	// Match descriptors with FLANN (kd-tree on float descriptors); every
	// descriptor of image1 is paired with its nearest neighbour in image2.
	FlannBasedMatcher matcher;
	vector<DMatch> matchePoints;
	matcher.match(imageDesc1, imageDesc2, matchePoints, Mat());
	cout << "total match points: " << matchePoints.size() << endl;

	Mat img_match;
	drawMatches(image01, keyPoint1, image02, keyPoint2, matchePoints, img_match);
	namedWindow("match", 0);
	imshow("match", img_match);
	imwrite("match.jpg", img_match);
	waitKey();
	return 0;
}

效果图如下:

斜率大的直线应该是错误的匹配,匹配效果一般。为了排除因为图像遮挡和背景混乱而产生的无匹配关系的关键点,SIFT的作者Lowe提出了比较最近邻距离与次近邻距离的SIFT匹配方式:取一幅图像中的一个SIFT关键点,并找出其与另一幅图像中欧式距离最近的前两个关键点,在这两个关键点中,如果最近的距离除以次近的距离得到的比率ratio少于某个阈值T,则接受这一对匹配点。因为对于错误匹配,由于特征空间的高维性,相似的距离可能有大量其他的错误匹配,从而它的ratio值比较高。显然降低这个比例阈值T,SIFT匹配点数目会减少,但更加稳定,反之亦然。

Lowe推荐ratio的阈值为0.8,但有作者对大量存在尺度、旋转和亮度变化的图片对进行匹配,结果表明ratio取值在0.4~0.6之间最佳:小于0.4时匹配点很少,大于0.6时则存在大量错误匹配点。所以建议ratio的取值原则如下:
ratio=0.4:对于准确度要求高的匹配;
ratio=0.6:对于匹配点数目要求比较多的匹配;
ratio=0.5:一般情况下。

#include "highgui/highgui.hpp"    
#include "opencv2/nonfree/nonfree.hpp"    
#include "opencv2/legacy/legacy.hpp"   
#include <iostream>
using namespace cv;
using namespace std;

// SURF detection + FLANN kNN matching, filtered with Lowe's ratio test
// (keep a match only when the best distance is clearly smaller than the
// second-best distance).
int main()
{
	Mat image01 = imread("4.jpg", 1);
	Mat image02 = imread("3.jpg", 1);
	if (image01.empty() || image02.empty())
	{
		cout << "failed to load input images" << endl;
		return -1;
	}
	imshow("p2", image01);
	imshow("p1", image02);

	// imread returns BGR, so CV_BGR2GRAY is the correct conversion code.
	Mat image1, image2;
	cvtColor(image01, image1, CV_BGR2GRAY);
	cvtColor(image02, image2, CV_BGR2GRAY);

	// Hessian threshold 2000: larger -> fewer but more stable keypoints.
	SurfFeatureDetector surfDetector(2000);
	vector<KeyPoint> keyPoint1, keyPoint2;
	surfDetector.detect(image1, keyPoint1);
	surfDetector.detect(image2, keyPoint2);

	// Compute SURF descriptors for matching.
	SurfDescriptorExtractor SurfDescriptor;
	Mat imageDesc1, imageDesc2;
	SurfDescriptor.compute(image1, keyPoint1, imageDesc1);
	SurfDescriptor.compute(image2, keyPoint2, imageDesc2);

	// Train the matcher on image1's descriptors, then query with image2's,
	// requesting the two nearest neighbours of every query descriptor.
	FlannBasedMatcher matcher;
	vector<vector<DMatch> > matchePoints;
	vector<DMatch> GoodMatchePoints;
	vector<Mat> train_desc(1, imageDesc1);
	matcher.add(train_desc);
	matcher.train();
	matcher.knnMatch(imageDesc2, matchePoints, 2);
	cout << "total match points: " << matchePoints.size() << endl;

	// Lowe's ratio test with threshold 0.6; guard against queries that
	// returned fewer than two neighbours.
	for (size_t i = 0; i < matchePoints.size(); i++)
	{
		if (matchePoints[i].size() >= 2 &&
			matchePoints[i][0].distance < 0.6 * matchePoints[i][1].distance)
		{
			GoodMatchePoints.push_back(matchePoints[i][0]);
		}
	}

	Mat SURF_match;
	drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, SURF_match);
	imshow("SURF_match ", SURF_match);
	waitKey();
	return 0;
}

#include "highgui/highgui.hpp"    
#include "opencv2/nonfree/nonfree.hpp"    
#include "opencv2/legacy/legacy.hpp"   
#include <iostream>
using namespace cv;
using namespace std;

// SIFT detection + FLANN kNN matching with Lowe's ratio test.
int main()
{
	Mat image01 = imread("4.jpg", 1);
	Mat image02 = imread("3.jpg", 1);
	if (image01.empty() || image02.empty())
	{
		cout << "failed to load input images" << endl;
		return -1;
	}
	imshow("p2", image01);
	imshow("p1", image02);

	// imread returns BGR, so CV_BGR2GRAY is the correct conversion code.
	Mat image1, image2;
	cvtColor(image01, image1, CV_BGR2GRAY);
	cvtColor(image02, image2, CV_BGR2GRAY);

	// For SIFT the constructor argument is nFeatures (the maximum number
	// of keypoints to retain), not a Hessian threshold as for SURF.
	SiftFeatureDetector siftDetector(800);
	vector<KeyPoint> keyPoint1, keyPoint2;
	siftDetector.detect(image1, keyPoint1);
	siftDetector.detect(image2, keyPoint2);

	// Compute SIFT descriptors for matching.
	SiftDescriptorExtractor SiftDescriptor;
	Mat imageDesc1, imageDesc2;
	SiftDescriptor.compute(image1, keyPoint1, imageDesc1);
	SiftDescriptor.compute(image2, keyPoint2, imageDesc2);

	// Train on image1's descriptors, query with image2's, 2-NN per query.
	FlannBasedMatcher matcher;
	vector<vector<DMatch> > matchePoints;
	vector<DMatch> GoodMatchePoints;

	vector<Mat> train_desc(1, imageDesc1);
	matcher.add(train_desc);
	matcher.train();

	matcher.knnMatch(imageDesc2, matchePoints, 2);
	cout << "total match points: " << matchePoints.size() << endl;

	// Lowe's ratio test with threshold 0.6; guard against queries that
	// returned fewer than two neighbours.
	for (size_t i = 0; i < matchePoints.size(); i++)
	{
		if (matchePoints[i].size() >= 2 &&
			matchePoints[i][0].distance < 0.6 * matchePoints[i][1].distance)
		{
			GoodMatchePoints.push_back(matchePoints[i][0]);
		}
	}

	Mat SIFT_match;
	drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, SIFT_match);
	imshow("SIFT_match ", SIFT_match);
	waitKey();
	return 0;
}

效果如下:

ORB:

#include "highgui/highgui.hpp"    
#include "opencv2/nonfree/nonfree.hpp"    
#include "opencv2/legacy/legacy.hpp"   
#include <iostream>
using namespace cv;
using namespace std;

// ORB detection + LSH-indexed FLANN matching (Hamming distance on binary
// descriptors) with Lowe's ratio test.
int main()
{
	Mat image01 = imread("4.jpg", 1);
	Mat image02 = imread("3.jpg", 1);
	if (image01.empty() || image02.empty())
	{
		cout << "failed to load input images" << endl;
		return -1;
	}
	imshow("p2", image01);
	imshow("p1", image02);

	// imread returns BGR, so CV_BGR2GRAY is the correct conversion code.
	Mat image1, image2;
	cvtColor(image01, image1, CV_BGR2GRAY);
	cvtColor(image02, image2, CV_BGR2GRAY);

	// 1000 = maximum number of keypoints to retain.
	OrbFeatureDetector OrbDetector(1000);
	vector<KeyPoint> keyPoint1, keyPoint2;
	OrbDetector.detect(image1, keyPoint1);
	OrbDetector.detect(image2, keyPoint2);

	// Compute binary ORB descriptors for matching.
	OrbDescriptorExtractor OrbDescriptor;
	Mat imageDesc1, imageDesc2;
	OrbDescriptor.compute(image1, keyPoint1, imageDesc1);
	OrbDescriptor.compute(image2, keyPoint2, imageDesc2);

	// ORB descriptors are binary, so the default kd-tree FLANN matcher does
	// not apply; build an LSH index and search with Hamming distance.
	flann::Index flannIndex(imageDesc1, flann::LshIndexParams(12, 20, 2), cvflann::FLANN_DIST_HAMMING);
	vector<DMatch> GoodMatchePoints;
	Mat matchIndex(imageDesc2.rows, 2, CV_32SC1), matchDistance(imageDesc2.rows, 2, CV_32FC1);
	flannIndex.knnSearch(imageDesc2, matchIndex, matchDistance, 2, flann::SearchParams());

	// Lowe's ratio test (threshold 0.6) to keep only distinctive matches.
	for (int i = 0; i < matchDistance.rows; i++)
	{
		if (matchDistance.at<float>(i, 0) < 0.6 * matchDistance.at<float>(i, 1))
		{
			DMatch dmatches(i, matchIndex.at<int>(i, 0), matchDistance.at<float>(i, 0));
			GoodMatchePoints.push_back(dmatches);
		}
	}

	Mat ORB_match;
	drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, ORB_match);
	imshow("ORB_match ", ORB_match);
	waitKey();
	return 0;
}

FAST(加速分割测试获得特征, Features from Accelerated Segment Test) 。 这种算子专门用来快速检测兴趣点, 只需要对比几个像素,就可以判断是否为关键点。

跟Harris检测器的情况一样, FAST算法源于对构成角点的定义。FAST对角点的定义基于候选特征点周围的图像强度值。 以某个点为中心作一个圆, 根据圆上的像素值判断该点是否为关键点。 如果存在这样一段圆弧, 它的连续长度超过周长的3/4, 并且它上面所有像素的强度值都与圆心的强度值明显不同(全部更黑或更亮) , 那么就认定这是一个关键点。
用这个算法检测兴趣点的速度非常快, 因此十分适合需要优先考虑速度的应用。 这些应用包括实时视觉跟踪、 目标识别等, 它们需要在实时视频流中跟踪或匹配多个点。
我们使用FastFeatureDetector 进行特征点提取,因为opencv没有提供fast专用的描述子提取器,所以我们借用SiftDescriptorExtractor 来实现描述子的提取。
要完成特征点的匹配,第一个步骤就是找出每幅图像的特征点,这叫做特征检测,比如我们使用的FastFeatureDetector、SiftFeatureDetector都是特征检测的模块。得到这些图像的特征点后,我们就对这些特征点进行进一步的分析,用一些数学上的特征对其进行描述,如梯度直方图、局部随机二值特征等。所以在这一步我们可以选择其他描述子提取器对这些点进行特征描述,进而完成特征点的精确匹配。在OpenCV中SURF、ORB、SIFT既包含FeatureDetector,又包含DescriptorExtractor,所以我们使用上述三种算法做特征匹配时,都用其自带的方法配套使用。除此之外,如果我们想用FAST角点检测并作特征点匹配该怎么办?此时可以使用FastFeatureDetector + BriefDescriptorExtractor 的方式,这种组合方式其实就是著名的ORB算法的雏形。所以特征点检测和特征点匹配是两个不同的步骤,我们只需根据自己项目的需求对这两个步骤的方法自由组合即可。

#include "highgui/highgui.hpp"    
#include "opencv2/nonfree/nonfree.hpp"    
#include "opencv2/legacy/legacy.hpp"   
#include <iostream>
using namespace cv;
using namespace std;

// FAST corner detection + SIFT descriptors + brute-force L2 matching with
// Lowe's ratio test. FAST has no dedicated descriptor in OpenCV 2.4, so a
// SIFT descriptor extractor is used on the FAST keypoints.
int main()
{
	Mat image01 = imread("4.jpg", 1);
	Mat image02 = imread("3.jpg", 1);
	if (image01.empty() || image02.empty())
	{
		cout << "failed to load input images" << endl;
		return -1;
	}
	imshow("p2", image01);
	imshow("p1", image02);

	// imread returns BGR, so CV_BGR2GRAY is the correct conversion code.
	Mat image1, image2;
	cvtColor(image01, image1, CV_BGR2GRAY);
	cvtColor(image02, image2, CV_BGR2GRAY);

	// 50 = intensity-difference threshold of the FAST corner test.
	FastFeatureDetector Detector(50);
	vector<KeyPoint> keyPoint1, keyPoint2;
	Detector.detect(image1, keyPoint1);
	Detector.detect(image2, keyPoint2);

	// Describe the FAST keypoints with SIFT descriptors for matching.
	SiftDescriptorExtractor   Descriptor;
	Mat imageDesc1, imageDesc2;
	Descriptor.compute(image1, keyPoint1, imageDesc1);
	Descriptor.compute(image2, keyPoint2, imageDesc2);

	// Brute-force matcher with Euclidean (L2) distance on float descriptors;
	// train on image1's descriptors, query with image2's, 2-NN per query.
	BruteForceMatcher< L2<float> > matcher;
	vector<vector<DMatch> > matchePoints;
	vector<DMatch> GoodMatchePoints;
	vector<Mat> train_desc(1, imageDesc1);
	matcher.add(train_desc);
	matcher.train();
	matcher.knnMatch(imageDesc2, matchePoints, 2);
	cout << "total match points: " << matchePoints.size() << endl;

	// Lowe's ratio test with threshold 0.6; guard against queries that
	// returned fewer than two neighbours.
	for (size_t i = 0; i < matchePoints.size(); i++)
	{
		if (matchePoints[i].size() >= 2 &&
			matchePoints[i][0].distance < 0.6 * matchePoints[i][1].distance)
		{
			GoodMatchePoints.push_back(matchePoints[i][0]);
		}
	}

	Mat FAST_match;
	drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, FAST_match);
	imshow("FAST_match", FAST_match);
	imwrite("FAST_match.jpg", FAST_match);
	waitKey();
	return 0;
}


从效果来看,FAST的匹配点较少,但没有错误的匹配。

Harris角点:

#include "highgui/highgui.hpp"    
#include "opencv2/nonfree/nonfree.hpp"    
#include "opencv2/legacy/legacy.hpp"   
#include <iostream>
using namespace cv;
using namespace std;

// Harris-style corner detection (GoodFeaturesToTrack) + SIFT descriptors +
// brute-force L2 matching with Lowe's ratio test.
int main()
{
	Mat image01 = imread("4.jpg", 1);
	Mat image02 = imread("3.jpg", 1);
	if (image01.empty() || image02.empty())
	{
		cout << "failed to load input images" << endl;
		return -1;
	}
	imshow("p2", image01);
	imshow("p1", image02);

	// imread returns BGR, so CV_BGR2GRAY is the correct conversion code.
	Mat image1, image2;
	cvtColor(image01, image1, CV_BGR2GRAY);
	cvtColor(image02, image2, CV_BGR2GRAY);

	// 500 = maximum number of corners to return; larger -> more keypoints.
	GoodFeaturesToTrackDetector Detector(500);
	vector<KeyPoint> keyPoint1, keyPoint2;
	Detector.detect(image1, keyPoint1);
	Detector.detect(image2, keyPoint2);

	// Describe the corners with SIFT descriptors for matching.
	SiftDescriptorExtractor  Descriptor;
	Mat imageDesc1, imageDesc2;
	Descriptor.compute(image1, keyPoint1, imageDesc1);
	Descriptor.compute(image2, keyPoint2, imageDesc2);

	// Brute-force matcher with Euclidean (L2) distance; train on image1's
	// descriptors, query with image2's, 2-NN per query.
	BruteForceMatcher< L2<float> > matcher;
	vector<vector<DMatch> > matchePoints;
	vector<DMatch> GoodMatchePoints;
	vector<Mat> train_desc(1, imageDesc1);
	matcher.add(train_desc);
	matcher.train();
	matcher.knnMatch(imageDesc2, matchePoints, 2);
	cout << "total match points: " << matchePoints.size() << endl;

	// Lowe's ratio test with threshold 0.6; guard against queries that
	// returned fewer than two neighbours.
	for (size_t i = 0; i < matchePoints.size(); i++)
	{
		if (matchePoints[i].size() >= 2 &&
			matchePoints[i][0].distance < 0.6 * matchePoints[i][1].distance)
		{
			GoodMatchePoints.push_back(matchePoints[i][0]);
		}
	}

	Mat Harris_match;
	drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, Harris_match);
	imshow("Harris_match ", Harris_match);
	imwrite("Harris_match.jpg", Harris_match);
	waitKey();
	return 0;
}

你可能感兴趣的:(Opencv,特征检测与匹配)