Using SIFT, SURF, and the other nonfree algorithms in OpenCV requires the header #include <opencv2/nonfree/nonfree.hpp>
and a call to initModule_nonfree(); to initialize the module.
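A minimal setup sketch (assuming OpenCV 2.4.x built with the nonfree module; the final check only confirms that SIFT was registered):

#include <opencv2/nonfree/nonfree.hpp>   // declares initModule_nonfree(), SIFT, SURF
#include <opencv2/features2d/features2d.hpp>

int main()
{
    cv::initModule_nonfree();            // register SIFT/SURF with the create() factories
    cv::Ptr<cv::FeatureDetector> sift = cv::FeatureDetector::create("SIFT");
    return sift.empty() ? 1 : 0;         // empty() means SIFT was not registered
}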
A FeatureDetector object gives access to a variety of feature-detection methods through one factory function:
Ptr<FeatureDetector> FeatureDetector::create(const string& detectorType);
Supported values of detectorType (a usage sketch follows the list):
“FAST” – FastFeatureDetector
“STAR” – StarFeatureDetector
“SIFT” – SIFT (nonfree module)
“SURF” – SURF (nonfree module)
“ORB” – ORB
“MSER” – MSER
“GFTT” – GoodFeaturesToTrackDetector
“HARRIS” – GoodFeaturesToTrackDetector with Harris detector enabled
“Dense” – DenseFeatureDetector
“SimpleBlob” – SimpleBlobDetector
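The usage sketch referenced above: a hedged example that selects detectors purely by their string names (OpenCV 2.4.x assumed; "1.jpg" and the chosen names are placeholders only):

#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;

int main()
{
    initModule_nonfree();                             // only needed for "SIFT"/"SURF"
    Mat img = imread("1.jpg");                        // placeholder image path
    if (img.empty()) return -1;

    const char* names[] = { "FAST", "ORB", "SIFT" };  // any names from the list above
    for (int i = 0; i < 3; i++)
    {
        Ptr<FeatureDetector> detector = FeatureDetector::create(names[i]);
        if (detector.empty()) { cout << names[i] << ": not available" << endl; continue; }
        vector<KeyPoint> keypoints;
        detector->detect(img, keypoints);
        cout << names[i] << ": " << keypoints.size() << " keypoints" << endl;
    }
    return 0;
}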
A DescriptorExtractor object computes the descriptor (feature-vector) matrix for a set of keypoints:
Ptr<DescriptorExtractor> DescriptorExtractor::create(const string& descriptorExtractorType);
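A hedged fragment of the detect-then-compute flow (it assumes an image and its detected keypoints already exist, and that the nonfree module has been initialized so "SIFT" is available; the function name is only illustrative):

#include <opencv2/features2d/features2d.hpp>
#include <vector>
using namespace cv;

// Turn detected keypoints into a descriptor matrix: one row per keypoint.
Mat computeDescriptors(const Mat& img, std::vector<KeyPoint>& keypoints)
{
    Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("SIFT");
    Mat descriptors;
    if (!extractor.empty())
        extractor->compute(img, keypoints, descriptors);  // for SIFT: CV_32F, 128 columns
    return descriptors;
}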
A DescriptorMatcher object matches descriptors between two images:
Ptr<DescriptorMatcher> DescriptorMatcher::create(const string& descriptorMatcherType);
Matchers for float descriptors: "FlannBased", "BruteForce" (L2 norm), "BruteForce-L1".
Matchers for uchar (binary) descriptors: "BruteForce-Hamming", "BruteForce-HammingLUT".
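A small sketch following the rule just stated, choosing the matcher by name (the function name is only illustrative):

#include <opencv2/features2d/features2d.hpp>
#include <string>
#include <vector>
using namespace cv;

// Match two descriptor matrices with a matcher chosen by name:
// "BruteForce" or "FlannBased" for float descriptors (SIFT, SURF),
// "BruteForce-Hamming" for uchar/binary descriptors (ORB, BRIEF).
std::vector<DMatch> matchByName(const Mat& desc1, const Mat& desc2, const std::string& matcherType)
{
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(matcherType);
    std::vector<DMatch> matches;
    if (!matcher.empty())
        matcher->match(desc1, desc2, matches);  // best match in desc2 for each row of desc1
    return matches;
}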
Drawing the matched keypoints:
CV_EXPORTS void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1,
const Mat& img2, const vector<KeyPoint>& keypoints2,
const vector<DMatch>& matches1to2, Mat& outImg,
const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
const vector<char>& matchesMask=vector<char>(), int flags=DrawMatchesFlags::DEFAULT );
img1 : first source image
keypoints1 : keypoints of the first source image
img2 : second source image
keypoints2 : keypoints of the second source image
matches1to2 : matches from the keypoints of image 1 to the keypoints of image 2
outImg : output image; its exact content depends on flags
matchColor : color of the matches (keypoints and connecting lines); if matchColor==Scalar::all(-1), colors are chosen randomly
singlePointColor : color of single (unmatched) keypoints; if singlePointColor==Scalar::all(-1), colors are chosen randomly
matchesMask : mask deciding which matches are drawn; if empty, all matches are drawn
flags : flags defined by DrawMatchesFlags
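For illustration, a hedged example call with every argument spelled out (it assumes keypoints and matches produced as in the program below, with using namespace cv and std; the colors and flags are arbitrary choices):

Mat img_matches;
drawMatches(img1, keypoints1, img2, keypoints2, matches1to2, img_matches,
            Scalar(0, 255, 0),                          // matchColor: green keypoints and lines
            Scalar::all(-1),                            // singlePointColor: random color per unmatched keypoint
            vector<char>(),                             // matchesMask: empty, so every match is drawn
            DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);  // skip unmatched keypoints
imshow("matches", img_matches);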
Program
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;
void siftDetector(Mat& img1, Mat& img2);
int main()
{
Mat img1 = imread("1.jpg");
Mat img2 = imread("2.jpg");
if (!img1.data || !img2.data)
{
cout << "Failed to read the input images" << endl;
return -1;
}
imshow("src", img1);
siftDetector(img1,img2);
waitKey(0);
return 0;
}
void siftDetector(Mat& img1,Mat& img2)
{
initModule_nonfree();//initialize the nonfree module, required when using SIFT or SURF
Ptr<FeatureDetector> detector = FeatureDetector::create("SIFT");//create a SIFT feature detector
Ptr<DescriptorExtractor> descriptor_extractor = DescriptorExtractor::create("SIFT");//create a descriptor (feature-vector) extractor
Ptr<DescriptorMatcher> descriptor_matcher = DescriptorMatcher::create("BruteForce");//create a descriptor matcher
if (detector.empty() || descriptor_extractor.empty())
cout << "Failed to create the detector or extractor" << endl;
//feature point detection (the images were read in main())
double t = getTickCount();//start time
vector<KeyPoint> keyPoint1, keyPoint2;
detector->detect(img1, keyPoint1);//detect SIFT keypoints in image 1 and store them in keyPoint1
detector->detect(img2, keyPoint2);
cout << "Number of keypoints in image 1: " << keyPoint1.size() << endl;
cout << "Number of keypoints in image 2: " << keyPoint2.size() << endl;
//compute the descriptor matrix (feature-vector matrix) from the keypoints
Mat descriptors1, descriptors2;
descriptor_extractor->compute(img1, keyPoint1, descriptors1);
descriptor_extractor->compute(img2, keyPoint2, descriptors2);
t = ((double)getTickCount() - t) / getTickFrequency();
cout << "Sift算法用时:" << t << " 秒" << endl;
cout << "图像1特征描述矩阵大小:" << descriptors1.size() << ", 特征向量个数 :"
<< descriptors1.rows << ", 维数:" << descriptors1.cols << endl;
cout << "图像2特征描述矩阵大小:" << descriptors2.size() << ", 特征向量个数 :"
<< descriptors2.rows << ", 维数:" << descriptors2.cols << endl;
//draw the keypoints
Mat img_keyPoint1, img_keyPoint2;
drawKeypoints(img1, keyPoint1, img_keyPoint1);
drawKeypoints(img2, keyPoint2, img_keyPoint2);
/*imshow("img1",img1);
imshow("img_keyPoint1", img_keyPoint1);*/
//feature matching
vector<DMatch> matches;//matching results
descriptor_matcher->match(descriptors1, descriptors2, matches);//match the descriptor matrices of the two images
cout << "Number of matches: " << matches.size() << endl;
//find the maximum and minimum distances among the matches
//the distance is the Euclidean distance between two feature vectors; it measures how different they are, so a smaller value means a closer match
double max_dist = 0;
double min_dist = 100;
double dist;
for (size_t i = 0; i < matches.size(); i++)
{
dist = matches[i].distance;
if (dist < min_dist)
min_dist = dist;
if (dist>max_dist)
max_dist = dist;
}
cout << "最大距离:" << max_dist << endl;
cout << "最小距离:" << min_dist << endl;
//筛选出较好的匹配点
vectorgoodMatches;
//struct DMatch
//{ //three constructors
// DMatch() : queryIdx(-1), trainIdx(-1), imgIdx(-1), distance(FLT_MAX) {}
// DMatch( int _queryIdx, int _trainIdx, float _distance ) : queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(-1), distance(_distance) {}
// DMatch( int _queryIdx, int _trainIdx, int _imgIdx, float _distance ) : queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(_imgIdx), distance(_distance) {}
// CV_PROP_RW int queryIdx; //index of this match's descriptor in the query image (query descriptor index)
// CV_PROP_RW int trainIdx; //index of this match's descriptor in the train (template) image (train descriptor index)
// CV_PROP_RW int imgIdx; //index of the train image, when matching against several images (train image index)
// CV_PROP_RW float distance;//Euclidean distance between the two feature vectors; smaller means a better match
// bool operator<( const DMatch &m ) const { return distance < m.distance; } };
for (int i = 0; i < matches.size(); i++)
{
if (matches[i].distance < 0.31*max_dist)
goodMatches.push_back(matches[i]);//keep matches whose distance is well below the maximum
}
//画出匹配结果
Mat img_matches;
drawMatches(img1, keyPoint1, img2, keyPoint2, goodMatches, img_matches, CV_RGB(255, 0, 0), CV_RGB(0, 255, 0), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
imshow("MatchSIFT", img_matches);
waitKey(0);
}
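The program above keeps a match when its distance is below a fraction (0.31) of the maximum distance. A common alternative, not used in the original code and only sketched here under the same variable names, is Lowe's ratio test built on knnMatch:

// Alternative filtering: Lowe's ratio test. Keep a match only when its best
// distance is clearly smaller than the second-best distance (0.7 is a typical ratio).
vector<vector<DMatch> > knnMatches;
descriptor_matcher->knnMatch(descriptors1, descriptors2, knnMatches, 2); // 2 nearest neighbours per query descriptor
vector<DMatch> ratioMatches;
for (size_t i = 0; i < knnMatches.size(); i++)
{
    if (knnMatches[i].size() == 2 &&
        knnMatches[i][0].distance < 0.7f * knnMatches[i][1].distance)
        ratioMatches.push_back(knnMatches[i][0]);
}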