Let's start with a few useful examples.
1.
#include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/nonfree/nonfree.hpp" #include "opencv2/nonfree/features2d.hpp" #include <iostream> #include <stdio.h> #include <stdlib.h> using namespace cv; using namespace std; int main() { initModule_nonfree();//初始化模块,使用SIFT或SURF时用到 Ptr<FeatureDetector> detector = FeatureDetector::create( "SIFT" );//创建SIFT特征检测器 Ptr<DescriptorExtractor> descriptor_extractor = DescriptorExtractor::create( "SIFT" );//创建特征向量生成器 Ptr<DescriptorMatcher> descriptor_matcher = DescriptorMatcher::create( "BruteForce" );//创建特征匹配器 if( detector.empty() || descriptor_extractor.empty() ) cout<<"fail to create detector!"; //读入图像 Mat img1 = imread("phone2.jpg"); Mat img2 = imread("phone3.jpg"); //特征点检测 double t = getTickCount();//当前滴答数 vector<KeyPoint> keypoints1,keypoints2; detector->detect( img1, keypoints1 );//检测img1中的SIFT特征点,存储到keypoints1中 detector->detect( img2, keypoints2 ); cout<<"图像1特征点个数:"<<keypoints1.size()<<endl; cout<<"图像2特征点个数:"<<keypoints2.size()<<endl; //输出特征点 cout<<"图像1的特征点:"<<endl; for(int i=0;i<keypoints1.size();i++) { cout<<"坐标:"<<keypoints1[i].pt; cout<<",邻域直径:"<<keypoints1[i].size; cout<<",方向:"<<keypoints1[i].angle; cout<<",octave:"<<keypoints1[i].octave; cout<<",id:"<<keypoints1[i].class_id<<endl; } //根据特征点计算特征描述子矩阵,即特征向量矩阵 Mat descriptors1,descriptors2; descriptor_extractor->compute( img1, keypoints1, descriptors1 ); descriptor_extractor->compute( img2, keypoints2, descriptors2 ); t = ((double)getTickCount() - t)/getTickFrequency(); cout<<"SIFT算法用时:"<<t<<"秒"<<endl; cout<<"图像1特征描述矩阵大小:"<<descriptors1.size() <<",特征向量个数:"<<descriptors1.rows<<",维数:"<<descriptors1.cols<<endl; cout<<"图像2特征描述矩阵大小:"<<descriptors2.size() <<",特征向量个数:"<<descriptors2.rows<<",维数:"<<descriptors2.cols<<endl; //输出特征描述 for(int i=0;i<128;i++) cout<<descriptors1.at<long>(0,i)<<","; //画出特征点 Mat img_keypoints1,img_keypoints2; drawKeypoints(img1,keypoints1,img_keypoints1,Scalar::all(-1),0); drawKeypoints(img2,keypoints2,img_keypoints2,Scalar::all(-1),0); //imshow("Src1",img_keypoints1); //imshow("Src2",img_keypoints2); //特征匹配 vector<DMatch> matches;//匹配结果 descriptor_matcher->match( descriptors1, descriptors2, matches );//匹配两个图像的特征矩阵 cout<<"Match个数:"<<matches.size()<<endl; //计算匹配结果中距离的最大和最小值 //距离是指两个特征向量间的欧式距离,表明两个特征的差异,值越小表明两个特征点越接近 double max_dist = 0; double min_dist = 100; for(int i=0; i<matches.size(); i++) { double dist = matches[i].distance; if(dist < min_dist) min_dist = dist; if(dist > max_dist) max_dist = dist; } cout<<"最大距离:"<<max_dist<<endl; cout<<"最小距离:"<<min_dist<<endl; //筛选出较好的匹配 vector<DMatch> goodMatches; for(int i=0; i<matches.size(); i++) { if(matches[i].distance < 0.5 * max_dist) { goodMatches.push_back(matches[i]); } } cout<<"goodMatch个数:"<<goodMatches.size()<<endl; //输出匹配结果 for(int i=0; i<goodMatches.size(); i++) { cout<<"queryIdx:"<<matches[i].queryIdx; cout<<",trainIdx:"<<matches[i].trainIdx; cout<<",imgIdx:"<<matches[i].imgIdx; cout<<",欧氏距离:"<<matches[i].distance<<endl; } //画出匹配结果 Mat img_matches; //红色连接的是匹配的特征点对,绿色是未匹配的特征点 drawMatches(img1,keypoints1,img2,keypoints2,goodMatches,img_matches, Scalar::all(-1)/*CV_RGB(255,0,0)*/,CV_RGB(0,255,0),Mat(),2); imshow("MatchSIFT",img_matches); waitKey(0); return 0; }
2.
LocalFeature.h
// Local image feature extraction and matching
// Author: www.icvpr.com
// Blog  : http://blog.csdn.net/icvpr

#ifndef _FEATURE_H_
#define _FEATURE_H_

#include <iostream>
#include <vector>
#include <string>

#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

class Feature
{
public:
    Feature();
    ~Feature();
    Feature(const string& detectType, const string& extractType, const string& matchType);

public:
    void detectKeypoints(const Mat& image, vector<KeyPoint>& keypoints);                       // detect keypoints
    void extractDescriptors(const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptor);   // compute descriptors
    void bestMatch(const Mat& queryDescriptor, Mat& trainDescriptor, vector<DMatch>& matches); // nearest-neighbour matching
    void knnMatch(const Mat& queryDescriptor, Mat& trainDescriptor, vector<vector<DMatch>>& matches, int k); // k-nearest-neighbour matching

    void saveKeypoints(const Mat& image, const vector<KeyPoint>& keypoints, const string& saveFileName = ""); // save keypoints to an image file
    void saveMatches(const Mat& queryImage, const vector<KeyPoint>& queryKeypoints,
                     const Mat& trainImage, const vector<KeyPoint>& trainKeypoints,
                     const vector<DMatch>& matches, const string& saveFileName = "");          // save the match result to an image file

private:
    Ptr<FeatureDetector> m_detector;
    Ptr<DescriptorExtractor> m_extractor;
    Ptr<DescriptorMatcher> m_matcher;

    string m_detectType;
    string m_extractType;
    string m_matchType;
};

#endif
LocalFeature.cpp

// Local image feature extraction and matching
// Author: www.icvpr.com
// Blog  : http://blog.csdn.net/icvpr

#include "LocalFeature.h"

Feature::Feature()
{
    m_detectType = "SIFT";
    m_extractType = "SIFT";
    m_matchType = "BruteForce"; // fixed: the original typo "FruteForce" would make DescriptorMatcher::create() fail

    initModule_nonfree();
}

Feature::~Feature()
{
}

Feature::Feature(const string& detectType, const string& extractType, const string& matchType)
{
    assert(!detectType.empty());
    assert(!extractType.empty());
    assert(!matchType.empty());

    m_detectType = detectType;
    m_extractType = extractType;
    m_matchType = matchType;

    initModule_nonfree();
}

void Feature::detectKeypoints(const Mat& image, std::vector<KeyPoint>& keypoints)
{
    assert(image.type() == CV_8UC1);
    assert(!m_detectType.empty());

    keypoints.clear();
    m_detector = FeatureDetector::create(m_detectType);
    m_detector->detect(image, keypoints);
}

void Feature::extractDescriptors(const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptor)
{
    assert(image.type() == CV_8UC1);
    assert(!m_extractType.empty());

    m_extractor = DescriptorExtractor::create(m_extractType);
    m_extractor->compute(image, keypoints, descriptor);
}

void Feature::bestMatch(const Mat& queryDescriptor, Mat& trainDescriptor, std::vector<DMatch>& matches)
{
    assert(!queryDescriptor.empty());
    assert(!trainDescriptor.empty());
    assert(!m_matchType.empty());

    matches.clear();
    m_matcher = DescriptorMatcher::create(m_matchType);
    m_matcher->add(std::vector<Mat>(1, trainDescriptor));
    m_matcher->train();
    m_matcher->match(queryDescriptor, matches);
}

void Feature::knnMatch(const Mat& queryDescriptor, Mat& trainDescriptor, std::vector<std::vector<DMatch>>& matches, int k)
{
    assert(k > 0);
    assert(!queryDescriptor.empty());
    assert(!trainDescriptor.empty());
    assert(!m_matchType.empty());

    matches.clear();
    m_matcher = DescriptorMatcher::create(m_matchType);
    m_matcher->add(std::vector<Mat>(1, trainDescriptor));
    m_matcher->train();
    m_matcher->knnMatch(queryDescriptor, matches, k);
}

void Feature::saveKeypoints(const Mat& image, const vector<KeyPoint>& keypoints, const string& saveFileName)
{
    assert(!saveFileName.empty());

    Mat outImage;
    cv::drawKeypoints(image, keypoints, outImage, Scalar(255, 255, 0), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

    string saveKeypointsImgName = saveFileName + "_" + m_detectType + ".jpg";
    imwrite(saveKeypointsImgName, outImage);
}

void Feature::saveMatches(const Mat& queryImage, const vector<KeyPoint>& queryKeypoints,
                          const Mat& trainImage, const vector<KeyPoint>& trainKeypoints,
                          const vector<DMatch>& matches, const string& saveFileName)
{
    assert(!saveFileName.empty());

    Mat outImage;
    cv::drawMatches(queryImage, queryKeypoints, trainImage, trainKeypoints, matches, outImage,
                    Scalar(255, 0, 0), Scalar(0, 255, 255), vector<char>(),
                    DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    string saveMatchImgName = saveFileName + "_" + m_detectType + "_" + m_extractType + "_" + m_matchType + ".jpg";
    imwrite(saveMatchImgName, outImage);
}
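Because the Feature class only stores type strings and defers everything to OpenCV's create() factories, it is not tied to SIFT. As a hedged illustration (my own sketch, not code from the original post; the image path and output prefix are placeholders), the same class can drive binary features such as ORB, matched with a Hamming-distance brute-force matcher. The driver program below then simply forwards whatever type strings it receives on the command line.

// A hedged usage sketch (assumption, not part of the original post).
// "ORB" and "BruteForce-Hamming" are standard OpenCV 2.x factory names.
#include <opencv2/opencv.hpp>
#include "LocalFeature.h"

int main()
{
    cv::Mat img = cv::imread("query.jpg", CV_LOAD_IMAGE_GRAYSCALE); // placeholder path
    if (img.empty()) return -1;

    Feature feature("ORB", "ORB", "BruteForce-Hamming"); // binary descriptors need Hamming distance

    std::vector<cv::KeyPoint> keypoints;
    feature.detectKeypoints(img, keypoints);

    cv::Mat descriptors;
    feature.extractDescriptors(img, keypoints, descriptors);

    feature.saveKeypoints(img, keypoints, "orb"); // writes orb_ORB.jpg with the naming scheme above
    return 0;
}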
main.cpp

// Local image feature extraction and matching
// Author: www.icvpr.com
// Blog  : http://blog.csdn.net/icvpr

#include <iostream>
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

#include "LocalFeature.h"

int main(int argc, char** argv)
{
    if (argc != 6)
    {
        cout << "wrong usage!" << endl;
        cout << "usage: .exe FAST SIFT BruteForce queryImage trainImage" << endl;
        return -1;
    }

    string detectorType = argv[1];
    string extractorType = argv[2];
    string matchType = argv[3];
    string queryImagePath = argv[4];
    string trainImagePath = argv[5];

    Mat queryImage = imread(queryImagePath, CV_LOAD_IMAGE_GRAYSCALE);
    if (queryImage.empty())
    {
        cout << "read failed" << endl;
        return -1;
    }

    Mat trainImage = imread(trainImagePath, CV_LOAD_IMAGE_GRAYSCALE);
    if (trainImage.empty())
    {
        cout << "read failed" << endl;
        return -1;
    }

    Feature feature(detectorType, extractorType, matchType);

    vector<KeyPoint> queryKeypoints, trainKeypoints;
    feature.detectKeypoints(queryImage, queryKeypoints);
    feature.detectKeypoints(trainImage, trainKeypoints);

    Mat queryDescriptor, trainDescriptor;
    feature.extractDescriptors(queryImage, queryKeypoints, queryDescriptor);
    feature.extractDescriptors(trainImage, trainKeypoints, trainDescriptor);

    vector<DMatch> matches;
    feature.bestMatch(queryDescriptor, trainDescriptor, matches);

    vector<vector<DMatch>> knnmatches;
    feature.knnMatch(queryDescriptor, trainDescriptor, knnmatches, 2);

    Mat outImage;
    feature.saveMatches(queryImage, queryKeypoints, trainImage, trainKeypoints, matches, "../");

    return 0;
}
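One loose end in the program above: knnmatches is computed with k = 2 but never used. A common way to put it to work is Lowe's ratio test, which keeps a match only when the best neighbour is clearly closer than the second best. The snippet below is my own sketch under that assumption (the 0.8 threshold and the "../ratio" output prefix are placeholders, not values from the original post); it would slot in right after the knnMatch call.

// A hedged sketch (not part of the original main): filter the 2-NN matches with
// Lowe's ratio test and save the result alongside the best-match output.
vector<DMatch> ratioMatches;
for (size_t i = 0; i < knnmatches.size(); i++)
{
    if (knnmatches[i].size() >= 2 &&
        knnmatches[i][0].distance < 0.8f * knnmatches[i][1].distance)
    {
        ratioMatches.push_back(knnmatches[i][0]); // keep only unambiguous matches
    }
}
feature.saveMatches(queryImage, queryKeypoints, trainImage, trainKeypoints,
                    ratioMatches, "../ratio");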