This post follows on from my previous two articles, and I think it is a bit more concrete than either of them.
The code below is taken from the official OpenCV English documentation. I am keeping it here as study notes for myself, and I hope it is also useful to other learners.
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
using namespace cv;
int main( int argc, char** argv )
{
// First, load the template (object) image and the scene image
Mat img_object = imread("D:\\t.jpg");
Mat img_scene = imread("D:\\q.jpg");
if( !img_object.data || !img_scene.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
// Step 1 proper: create the feature detector. SurfFeatureDetector is a class, so it must be instantiated.
// 400 is the Hessian threshold: the larger it is, the fewer (but more distinctive) keypoints are kept.
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_object, keypoints_scene;//two containers to hold the detected keypoints
detector.detect( img_object, keypoints_object );//detect() is a member function of the detector; it finds the keypoints in each image
detector.detect( img_scene, keypoints_scene );
//-- Step 2: Calculate descriptors (feature vectors)
// Step 2: compute a descriptor (feature vector) for each keypoint; SurfDescriptorExtractor is likewise a class that must be instantiated
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;//two Mat objects to receive the computed descriptors
extractor.compute( img_object, keypoints_object, descriptors_object );
extractor.compute( img_scene, keypoints_scene, descriptors_scene );//compute() fills one descriptor row per keypoint
//-- Step 3: Matching descriptor vectors using FLANN matcher
// Match with FlannBasedMatcher; brute-force matching is the other common option (see the sketch after the full listing)
FlannBasedMatcher matcher;//instantiate the matcher
std::vector< DMatch > matches;//container that receives the match results
matcher.match( descriptors_object, descriptors_scene, matches );//one DMatch per object descriptor is stored in matches
double max_dist = 0; double min_dist = 100;//used below to filter the matches by distance
//-- Quick calculation of max and min distances between keypoints
// Find the largest and smallest descriptor distances among all matches
for( int i = 0; i < descriptors_object.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
// Keep only matches whose distance is less than 3*min_dist and collect them in good_matches
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )//filtering loop
{ if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
Mat img_matches;//output image: object and scene side by side, with the good matches drawn between them
drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object
// From here on, locate the matched object in the scene and outline it
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( int i = 0; i < good_matches.size(); i++ )
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );//queryIdx indexes the first descriptor set passed to match() (the object/template image)
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );//trainIdx indexes the second set (the scene image)
}
Mat H = findHomography( obj, scene, CV_RANSAC );//estimate the homography (perspective transform) mapping obj points to scene points, with RANSAC rejecting outliers
//-- Get the corners from the image_1 ( the object to be "detected" )
// The four corners of the object (template) image
std::vector<Point2f> obj_corners(4);
obj_corners[0] = Point2f( 0, 0 );
obj_corners[1] = Point2f( img_object.cols, 0 );
obj_corners[2] = Point2f( img_object.cols, img_object.rows );
obj_corners[3] = Point2f( 0, img_object.rows );
std::vector<Point2f> scene_corners(4);//will hold the corresponding corner positions in the scene image
perspectiveTransform( obj_corners, scene_corners, H);//apply the homography H to map the object corners into scene coordinates
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
// Draw the outline. In img_matches the scene image sits to the right of the object image, so every scene coordinate is shifted right by img_object.cols
line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
//-- Show detected matches
imshow( "Good Matches & Object detection", img_matches );
waitKey(0);
return 0;
}
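The listing uses FLANN-based matching; the brute-force matcher mentioned in the Step 3 comment is a drop-in alternative. Below is a minimal sketch of that variant, reusing descriptors_object and descriptors_scene from the code above. It is my own addition, not part of the original tutorial; NORM_L2 is chosen because SURF produces floating-point descriptors.

// Brute-force alternative to FlannBasedMatcher: compare every object descriptor
// against every scene descriptor and keep the nearest neighbour for each.
BFMatcher bf_matcher( NORM_L2 );  // L2 distance suits float descriptors such as SURF
std::vector< DMatch > bf_matches;
bf_matcher.match( descriptors_object, descriptors_scene, bf_matches );
// bf_matches can then be filtered with the same 3*min_dist rule as above.

Brute-force matching is exact but slower on large descriptor sets, which is why the tutorial prefers FLANN's approximate search.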
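One more note for readers on newer OpenCV versions: the opencv2/nonfree headers above exist only in the 2.4.x series. In OpenCV 3.x and later, SURF lives in the opencv_contrib module xfeatures2d. The sketch below shows only the lines that change, assuming opencv_contrib is installed; treat it as a rough guide rather than a verified build.

#include "opencv2/xfeatures2d.hpp"
using namespace cv::xfeatures2d;

// SURF is now created through a factory function, and detection + description
// can be done in one call; the rest of the pipeline stays essentially the same.
Ptr<SURF> surf = SURF::create( minHessian );
surf->detectAndCompute( img_object, noArray(), keypoints_object, descriptors_object );
surf->detectAndCompute( img_scene, noArray(), keypoints_scene, descriptors_scene );

In that API the CV_RANSAC flag passed to findHomography is spelled cv::RANSAC.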