//utils.h
#ifndef UTILS_H
#define UTILS_H
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
using namespace cv;
// ORB settings (see the constructor sketch after this header)
const int ORB_MAX_KPTS = 1500;
const float ORB_SCALE_FACTOR = 1.5;
const int ORB_PYRAMID_LEVELS = 3;
const float ORB_EDGE_THRESHOLD = 31.0;
const int ORB_FIRST_PYRAMID_LEVEL = 0;
const int ORB_WTA_K = 2;
const int ORB_PATCH_SIZE = 31;
// BRISK settings
const float BRISK_HTHRES = 10.0;
const int BRISK_NOCTAVES = 6;
const float DRATIO = 0.8f; // NNDR Matching value
const float MIN_H_ERROR = 2.50f; // Maximum error in pixels to accept an inlier
void matches2points_nndr(const std::vector<cv::KeyPoint>& train,
                         const std::vector<cv::KeyPoint>& query,
                         const std::vector<std::vector<cv::DMatch> >& matches,
                         std::vector<cv::Point2f>& pmatches, float nndr);
void compute_inliers_ransac(const std::vector<cv::Point2f>& matches,
                            std::vector<cv::Point2f>& inliers,
                            float error, bool use_fund);
void draw_inliers(const cv::Mat& img1, const cv::Mat& img2, cv::Mat& img_com,
                  const std::vector<cv::Point2f>& ptpairs, int color);
typedef struct info
{
    double t;  // total running time in seconds
    int n1;    // number of keypoints in the src image
    int n2;    // number of keypoints in the dst image
    int m;     // number of putative matches
    int rm;    // number of correct (inlier) matches
} INFO;
void sift(char* path1, char* path2, INFO& info, bool show);
void surf(char* path1, char* path2, INFO& info, bool show);
void orb(char* path1, char* path2, INFO& info, bool show);
void brisk(char* path1, char* path2, INFO& info, bool show);
void freak(char* path1, char* path2, INFO& info, bool show);
void showInfo(INFO info);
#endif
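The ORB and BRISK constants at the top of utils.h line up with the constructor parameters of cv::ORB and cv::BRISK in OpenCV 2.4. The snippet below is only a sketch of how they could be consumed; the helper functions make_orb_detector and make_brisk_detector are illustrative and not part of the original project.
// Sketch only: assumes OpenCV 2.4.x and the constants declared in utils.h.
#include "utils.h"

cv::Ptr<cv::FeatureDetector> make_orb_detector()
{
    // cv::ORB(nfeatures, scaleFactor, nlevels, edgeThreshold, firstLevel, WTA_K, scoreType, patchSize)
    return new cv::ORB(ORB_MAX_KPTS, ORB_SCALE_FACTOR, ORB_PYRAMID_LEVELS,
                       (int)ORB_EDGE_THRESHOLD, ORB_FIRST_PYRAMID_LEVEL,
                       ORB_WTA_K, cv::ORB::HARRIS_SCORE, ORB_PATCH_SIZE);
}

cv::Ptr<cv::FeatureDetector> make_brisk_detector()
{
    // cv::BRISK(thresh, octaves, patternScale)
    return new cv::BRISK((int)BRISK_HTHRES, BRISK_NOCTAVES);
}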
//utils.cpp
#include "stdafx.h"
#include "utils.h"
#include <cstdio>
using namespace std;
/**
* @brief This function converts matches to points using nearest neighbor distance
* ratio matching strategy
* @param train Vector of keypoints from the first image
* @param query Vector of keypoints from the second image
* @param matches Vector of nearest neighbors for each keypoint
* @param pmatches Vector of putative matches
* @param nndr Nearest neighbor distance ratio value
*/
void matches2points_nndr(const std::vector<cv::KeyPoint>& train,
                         const std::vector<cv::KeyPoint>& query,
                         const std::vector<std::vector<cv::DMatch> >& matches,
                         std::vector<cv::Point2f>& pmatches, float nndr) {
float dist1 = 0.0, dist2 = 0.0;
for (size_t i = 0; i < matches.size(); i++) {
DMatch dmatch = matches[i][0];
dist1 = matches[i][0].distance;
dist2 = matches[i][1].distance;
if (dist1 < nndr*dist2) {
pmatches.push_back(train[dmatch.queryIdx].pt);
pmatches.push_back(query[dmatch.trainIdx].pt);
}
}
}
/**
* @brief This function computes the set of inliers estimating the fundamental matrix
* or a planar homography in a RANSAC procedure
* @param matches Vector of putative matches
* @param inliers Vector of inliers
* @param error The maximum error in pixels to accept an inlier
* @param use_fund Set to true if you want to compute a fundamental matrix
*/
void compute_inliers_ransac(const std::vector<cv::Point2f>& matches,
                            std::vector<cv::Point2f>& inliers,
                            float error, bool use_fund) {
vector<Point2f> points1, points2;
Mat H = Mat::zeros(3,3,CV_32F);
int npoints = matches.size()/2;
Mat status = Mat::zeros(npoints,1,CV_8UC1);
for (size_t i = 0; i < matches.size(); i+=2) {
points1.push_back(matches[i]);
points2.push_back(matches[i+1]);
}
if (use_fund == true){
H = findFundamentalMat(points1,points2,CV_FM_RANSAC,error,0.99,status);
}
else {
H = findHomography(points1,points2,CV_RANSAC,error,status);
}
for (int i = 0; i < npoints; i++) {
if (status.at<uchar>(i) == 1) {
inliers.push_back(points1[i]);
inliers.push_back(points2[i]);
}
}
}
//*******************************************************************************
//*******************************************************************************
/**
* @brief This function draws the set of the inliers between the two images
* @param img1 First image
* @param img2 Second image
* @param img_com Image with the inliers
* @param ptpairs Vector of point pairs with the set of inliers
* @param color The color for each method
*/
void draw_inliers(const cv::Mat& img1, const cv::Mat& img2, cv::Mat& img_com,
                  const std::vector<cv::Point2f>& ptpairs, int color) {
int x1 = 0, y1 = 0, x2 = 0, y2 = 0;
float rows1 = 0.0, cols1 = 0.0;
float rows2 = 0.0, cols2 = 0.0;
float ufactor = 0.0, vfactor = 0.0;
rows1 = img1.rows;
cols1 = img1.cols;
rows2 = img2.rows;
cols2 = img2.cols;
ufactor = (float)(cols1)/(float)(cols2);
vfactor = (float)(rows1)/(float)(rows2);
// This is in case the input images don't have the same resolution
Mat img_aux = Mat(Size(img1.cols,img1.rows),CV_8UC3);
resize(img2,img_aux,Size(img1.cols,img1.rows),0,0,CV_INTER_LINEAR);
for (int i = 0; i < img_com.rows; i++) {
for (int j = 0; j < img_com.cols; j++) {
if (j < img1.cols) {
*(img_com.ptr(i)+3*j) = *(img1.ptr(i)+3*j);
*(img_com.ptr(i)+3*j+1) = *(img1.ptr(i)+3*j+1);
*(img_com.ptr(i)+3*j+2) = *(img1.ptr(i)+3*j+2);
}
else {
*(img_com.ptr(i)+3*j) = *(img_aux.ptr(i)+3*(j-img_aux.cols));
*(img_com.ptr(i)+3*j+1) = *(img_aux.ptr(i)+3*(j-img_aux.cols)+1);
*(img_com.ptr(i)+3*j+2) = *(img_aux.ptr(i)+3*(j-img_aux.cols)+2);
}
}
}
for (size_t i = 0; i < ptpairs.size(); i+= 2) {
x1 = (int)(ptpairs[i].x+.5);
y1 = (int)(ptpairs[i].y+.5);
x2 = (int)(ptpairs[i+1].x*ufactor+img1.cols+.5);
y2 = (int)(ptpairs[i+1].y*vfactor+.5);
if (color == 0) {
line(img_com,Point(x1,y1),Point(x2,y2),CV_RGB(255,255,0),1);
}
else if (color == 1) {
line(img_com,Point(x1,y1),Point(x2,y2),CV_RGB(255,0,0),1);
}
else if (color == 2) {
line(img_com,Point(x1,y1),Point(x2,y2),CV_RGB(0,0,255),1);
}
}
}
void showInfo(INFO info)
{
printf("%-40s%d\n","The keypoints number of src image is :", info.n1);
printf("%-40s%d\n","The keypoints number of dst image is : ", info.n2);
printf("%-40s%d\n","The matching number is : ", info.m);
printf("%-40s%d\n","The right result number is : ", info.rm);
printf("%-40s%.2fs\n","The total time is : ", info.t);
return ;
}
//sift.cpp
#include "stdafx.h"
#include <iostream>
#include <vector>
#include "utils.h"
#include <opencv2/nonfree/nonfree.hpp>
using namespace std;
void sift(char* path1, char* path2, INFO& info, bool show)
{
double t1,t2;
t1=cvGetTickCount();
initModule_nonfree();
Mat img1, img2;
img1=imread(path1,0);
img2=imread(path2,0);
if(img1.data==NULL)
{
cout<<"The image cannot be loaded: "<<path1<<endl;
return;
}
if(img2.data==NULL)
{
cout<<"The image cannot be loaded: "<<path2<<endl;
return;
}
Ptr<FeatureDetector> sift_detector = FeatureDetector::create( "SIFT" );
Ptr<DescriptorExtractor> sift_descriptor = DescriptorExtractor::create( "SIFT" );
vector<KeyPoint> kpts1_sift, kpts2_sift;
Mat desc1_sift, desc2_sift;
Ptr<DescriptorMatcher> matcher_l2 = DescriptorMatcher::create("BruteForce"); // brute-force matching with L2 (Euclidean) distance
vector<vector<DMatch> > dmatches_sift;
vector<Point2f> matches_sift, inliers_sift;
sift_detector->detect(img1,kpts1_sift);
sift_detector->detect(img2,kpts2_sift);
info.n1=kpts1_sift.size();
info.n2=kpts2_sift.size();
sift_descriptor->compute(img1,kpts1_sift,desc1_sift);
sift_descriptor->compute(img2,kpts2_sift,desc2_sift);
matcher_l2->knnMatch(desc1_sift,desc2_sift,dmatches_sift,2); // 2-nearest-neighbor matching
matches2points_nndr(kpts1_sift,kpts2_sift,dmatches_sift,matches_sift,DRATIO);
info.m=matches_sift.size()/2;
compute_inliers_ransac(matches_sift,inliers_sift,MIN_H_ERROR,false);
info.rm=inliers_sift.size()/2;
t2=cvGetTickCount();
info.t=(t2-t1)/1000000.0/cvGetTickFrequency();
Mat img1_rgb_sift = imread(path1,1);
Mat img2_rgb_sift = imread(path2,1);
Mat img_com_sift = Mat(Size(img1.cols*2,img1.rows),CV_8UC3);
if(show == true)
{
draw_inliers(img1_rgb_sift,img2_rgb_sift,img_com_sift,inliers_sift,2);
imshow("sift",img_com_sift);
waitKey(0);
}
return;
}
Usage:
INFO sift_info;
sift(path1,path2,sift_info,true);
showInfo(sift_info);
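The orb(), brisk() and freak() functions declared in utils.h follow the same pattern as sift(); the main difference is that their binary descriptors should be matched with Hamming distance ("BruteForce-Hamming") instead of L2. The listing below is only a sketch of what orb() could look like under the same OpenCV 2.4 assumptions; the use of ORB's operator() and the Hamming matcher are my choices here, not the original implementation.
// orb.cpp (sketch only, assuming OpenCV 2.4.x)
#include "stdafx.h"
#include <iostream>
#include "utils.h"
using namespace std;

void orb(char* path1, char* path2, INFO& info, bool show)
{
    double t1 = cvGetTickCount();
    Mat img1 = imread(path1, 0);
    Mat img2 = imread(path2, 0);
    if (img1.data == NULL || img2.data == NULL) {
        cout << "The images cannot be loaded: " << path1 << " " << path2 << endl;
        return;
    }
    // Build the ORB detector/descriptor from the constants in utils.h
    ORB orb_op(ORB_MAX_KPTS, ORB_SCALE_FACTOR, ORB_PYRAMID_LEVELS,
               (int)ORB_EDGE_THRESHOLD, ORB_FIRST_PYRAMID_LEVEL,
               ORB_WTA_K, ORB::HARRIS_SCORE, ORB_PATCH_SIZE);
    // Hamming distance for binary descriptors
    Ptr<DescriptorMatcher> matcher_ham = DescriptorMatcher::create("BruteForce-Hamming");
    vector<KeyPoint> kpts1_orb, kpts2_orb;
    Mat desc1_orb, desc2_orb;
    vector<vector<DMatch> > dmatches_orb;
    vector<Point2f> matches_orb, inliers_orb;
    orb_op(img1, Mat(), kpts1_orb, desc1_orb);  // detect and describe in one call
    orb_op(img2, Mat(), kpts2_orb, desc2_orb);
    info.n1 = kpts1_orb.size();
    info.n2 = kpts2_orb.size();
    matcher_ham->knnMatch(desc1_orb, desc2_orb, dmatches_orb, 2);
    matches2points_nndr(kpts1_orb, kpts2_orb, dmatches_orb, matches_orb, DRATIO);
    info.m = matches_orb.size() / 2;
    compute_inliers_ransac(matches_orb, inliers_orb, MIN_H_ERROR, false);
    info.rm = inliers_orb.size() / 2;
    double t2 = cvGetTickCount();
    info.t = (t2 - t1) / 1000000.0 / cvGetTickFrequency();
    if (show) {
        Mat img1_rgb_orb = imread(path1, 1);
        Mat img2_rgb_orb = imread(path2, 1);
        Mat img_com_orb = Mat(Size(img1.cols * 2, img1.rows), CV_8UC3);
        draw_inliers(img1_rgb_orb, img2_rgb_orb, img_com_orb, inliers_orb, 0);
        imshow("orb", img_com_orb);
        waitKey(0);
    }
    return;
}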