OpenCV 3.1.0 SURF/SIFT/ORB Image Stitching and Blending


This post uses OpenCV 3.1.0 to implement simple image stitching and blending, based in turn on SURF, SIFT, and ORB features.
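
All three variants follow the same pipeline: detect and describe keypoints in both images, brute-force match the descriptors, keep only the best matches, estimate a homography with RANSAC, warp the right image into the left image's frame, and blend the overlap with a distance-weighted average.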

Note that in OpenCV 3.1.0 these features live in contrib, OpenCV's repository of extra (not yet officially released) modules, so before anything else you need to install the contrib build that matches your OpenCV version. After a day of fiddling, I finally got it installed by following this guide: https://blog.csdn.net/lyl771857509/article/details/79070799

The configuration used is OpenCV 3.1.0 + CMake 3.6.2 + OpenCV contrib 3.1.0.
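
If you want to verify the contrib install before running the full programs, a minimal smoke test along the following lines (my own sketch, not from the original post; it assumes a test image 1.jpg in the working directory) confirms that xfeatures2d links and that SURF produces its 64-dimensional descriptors:

#include <iostream>
#include <vector>
#include "opencv2/imgcodecs.hpp"
#include "opencv2/xfeatures2d.hpp"

using namespace cv;
using namespace cv::xfeatures2d;

int main()
{
	Mat img = imread("1.jpg", 0);    //any grayscale test image will do
	if (img.empty()) { std::cout << "can't open image" << std::endl; return -1; }

	Ptr<SURF> surf = SURF::create(800);    //hessianThreshold = 800, same as in the programs below
	std::vector<KeyPoint> keys;
	Mat desc;
	surf->detectAndCompute(img, Mat(), keys, desc);

	//desc.cols should print 64 (SURF's descriptor length) if contrib is working
	std::cout << keys.size() << " keypoints, descriptor length " << desc.cols << std::endl;
	return 0;
}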

The input images:

1.jpg

[Figure 1: input image 1.jpg]

2.jpg

[Figure 2: input image 2.jpg]

SURF:

#include <iostream>
#include <vector>
#include <algorithm>
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/ocl.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/xfeatures2d.hpp"
#include "opencv2/ml.hpp"

using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
using namespace cv::ml;

void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst);

typedef struct
{
	Point2f left_top;
	Point2f left_bottom;
	Point2f right_top;
	Point2f right_bottom;
}four_corners_t;

four_corners_t corners;

void CalcCorners(const Mat& H, const Mat& src)
{
	double v2[] = { 0, 0, 1 };  //top-left corner
	double v1[3];               //transformed coordinates
	Mat V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	Mat V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
	V1 = H * V2;

	//top-left corner (0,0,1)
	cout << "V2: " << V2 << endl;
	cout << "V1: " << V1 << endl;
	corners.left_top.x = v1[0] / v1[2];
	corners.left_top.y = v1[1] / v1[2];

	//bottom-left corner (0,src.rows,1)
	v2[0] = 0;
	v2[1] = src.rows;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
	V1 = H * V2;
	corners.left_bottom.x = v1[0] / v1[2];
	corners.left_bottom.y = v1[1] / v1[2];

	//top-right corner (src.cols,0,1)
	v2[0] = src.cols;
	v2[1] = 0;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
	V1 = H * V2;
	corners.right_top.x = v1[0] / v1[2];
	corners.right_top.y = v1[1] / v1[2];

	//bottom-right corner (src.cols,src.rows,1)
	v2[0] = src.cols;
	v2[1] = src.rows;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
	V1 = H * V2;
	corners.right_bottom.x = v1[0] / v1[2];
	corners.right_bottom.y = v1[1] / v1[2];
}


int main()
{
	Mat a = imread("2.jpg", 1);    //right image
	Mat b = imread("1.jpg", 1);    //left image

	Ptr<SURF> surf;                //created via a factory method, unlike in OpenCV2, and the xfeatures2d
	                               //namespace is required; otherwise SURF is reported as an undeclared
	                               //identifier even when everything is configured correctly
	surf = SURF::create(800);      //hessianThreshold = 800

	BFMatcher matcher;             //instantiate a brute-force matcher
	Mat c, d;
	vector<KeyPoint> key1, key2;
	vector<DMatch> matches;        //DMatch describes one matched pair of feature points: if feature m in the
	                               //left image best matches feature n in the right image, the DMatch records
	                               //the pair along with the descriptor distance and other data;
	                               //the distance is used later for filtering

	surf->detectAndCompute(a, Mat(), key1, c);    //inputs: image and mask; outputs: keypoints and a Mat of descriptors;
	surf->detectAndCompute(b, Mat(), key2, d);    //this Mat has one row per keypoint, one column per descriptor dimension (64 for SURF)

	matcher.match(d, c, matches);  //match on the descriptors; the results are stored as DMatch entries

	//sort arranges the data in ascending order
	sort(matches.begin(), matches.end());     //filter by sorting the matches on pair distance, smallest first
	vector<DMatch> good_matches;
	int ptsPairs = std::min(50, (int)(matches.size() * 0.15));
	cout << ptsPairs << endl;
	for (int i = 0; i < ptsPairs; i++)
	{
		good_matches.push_back(matches[i]);   //keep the matches with the smallest distances (at most 50)
	}

	Mat outimg;                    //drawMatches renders the two images side by side
	drawMatches(b, key2, a, key1, good_matches, outimg, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);  //draw the matches
	imshow("combine", outimg);

	//collect the registration points
	vector<Point2f> imagePoints1, imagePoints2;

	for (int i = 0; i < good_matches.size(); i++)
	{
		imagePoints2.push_back(key2[good_matches[i].queryIdx].pt);
		imagePoints1.push_back(key1[good_matches[i].trainIdx].pt);
	}

	//estimate the 3x3 projective mapping (homography) from image 1 to image 2 with RANSAC
	Mat homo = findHomography(imagePoints1, imagePoints2, RANSAC);
	cout << "homography:\n" << homo << endl << endl;

	//compute the four warped corner coordinates of the registered image
	CalcCorners(homo, a);

	//registration: warp the right image into the left image's frame
	Mat imageTransform1;
	warpPerspective(a, imageTransform1, homo, Size(MAX(corners.right_top.x, corners.right_bottom.x), b.rows));
	imshow("trans", imageTransform1);

	//create the stitched canvas; its size has to be computed in advance
	int dst_width = imageTransform1.cols;    //the rightmost warped point gives the stitched width
	int dst_height = b.rows;

	Mat dst(dst_height, dst_width, CV_8UC3);
	dst.setTo(0);

	imageTransform1.copyTo(dst(Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
	b.copyTo(dst(Rect(0, 0, b.cols, b.rows)));

	OptimizeSeam(b, imageTransform1, dst);

	imshow("dst", dst);
	imwrite("dst.jpg", dst);

	waitKey();
	return 0;
}

//optimize the junction of the two images so the stitch looks natural
void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst)
{
	int start = MIN(corners.left_top.x, corners.left_bottom.x);  //starting position, i.e. the left boundary of the overlap region

	double processWidth = img1.cols - start;   //width of the overlap region
	int rows = dst.rows;
	int cols = img1.cols;
	double alpha = 1;                          //weight of the img1 pixel
	for (int i = 0; i < rows; i++)
	{
		uchar* p = img1.ptr<uchar>(i);   //address of the start of row i
		uchar* t = trans.ptr<uchar>(i);
		uchar* d = dst.ptr<uchar>(i);
		for (int j = start; j < cols; j++)
		{
			//if the pixel in trans is black (no data), copy img1's data unchanged
			if (t[j * 3] == 0 && t[j * 3 + 1] == 0 && t[j * 3 + 2] == 0)
			{
				alpha = 1;
			}
			else
			{
				//img1's weight is proportional to the distance from the current point to the
				//left boundary of the overlap region; experiments show this works well
				alpha = (processWidth - (j - start)) / processWidth;
			}
			d[j * 3] = p[j * 3] * alpha + t[j * 3] * (1 - alpha);
			d[j * 3 + 1] = p[j * 3 + 1] * alpha + t[j * 3 + 1] * (1 - alpha);
			d[j * 3 + 2] = p[j * 3 + 2] * alpha + t[j * 3 + 2] * (1 - alpha);
		}
	}
}

[Figures 3-6: SURF matching and stitching results]

ORB:

#include <iostream>
#include <vector>
#include <algorithm>
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/ocl.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/xfeatures2d.hpp"
#include "opencv2/ml.hpp"

using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
using namespace cv::ml;

void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst);

typedef struct
{
	Point2f left_top;
	Point2f left_bottom;
	Point2f right_top;
	Point2f right_bottom;
}four_corners_t;

four_corners_t corners;

void CalcCorners(const Mat& H, const Mat& src)
{
	double v2[] = { 0, 0, 1 };  //top-left corner
	double v1[3];               //transformed coordinates
	Mat V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	Mat V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
	V1 = H * V2;

	//top-left corner (0,0,1)
	cout << "V2: " << V2 << endl;
	cout << "V1: " << V1 << endl;
	corners.left_top.x = v1[0] / v1[2];
	corners.left_top.y = v1[1] / v1[2];

	//bottom-left corner (0,src.rows,1)
	v2[0] = 0;
	v2[1] = src.rows;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
	V1 = H * V2;
	corners.left_bottom.x = v1[0] / v1[2];
	corners.left_bottom.y = v1[1] / v1[2];

	//top-right corner (src.cols,0,1)
	v2[0] = src.cols;
	v2[1] = 0;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
	V1 = H * V2;
	corners.right_top.x = v1[0] / v1[2];
	corners.right_top.y = v1[1] / v1[2];

	//bottom-right corner (src.cols,src.rows,1)
	v2[0] = src.cols;
	v2[1] = src.rows;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
	V1 = H * V2;
	corners.right_bottom.x = v1[0] / v1[2];
	corners.right_bottom.y = v1[1] / v1[2];
}


int main()
{
	Mat a = imread("2.jpg", 1);    //right image
	Mat b = imread("1.jpg", 1);    //left image

	Ptr<ORB> orb;                  //created via a factory method, unlike in OpenCV2
	                               //(ORB lives in features2d, so no extra namespace is needed)
	orb = ORB::create(800);        //keep up to 800 features

	BFMatcher matcher(NORM_HAMMING);  //ORB descriptors are binary, so match with Hamming distance
	Mat c, d;
	vector<KeyPoint> key1, key2;
	vector<DMatch> matches;        //DMatch describes one matched pair of feature points: if feature m in the
	                               //left image best matches feature n in the right image, the DMatch records
	                               //the pair along with the descriptor distance and other data;
	                               //the distance is used later for filtering

	orb->detectAndCompute(a, Mat(), key1, c);    //inputs: image and mask; outputs: keypoints and a Mat of descriptors;
	orb->detectAndCompute(b, Mat(), key2, d);    //this Mat has one row per keypoint; each ORB descriptor is 32 bytes

	matcher.match(d, c, matches);  //match on the descriptors; the results are stored as DMatch entries

	//sort arranges the data in ascending order
	sort(matches.begin(), matches.end());     //filter by sorting the matches on pair distance, smallest first
	vector<DMatch> good_matches;
	int ptsPairs = std::min(50, (int)(matches.size() * 0.15));
	cout << ptsPairs << endl;
	for (int i = 0; i < ptsPairs; i++)
	{
		good_matches.push_back(matches[i]);   //keep the matches with the smallest distances (at most 50)
	}

	Mat outimg;                    //drawMatches renders the two images side by side
	drawMatches(b, key2, a, key1, good_matches, outimg, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);  //draw the matches
	imshow("combine", outimg);

	//collect the registration points
	vector<Point2f> imagePoints1, imagePoints2;

	for (int i = 0; i < good_matches.size(); i++)
	{
		imagePoints2.push_back(key2[good_matches[i].queryIdx].pt);
		imagePoints1.push_back(key1[good_matches[i].trainIdx].pt);
	}

	//estimate the 3x3 projective mapping (homography) from image 1 to image 2 with RANSAC
	Mat homo = findHomography(imagePoints1, imagePoints2, RANSAC);
	cout << "homography:\n" << homo << endl << endl;

	//compute the four warped corner coordinates of the registered image
	CalcCorners(homo, a);

	//registration: warp the right image into the left image's frame
	Mat imageTransform1;
	warpPerspective(a, imageTransform1, homo, Size(MAX(corners.right_top.x, corners.right_bottom.x), b.rows));
	imshow("trans", imageTransform1);

	//create the stitched canvas; its size has to be computed in advance
	int dst_width = imageTransform1.cols;    //the rightmost warped point gives the stitched width
	int dst_height = b.rows;

	Mat dst(dst_height, dst_width, CV_8UC3);
	dst.setTo(0);

	imageTransform1.copyTo(dst(Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
	b.copyTo(dst(Rect(0, 0, b.cols, b.rows)));

	OptimizeSeam(b, imageTransform1, dst);

	imshow("dst", dst);
	imwrite("dst.jpg", dst);

	waitKey();
	return 0;
}

//optimize the junction of the two images so the stitch looks natural
void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst)
{
	int start = MIN(corners.left_top.x, corners.left_bottom.x);  //starting position, i.e. the left boundary of the overlap region

	double processWidth = img1.cols - start;   //width of the overlap region
	int rows = dst.rows;
	int cols = img1.cols;
	double alpha = 1;                          //weight of the img1 pixel
	for (int i = 0; i < rows; i++)
	{
		uchar* p = img1.ptr<uchar>(i);   //address of the start of row i
		uchar* t = trans.ptr<uchar>(i);
		uchar* d = dst.ptr<uchar>(i);
		for (int j = start; j < cols; j++)
		{
			//if the pixel in trans is black (no data), copy img1's data unchanged
			if (t[j * 3] == 0 && t[j * 3 + 1] == 0 && t[j * 3 + 2] == 0)
			{
				alpha = 1;
			}
			else
			{
				//img1's weight is proportional to the distance from the current point to the
				//left boundary of the overlap region; experiments show this works well
				alpha = (processWidth - (j - start)) / processWidth;
			}
			d[j * 3] = p[j * 3] * alpha + t[j * 3] * (1 - alpha);
			d[j * 3 + 1] = p[j * 3 + 1] * alpha + t[j * 3 + 1] * (1 - alpha);
			d[j * 3 + 2] = p[j * 3 + 2] * alpha + t[j * 3 + 2] * (1 - alpha);
		}
	}
}

SIFT:

#include <iostream>
#include <vector>
#include <algorithm>
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/ocl.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/xfeatures2d.hpp"
#include "opencv2/ml.hpp"

using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;
using namespace cv::ml;

void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst);

typedef struct
{
	Point2f left_top;
	Point2f left_bottom;
	Point2f right_top;
	Point2f right_bottom;
}four_corners_t;

four_corners_t corners;

void CalcCorners(const Mat& H, const Mat& src)
{
	double v2[] = { 0, 0, 1 };  //top-left corner
	double v1[3];               //transformed coordinates
	Mat V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	Mat V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
	V1 = H * V2;

	//top-left corner (0,0,1)
	cout << "V2: " << V2 << endl;
	cout << "V1: " << V1 << endl;
	corners.left_top.x = v1[0] / v1[2];
	corners.left_top.y = v1[1] / v1[2];

	//bottom-left corner (0,src.rows,1)
	v2[0] = 0;
	v2[1] = src.rows;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
	V1 = H * V2;
	corners.left_bottom.x = v1[0] / v1[2];
	corners.left_bottom.y = v1[1] / v1[2];

	//top-right corner (src.cols,0,1)
	v2[0] = src.cols;
	v2[1] = 0;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
	V1 = H * V2;
	corners.right_top.x = v1[0] / v1[2];
	corners.right_top.y = v1[1] / v1[2];

	//bottom-right corner (src.cols,src.rows,1)
	v2[0] = src.cols;
	v2[1] = src.rows;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
	V1 = H * V2;
	corners.right_bottom.x = v1[0] / v1[2];
	corners.right_bottom.y = v1[1] / v1[2];
}


int main()
{
	Mat a = imread("2.jpg", 1);    //right image
	Mat b = imread("1.jpg", 1);    //left image

	Ptr<SIFT> sift;                //created via a factory method, unlike in OpenCV2, and the xfeatures2d
	                               //namespace is required; otherwise SIFT is reported as an undeclared
	                               //identifier even when everything is configured correctly
	sift = SIFT::create(800);      //keep the 800 strongest features

	BFMatcher matcher;             //instantiate a brute-force matcher
	Mat c, d;
	vector<KeyPoint> key1, key2;
	vector<DMatch> matches;        //DMatch describes one matched pair of feature points: if feature m in the
	                               //left image best matches feature n in the right image, the DMatch records
	                               //the pair along with the descriptor distance and other data;
	                               //the distance is used later for filtering

	sift->detectAndCompute(a, Mat(), key1, c);    //inputs: image and mask; outputs: keypoints and a Mat of descriptors;
	sift->detectAndCompute(b, Mat(), key2, d);    //this Mat has one row per keypoint, one column per descriptor dimension (128 for SIFT)

	matcher.match(d, c, matches);  //match on the descriptors; the results are stored as DMatch entries

	//sort arranges the data in ascending order
	sort(matches.begin(), matches.end());     //filter by sorting the matches on pair distance, smallest first
	vector<DMatch> good_matches;
	int ptsPairs = std::min(50, (int)(matches.size() * 0.15));
	cout << ptsPairs << endl;
	for (int i = 0; i < ptsPairs; i++)
	{
		good_matches.push_back(matches[i]);   //keep the matches with the smallest distances (at most 50)
	}

	Mat outimg;                    //drawMatches renders the two images side by side
	drawMatches(b, key2, a, key1, good_matches, outimg, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);  //draw the matches
	imshow("combine", outimg);

	//collect the registration points
	vector<Point2f> imagePoints1, imagePoints2;

	for (int i = 0; i < good_matches.size(); i++)
	{
		imagePoints2.push_back(key2[good_matches[i].queryIdx].pt);
		imagePoints1.push_back(key1[good_matches[i].trainIdx].pt);
	}

	//estimate the 3x3 projective mapping (homography) from image 1 to image 2 with RANSAC
	Mat homo = findHomography(imagePoints1, imagePoints2, RANSAC);
	cout << "homography:\n" << homo << endl << endl;

	//compute the four warped corner coordinates of the registered image
	CalcCorners(homo, a);

	//registration: warp the right image into the left image's frame
	Mat imageTransform1;
	warpPerspective(a, imageTransform1, homo, Size(MAX(corners.right_top.x, corners.right_bottom.x), b.rows));
	imshow("trans", imageTransform1);

	//create the stitched canvas; its size has to be computed in advance
	int dst_width = imageTransform1.cols;    //the rightmost warped point gives the stitched width
	int dst_height = b.rows;

	Mat dst(dst_height, dst_width, CV_8UC3);
	dst.setTo(0);

	imageTransform1.copyTo(dst(Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
	b.copyTo(dst(Rect(0, 0, b.cols, b.rows)));

	OptimizeSeam(b, imageTransform1, dst);

	imshow("dst", dst);
	imwrite("dst.jpg", dst);

	waitKey();
	return 0;
}

//optimize the junction of the two images so the stitch looks natural
void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst)
{
	int start = MIN(corners.left_top.x, corners.left_bottom.x);  //starting position, i.e. the left boundary of the overlap region

	double processWidth = img1.cols - start;   //width of the overlap region
	int rows = dst.rows;
	int cols = img1.cols;
	double alpha = 1;                          //weight of the img1 pixel
	for (int i = 0; i < rows; i++)
	{
		uchar* p = img1.ptr<uchar>(i);   //address of the start of row i
		uchar* t = trans.ptr<uchar>(i);
		uchar* d = dst.ptr<uchar>(i);
		for (int j = start; j < cols; j++)
		{
			//if the pixel in trans is black (no data), copy img1's data unchanged
			if (t[j * 3] == 0 && t[j * 3 + 1] == 0 && t[j * 3 + 2] == 0)
			{
				alpha = 1;
			}
			else
			{
				//img1's weight is proportional to the distance from the current point to the
				//left boundary of the overlap region; experiments show this works well
				alpha = (processWidth - (j - start)) / processWidth;
			}
			d[j * 3] = p[j * 3] * alpha + t[j * 3] * (1 - alpha);
			d[j * 3 + 1] = p[j * 3 + 1] * alpha + t[j * 3 + 1] * (1 - alpha);
			d[j * 3 + 2] = p[j * 3 + 2] * alpha + t[j * 3 + 2] * (1 - alpha);
		}
	}
}

Stitcher (OpenCV's built-in stitch):

#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"

//#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/stitching.hpp"

using namespace std;
using namespace cv;

bool try_use_gpu = false;
vector<Mat> imgs;
string result_name = "result.jpg";

int main()
{
	Mat img1 = imread("1.jpg");
	Mat img2 = imread("2.jpg");

	if (img1.empty() || img2.empty())
	{
		cout << "con't open image" << endl;
		return -1;
	}
	imgs.push_back(img1);
	imgs.push_back(img2);
	imshow("p1", img1);
	imshow("p2", img2);
	waitKey();
	Mat pano;
	Stitcher stitcher = Stitcher::createDefault(try_use_gpu);	//stitch using OpenCV's Stitcher class
	Stitcher::Status status = stitcher.stitch(imgs, pano);
	if (status != Stitcher::OK)
	{
		cout << "Can't stitch images, error code = " << status << endl;
		return -1;
	}
	namedWindow(result_name);
	imshow(result_name, pano);
	imwrite("stitch_output.jpg", pano);
	waitKey();
	return 0;
}

SURF feature extraction implemented directly, without OpenCV's built-in SURF (OpenCV is still used for basic matrix operations and image I/O):
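
The heart of the implementation below is the box-filter approximation of the Hessian determinant from the SURF paper, det(H_approx) = Dxx*Dyy - (0.9*Dxy)^2, which is why BuildLayerData weights the mixed term with 0.81 = 0.9^2; the sign of the trace Dxx + Dyy (the Laplacian) is stored separately in LapData and attached to each keypoint.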

#include <iostream>
#include <vector>
#include <map>
#include <float.h>
#include <assert.h>
#include <opencv2/opencv.hpp>
#include "time.h"


using namespace std;
using namespace cv;
class IntegralImg
{
public:
	int Width;      //image width
	int Height;     //image height
	Mat Original;   //original image
	Mat Integral;   //integral image
	IntegralImg(Mat img);
	float AreaSum(int x, int y, int dx, int dy);
};
IntegralImg::IntegralImg(Mat img)
{
	this->Original = img;
	integral(this->Original, this->Integral);
	this->Width = img.cols;
	this->Height = img.rows;
}
//area sum from the integral image: A - B - C + D
float IntegralImg::AreaSum(int x, int y, int dx, int dy)
{
	int r1;
	int c1;
	int r2;
	int c2;
	r1 = std::min(x, Height);
	c1 = std::min(y, Width);
	r2 = std::min(x + dx, Height);
	c2 = std::min(y + dy, Width);
	r1 = std::max(r1, 0);
	c1 = std::max(c1, 0);
	r2 = std::max(r2, 0);
	c2 = std::max(c2, 0);
	double A = this->Integral.at<double>(r1, c1);
	double B = this->Integral.at<double>(r2, c1);
	double C = this->Integral.at<double>(r1, c2);
	double D = this->Integral.at<double>(r2, c2);
	return (float)std::max(0.0, A + D - B - C);
}
class ResponseLayer
{
public:
	//width of this layer's image
	int Width;
	//height of this layer's image
	int Height;
	//stride at which the filter is applied
	int Step;
	//1/3 of the filter size
	int Lobe;
	//Lobe*2-1
	int Lobe2;
	//half the filter size, the border
	int Border;
	//filter size
	int Size;
	//number of filter elements
	int Count;
	//pyramid octave
	int Octave;
	//pyramid interval
	int Interval;
	//response map after box filtering
	Mat * Data;
	//Laplacian sign matrix
	Mat * LapData;

	ResponseLayer(IntegralImg * img, int octave, int interval);
	void BuildLayerData(IntegralImg * img);
	float GetResponse(int x, int y, int step);
	float GetLaplacian(int x, int y, int step);
};
ResponseLayer::ResponseLayer(IntegralImg * img, int octave, int interval)
{
	this->Step = (int)pow(2.0, octave - 1);
	this->Width = img->Width / this->Step;
	this->Height = img->Height / this->Step;
	this->Lobe = (int)pow(2.0, octave)*interval + 1;
	this->Lobe2 = this->Lobe * 2 - 1;
	this->Size = 3 * this->Lobe;
	this->Border = this->Size / 2;
	this->Count = this->Size*this->Size;
	this->Octave = octave;
	this->Interval = interval;
	this->Data = new Mat(this->Height, this->Width, CV_32FC1);
	this->LapData = new Mat(this->Height, this->Width, CV_32FC1);
	this->BuildLayerData(img);
}

void ResponseLayer::BuildLayerData(IntegralImg * img)
{
	float inverse_area = 1.0 / this->Count;
	float Dxx, Dyy, Dxy;

	for (int r = 0, x = 0; x < Height; r += Step, x += 1)
	{
		for (int c = 0, y = 0; y < Width; c += Step, y += 1)
		{
			Dxx = img->AreaSum(r - Lobe + 1, c - Border, Lobe2, Size) - img->AreaSum(r - Lobe + 1, c - Lobe / 2, Lobe2, Lobe) * 3;
			Dyy = img->AreaSum(r - Border, c - Lobe + 1, Size, Lobe2) - img->AreaSum(r - Lobe / 2, c - Lobe + 1, Lobe, Lobe2) * 3;
			Dxy = img->AreaSum(r - Lobe, c + 1, Lobe, Lobe) + img->AreaSum(r + 1, c - Lobe, Lobe, Lobe)
				- img->AreaSum(r - Lobe, c - Lobe, Lobe, Lobe) - img->AreaSum(r + 1, c + 1, Lobe, Lobe);
			Dxx *= inverse_area;
			Dyy *= inverse_area;
			Dxy *= inverse_area;

			this->Data->at<float>(x, y) = (Dxx * Dyy - 0.81f * Dxy * Dxy);
			this->LapData->at<float>(x, y) = (Dxx + Dyy >= 0 ? 1 : 0);
		}
	}
}
float ResponseLayer::GetResponse(int x, int y, int step)
{
	int scale = step / this->Step;
	//std::cout << this->Data->at<float>((x*scale), (y*scale)) << std::endl;
	return this->Data->at<float>((x*scale), (y*scale));
}

float ResponseLayer::GetLaplacian(int x, int y, int step)
{
	int scale = step / this->Step;
	return this->LapData->at<float>((x*scale), (y*scale));
}

class IPoint :public Point2f
{
public:
	//float x;
	//float y;
	float dx;
	float dy;
	float scale;
	float orientation;
	float laplacian;
	float descriptor[64];
	float operator-(const IPoint &rhs);
	static void GetMatches(vector<IPoint> &ipts1, vector<IPoint> &ipts2, vector< pair<IPoint, IPoint> > &matches);
};
//! Gets the distance in descriptor space between Ipoints
float IPoint::operator-(const IPoint &rhs)  //operator overload: descriptor-space distance
{
	float sum = 0.f;
	for (int i = 0; i < 64; ++i)
		sum += (this->descriptor[i] - rhs.descriptor[i])*(this->descriptor[i] - rhs.descriptor[i]);
	return sqrt(sum);  //Euclidean distance between the two descriptors
}

//! Populate IpPairVec with matched ipts 
void IPoint::GetMatches(vector<IPoint> &ipts1, vector<IPoint> &ipts2, vector< pair<IPoint, IPoint> > &matches)
{
	float dist, d1, d2;
	IPoint *match = 0;

	matches.clear();

	for (unsigned int i = 0; i < ipts1.size(); i++)
	{
		d1 = d2 = FLT_MAX;

		for (unsigned int j = 0; j < ipts2.size(); j++)
		{
			dist = ipts1[i] - ipts2[j];

			if (dist < d1)       //this feature matches better than the current best
			{
				d2 = d1;
				d1 = dist;
				match = &ipts2[j];
			}
			else if (dist < d2)  //this feature matches better than the second best
			{
				d2 = dist;
			}
		}

		//accept the match only if the best distance is well below the second best (ratio test)
		if (d1 / d2 < 0.65)
		{
			//store the change in position
			ipts1[i].dx = match->x - ipts1[i].x;
			ipts1[i].dy = match->y - ipts1[i].y;
			matches.push_back(std::make_pair(ipts1[i], *match));
		}
	}
}

class FastHessian
{
public:

	IntegralImg Img;
	int Octaves;
	int Intervals;
	float Threshold;
	map<int, ResponseLayer*> Pyramid;
	vector<IPoint> IPoints;
	FastHessian(IntegralImg iImg, int octaves, int intervals, float threshold);
	void GeneratePyramid();
	void GetIPoints();
	void ShowIPoint();
	bool IsExtremum(int r, int c,
		int step, ResponseLayer * t, ResponseLayer * m, ResponseLayer * b);
	void InterpolateExtremum(int r, int c, int step,
		ResponseLayer *t, ResponseLayer *m, ResponseLayer *b);
	void InterpolateStep(int r, int c, int step,
		ResponseLayer *t, ResponseLayer *m, ResponseLayer *b,
		double* xi, double* xr, double* xc);
	Mat Deriv3D(int r, int c, int step,
		ResponseLayer *t, ResponseLayer *m, ResponseLayer *b);
	Mat Hessian3D(int r, int c, int step,
		ResponseLayer *t, ResponseLayer *m, ResponseLayer *b);
};
FastHessian::FastHessian(IntegralImg iImg, int octaves, int intervals, float threshold)
	:Octaves(octaves), Intervals(intervals), Img(iImg), Threshold(threshold)
{
	GeneratePyramid();
}

//build the response pyramid
void FastHessian::GeneratePyramid()
{
	for (int o = 1; o <= Octaves; o++)
	{
		for (int i = 1; i <= Intervals; i++)
		{
			int size = 3 * ((int)pow(2.0, o)*i + 1);
			if (!this->Pyramid.count(size))
			{
				this->Pyramid[size] = new ResponseLayer(&Img, o, i);
				//imshow("d",abs((*(Pyramid[size])->Data)*100));
				//cv::waitKey();
			}
		}
	}
}
void FastHessian::GetIPoints()
{
	// Clear the vector of existing IPoints
	this->IPoints.clear();

	// Get the response layers
	ResponseLayer *b, *m, *t;
	//loop over the octaves
	for (int o = 1; o <= this->Octaves; ++o)
	{
		//difference in filter Size between two Intervals of one Octave
		int step = (int)(3 * pow(2.0, o));
		//filter Size needed for the current layer
		int size = step + 3;
		//stride at which that filter is applied
		int s = (int)pow(2.0, o - 1);
		//width of the filtered image
		int width = this->Img.Width / s;
		//height of the filtered image
		int height = this->Img.Height / s;

		//loop over the intervals
		for (int i = 1; i <= this->Intervals - 2; ++i)
		{

			b = this->Pyramid[size];             //bottom layer
			m = this->Pyramid[size + step];      //middle layer
			t = this->Pyramid[size + 2 * step];  //top layer

			//compute the Border; pixels inside the Border are not recorded as keypoints
			//(the division by Step here is a bit puzzling)
			int border = (t->Border + 1) / (t->Step);

			//visit every pixel, looking for points that survive non-maximum suppression;
			//OpenSURF iterates over all pixels including the Border,
			//here those are skipped and iteration starts at the first meaningful pixel
			for (int r = border + 1; r < height - border; ++r)
			{
				for (int c = border + 1; c < width - border; ++c)
				{
					//check whether the centre element of the middle layer is the maximum of its 26 neighbours
					if (IsExtremum(r, c, s, t, m, b))
					{
						//interpolate to sub-pixel precision to locate the feature point
						InterpolateExtremum(r, c, s, t, m, b);
						//cout << '(' << r << ',' << c << ')' << endl;
					}
				}
			}
		}
	}
}
void FastHessian::ShowIPoint()
{
	for (int i = 0; i < this->IPoints.size(); i++)
	{
		cv::circle(this->Img.Original, cv::Point(this->IPoints[i].x, this->IPoints[i].y), 3, cv::Scalar(255, 0, 0, 100));
	}
	imshow("d", this->Img.Original);
}
//non-maximum suppression
bool FastHessian::IsExtremum(int r, int c, int step, ResponseLayer * t, ResponseLayer * m, ResponseLayer * b)
{
	// check the candidate point in the middle layer is above thresh 
	float candidate = m->GetResponse(r, c, step);
	if (candidate < this->Threshold)
		return 0;

	for (int rr = -1; rr <= 1; ++rr)
	{
		for (int cc = -1; cc <= 1; ++cc)
		{
			// if any response in 3x3x3 is greater candidate not maximum
			if (
				t->GetResponse(r + rr, c + cc, step) >= candidate ||							//compare with the 9 elements of the top layer
				((rr != 0 || cc != 0) && m->GetResponse(r + rr, c + cc, step) >= candidate) ||	//compare with the 8 remaining elements of the middle layer
				b->GetResponse(r + rr, c + cc, step) >= candidate								//compare with the 9 elements of the bottom layer
				)
				return 0;
		}
	}
	return 1;
}
//sub-pixel refinement
void FastHessian::InterpolateExtremum(int r, int c, int step, ResponseLayer *t, ResponseLayer *m, ResponseLayer *b)
{
	// get the step distance between filters
	// check the middle filter is mid way between top and bottom
	int filterStep = (m->Size - b->Size);
	assert(filterStep > 0 && t->Size - m->Size == m->Size - b->Size);

	// Get the offsets to the actual location of the extremum
	double xi = 0, xr = 0, xc = 0;
	InterpolateStep(r, c, step, t, m, b, &xi, &xr, &xc);

	// If point is sufficiently close to the actual extremum
	if (fabs(xi) < 0.5f  &&  fabs(xr) < 0.5f  &&  fabs(xc) < 0.5f)
	{
		IPoint p;
		p.x = static_cast<float>((c + xc)*step);
		p.y = static_cast<float>((r + xr)*step);
		p.scale = static_cast<float>((0.1333f)*(m->Size + xi*filterStep));
		p.laplacian = static_cast<float>(m->GetLaplacian(r, c, step));
		this->IPoints.push_back(p);
	}
}
//solve for the extremum via Taylor expansion
void FastHessian::InterpolateStep(int r, int c, int step, ResponseLayer *t, ResponseLayer *m, ResponseLayer *b,
	double* xi, double* xr, double* xc)
{
	Mat dD, H, H_inv, X;

	dD = Deriv3D(r, c, step, t, m, b);
	//cout << dD << endl;
	H = Hessian3D(r, c, step, t, m, b);
	//cout << H << endl;
	H_inv = H.inv();
	X = -H_inv * dD;

	*xc = X.at<double>(0, 0);
	*xr = X.at<double>(1, 0);
	*xi = X.at<double>(2, 0);
}
//compute the first-order derivatives
Mat FastHessian::Deriv3D(int r, int c, int step, ResponseLayer *t, ResponseLayer *m, ResponseLayer *b)
{
	double dx, dy, ds;
	dx = (m->GetResponse(r, c + 1, step) - m->GetResponse(r, c - 1, step)) / 2.0;
	dy = (m->GetResponse(r + 1, c, step) - m->GetResponse(r - 1, c, step)) / 2.0;
	ds = (t->GetResponse(r, c, step) - b->GetResponse(r, c, step)) / 2.0;

	//assemble the first-order derivative vector
	Mat dI = (Mat_<double>(3, 1) << dx, dy, ds);

	return dI;
}

//compute the second-order derivatives
Mat FastHessian::Hessian3D(int r, int c, int step, ResponseLayer *t, ResponseLayer *m, ResponseLayer *b)
{
	double v, dxx, dyy, dss, dxy, dxs, dys;

	v = m->GetResponse(r, c, step);
	dxx = m->GetResponse(r, c + 1, step) + m->GetResponse(r, c - 1, step) - 2 * v;
	dyy = m->GetResponse(r + 1, c, step) + m->GetResponse(r - 1, c, step) - 2 * v;
	dss = t->GetResponse(r, c, step) + b->GetResponse(r, c, step) - 2 * v;
	dxy = (m->GetResponse(r + 1, c + 1, step) - m->GetResponse(r + 1, c - 1, step) -
		m->GetResponse(r - 1, c + 1, step) + m->GetResponse(r - 1, c - 1, step)) / 4.0;
	dxs = (t->GetResponse(r, c + 1, step) - t->GetResponse(r, c - 1, step) -
		b->GetResponse(r, c + 1, step) + b->GetResponse(r, c - 1, step)) / 4.0;
	dys = (t->GetResponse(r + 1, c, step) - t->GetResponse(r - 1, c, step) -
		b->GetResponse(r + 1, c, step) + b->GetResponse(r - 1, c, step)) / 4.0;

	//assemble the Hessian matrix
	Mat H = (Mat_<double>(3, 3) <<
		dxx, dxy, dxs,
		dxy, dyy, dys,
		dxs, dys, dss);

	return H;
}

class SurfDescriptor
{
public:
	IntegralImg &Img;
	std::vector<IPoint> &IPoints;

	void GetOrientation();
	void GetDescriptor();

	float gaussian(int x, int y, float sig);
	float gaussian(float x, float y, float sig);
	float haarX(int row, int column, int s);
	float haarY(int row, int column, int s);
	float getAngle(float X, float Y);
	float RotateX(float x, float y, float si, float co);
	float RotateY(float x, float y, float si, float co);
	int fRound(float flt);
	void DrawOrientation();

	SurfDescriptor(IntegralImg &img, std::vector<IPoint> &iPoints);
};
//! SURF priors (these need not be done at runtime)
const float pi = 3.14159f;

//! lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right
const float gauss25[7][7] = {
	0.02546481,	0.02350698,	0.01849125,	0.01239505,	0.00708017,	0.00344629,	0.00142946,
	0.02350698,	0.02169968,	0.01706957,	0.01144208,	0.00653582,	0.00318132,	0.00131956,
	0.01849125,	0.01706957,	0.01342740,	0.00900066,	0.00514126,	0.00250252,	0.00103800,
	0.01239505,	0.01144208,	0.00900066,	0.00603332,	0.00344629,	0.00167749,	0.00069579,
	0.00708017,	0.00653582,	0.00514126,	0.00344629,	0.00196855,	0.00095820,	0.00039744,
	0.00344629,	0.00318132,	0.00250252,	0.00167749,	0.00095820,	0.00046640,	0.00019346,
	0.00142946,	0.00131956,	0.00103800,	0.00069579,	0.00039744,	0.00019346,	0.00008024
};

SurfDescriptor::SurfDescriptor(IntegralImg &img, std::vector<IPoint> &iPoints) :Img(img), IPoints(iPoints)
{

}

//extract the dominant orientation of the region around each keypoint
void SurfDescriptor::GetOrientation()
{
	for (int i = 0; i < this->IPoints.size(); i++)
	{
		const int pCount = 109;
		IPoint &p = IPoints[i];
		float gauss = 0.f;
		int s = fRound(p.scale), r = fRound(p.y), c = fRound(p.x);
		float resX[pCount], resY[pCount], Ang[pCount];
		int id[] = { 6,5,4,3,2,1,0,1,2,3,4,5,6 };

		int idx = 0;

		//compute Haar responses over a region of 6x the scale
		for (int i = -6; i <= 6; i++)
		{
			for (int j = -6; j <= 6; j++)
			{
				if (i*i + j*j<36)
				{
					//extract x/y gradients with Haar wavelets of size 4x the scale
					//(why 4x sigma is unclear)
					gauss = gauss25[id[i + 6]][id[j + 6]];
					resX[idx] = gauss * haarX(r + j*s, c + i*s, 4 * s);
					resY[idx] = gauss * haarY(r + j*s, c + i*s, 4 * s);
					//compute the angle of the response at this sample
					Ang[idx] = getAngle(resX[idx], resY[idx]);
					idx++;
				}
			}
		}

		//find the dominant orientation
		float sumX = 0.f, sumY = 0.f;
		float maxX = 0.f, maxY = 0.f;
		float max = 0.f, orientation = 0.f;
		float ang1 = 0.f, ang2 = 0.f;

		//scan a pi/3 sector over the samples,
		//sliding it in steps of 0.15 rad
		float pi3 = pi / 3.0f;
		for (ang1 = 0; ang1<2 * pi; ang1 += 0.15f)
		{
			ang2 = (ang1 + pi3>2 * pi ? ang1 - 5.0f*pi3 : ang1 + pi3);
			sumX = sumY = 0.f;
			for (int k = 0; k < idx; k++)
			{
				const float &ang = Ang[k];

				//determine whether the sample's angle falls inside the current sector window
				if (ang1 < ang2 && ang1 < ang && ang < ang2)
				{
					sumX += resX[k];
					sumY += resY[k];
				}
				else if (ang2 < ang1 &&
					((0 < ang && ang < ang2) || (ang1 < ang && ang < 2 * pi)))
				{
					sumX += resX[k];
					sumY += resY[k];
				}
			}

			//if this window's vector is the longest so far, it becomes the dominant direction
			if (sumX*sumX + sumY*sumY > max)
			{
				max = sumX*sumX + sumY*sumY;
				maxX = sumX;
				maxY = sumY;
			}
		}

		p.orientation = getAngle(maxX, maxY);
	}
}

//draw the dominant orientations
void SurfDescriptor::DrawOrientation()
{
	int r1, c1, c2, r2;
	for (int i = 0; i < this->IPoints.size(); i++)
	{
		r1 = fRound(IPoints[i].y);
		c1 = fRound(IPoints[i].x);
		c2 = fRound(10 * cos(IPoints[i].orientation)) + c1;
		r2 = fRound(10 * sin(IPoints[i].orientation)) + r1;
		cv::line(this->Img.Original, cv::Point(c1, r1), cv::Point(c2, r2), cv::Scalar(0, 255, 0));
	}
	imshow("d", this->Img.Original);
}

//extract the 4*4*4 = 64-dimensional Haar descriptor relative to the dominant orientation
void SurfDescriptor::GetDescriptor()
{
	//OpenSURF is hard to follow in this function;
	//it does not look like an efficiency trick, it is simply convoluted.
	//The region around the keypoint is divided into 4x4 subregions,
	//making the keypoint the centre point of one subregion.
	int o[] = { -7, -2, 3, 8 };
	//int so[]={-2, -1, 0, 1, 2};

	for (int t = 0; t < this->IPoints.size(); t++)
	{
		IPoint &p = IPoints[t];
		float scale = p.scale;
		float * desp = p.descriptor;
		int x = fRound(p.x);
		int y = fRound(p.y);
		float co = cos(p.orientation);
		float si = sin(p.orientation);
		float cx = -0.5f, cy = 0.f; //Subregion centers for the 4x4 gaussian weighting
		int count = 0;
		float len = 0.f;

		for (int i = 0; i<4; i++)
		{
			cx += 1.f;
			cy = -0.5f;
			for (int j = 0; j<4; j++)
			{
				int xs = fRound(RotateX(scale*o[i], scale*o[j], si, co) + x);
				int ys = fRound(RotateY(scale*o[i], scale*o[j], si, co) + y);
				float dx = 0.f, dy = 0.f, mdx = 0.f, mdy = 0.f;

				cy += 1.f;
				for (int k = o[i] - 5; k <= o[i] + 3; k++)
				{
					for (int l = o[j] - 5; l <= o[j] + 3; l++)
					{
						int sample_x = fRound(RotateX(scale*k, scale*l, si, co) + x);
						int sample_y = fRound(RotateY(scale*k, scale*l, si, co) + y);

						//why 2.5*scale? the paper says 3.3*scale
						float gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale);
						float rx = haarX(sample_y, sample_x, 2 * fRound(scale));
						float ry = haarY(sample_y, sample_x, 2 * fRound(scale));

						float rrx = gauss_s1*RotateX(rx, ry, si, co);
						float rry = gauss_s1*RotateY(rx, ry, si, co);

						dx += rrx;
						dy += rry;
						mdx += fabs(rrx);
						mdy += fabs(rry);
					}
				}

				float gauss_s2 = gaussian(cx - 2.f, cy - 2.f, 1.5f);

				desp[count++] = dx*gauss_s2;
				desp[count++] = dy*gauss_s2;
				desp[count++] = mdx*gauss_s2;
				desp[count++] = mdy*gauss_s2;

				len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy) * gauss_s2*gauss_s2;
			}
		}

		len = sqrt(len);
		for (int i = 0; i < 64; ++i)
			desp[i] /= len;

	}

}

//rotate coordinates by the given angle
inline float SurfDescriptor::RotateX(float x, float y, float si, float co)
{
	return -x*si + y*co;
}

inline float SurfDescriptor::RotateY(float x, float y, float si, float co)
{
	return x*co + y*si;
}

//! Round float to nearest integer
inline int SurfDescriptor::fRound(float flt)
{
	return (int)floor(flt + 0.5f);
}
//-------------------------------------------------------

//! Calculate the value of the 2d gaussian at x,y
inline float SurfDescriptor::gaussian(int x, int y, float sig)
{
	return (1.0f / (2.0f*pi*sig*sig)) * exp(-(x*x + y*y) / (2.0f*sig*sig));
}

//-------------------------------------------------------

//! Calculate the value of the 2d gaussian at x,y
inline float SurfDescriptor::gaussian(float x, float y, float sig)
{
	return 1.0f / (2.0f*pi*sig*sig) * exp(-(x*x + y*y) / (2.0f*sig*sig));
}

//-------------------------------------------------------

//! Calculate Haar wavelet responses in x direction
inline float SurfDescriptor::haarX(int row, int column, int s)
{
	return Img.AreaSum(row - s / 2, column, s, s / 2)
		- Img.AreaSum(row - s / 2, column - s / 2, s, s / 2);
}

//-------------------------------------------------------

//! Calculate Haar wavelet responses in y direction
inline float SurfDescriptor::haarY(int row, int column, int s)
{
	return Img.AreaSum(row, column - s / 2, s / 2, s)
		- Img.AreaSum(row - s / 2, column - s / 2, s / 2, s);
}

//-------------------------------------------------------

//! Get the angle from the +ve x-axis of the vector given by (X Y)
float SurfDescriptor::getAngle(float X, float Y)
{
	if (X > 0 && Y >= 0)
		return atan(Y / X);

	if (X < 0 && Y >= 0)
		return pi - atan(-Y / X);

	if (X < 0 && Y < 0)
		return pi + atan(Y / X);

	if (X > 0 && Y < 0)
		return 2 * pi - atan(-Y / X);

	return 0;
}
class Visualize
{
public:

	void DrawIPoint(char * name, cv::Mat img, std::vector<IPoint> ipts);
	void DrawMatch(cv::Mat img1, cv::Mat img2, std::vector< std::pair<IPoint, IPoint> > matches);
	void DrawMatchStep(cv::Mat img1, cv::Mat img2, std::vector< std::pair<IPoint, IPoint> > matches);
	int fRound(float flt);
};

//draw the keypoints
void Visualize::DrawIPoint(char * name, cv::Mat img, std::vector<IPoint> ipts)
{
	cout << "Surf Found: " << ipts.size() << " Interest Points." << endl;
	for (int i = 0; i < ipts.size(); i++)
	{
		int scale = static_cast<int>(ipts[i].scale * 2.5);
		if (ipts[i].laplacian == 0)
		{
			cv::circle(img, ipts[i], scale, cv::Scalar(255, 0, 0));
		}
		else if (ipts[i].laplacian == 1)
		{
			cv::circle(img, ipts[i], scale, cv::Scalar(0, 0, 255));
		}
		int r1 = fRound(ipts[i].y);
		int c1 = fRound(ipts[i].x);
		int c2 = fRound(scale * cos(ipts[i].orientation)) + c1;
		int r2 = fRound(scale * sin(ipts[i].orientation)) + r1;
		cv::line(img, cv::Point(c1, r1), cv::Point(c2, r2), cv::Scalar(0, 255, 0));
	}
	imshow(name, img);
}

//draw all the matched point pairs
void Visualize::DrawMatch(cv::Mat img1, cv::Mat img2, std::vector< std::pair<IPoint, IPoint> > matches)
{
	int Width = img1.cols + img2.cols;
	int Height = max(img1.rows, img2.rows);

	for (unsigned int i = 0; i < matches.size(); ++i)
	{
		circle(img1, matches[i].first, 4, Scalar(255, 0, 0));
		circle(img2, matches[i].second, 4, Scalar(255, 0, 0));

		const int & w = img1.cols;
		line(img1, matches[i].first, Point(matches[i].second.x + w, matches[i].second.y), Scalar(255, 255, 255), 1);
		line(img2, Point(matches[i].first.x - w, matches[i].first.y), matches[i].second, Scalar(255, 255, 255), 1);
	}

	imshow("1", img1);
	imshow("2", img2);

	Mat Conbine = Mat::zeros(Height, Width, CV_8UC3);
	img1.copyTo(Conbine(Rect(0, 0, img1.cols, img1.rows)));
	img2.copyTo(Conbine(Rect(img1.cols, 0, img2.cols, img2.rows)));
	imshow("d", Conbine);
}

void Visualize::DrawMatchStep(cv::Mat img1, cv::Mat img2, std::vector< std::pair<IPoint, IPoint> > matches)
{
	int Width = img1.cols + img2.cols;
	int Height = max(img1.rows, img2.rows);
	
	for (unsigned int i = 0; i < matches.size(); ++i)
	{
		circle(img1, matches[i].first, 4, Scalar(255, 0, 0));
		circle(img2, matches[i].second, 4, Scalar(255, 0, 0));

		const int & w = img1.cols;
		line(img1, matches[i].first, Point(matches[i].second.x + w, matches[i].second.y), Scalar(255, 255, 255), 1);
		line(img2, Point(matches[i].first.x - w, matches[i].first.y), matches[i].second, Scalar(255, 255, 255), 1);
	}

	Mat Conbine = Mat::zeros(Height, Width, CV_8UC3);
	img1.copyTo(Conbine(Rect(0, 0, img1.cols, img1.rows)));
	img2.copyTo(Conbine(Rect(img1.cols, 0, img2.cols, img2.rows)));
	imshow("conbine", Conbine);
	imwrite("conbine.jpg", Conbine);
}

//! Round float to nearest integer
inline int Visualize::fRound(float flt)
{
	return (int)floor(flt + 0.5f);
}

class Surf
{
public:

	vector<IPoint> GetAllFeatures(Mat img);
};
vector<IPoint> Surf::GetAllFeatures(Mat img)
{
	clock_t start;
	clock_t end;
	start = clock();
	IntegralImg IImg(img);
	end = clock();
	cout << "IntegralImg took: " << float(end - start) / CLOCKS_PER_SEC << " seconds" << std::endl;
	start = clock();
	FastHessian fh(IImg, 4, 4, 0.0001);
	fh.GetIPoints();
	end = clock();
	std::cout << "FastHessian took: " << float(end - start) / CLOCKS_PER_SEC << " seconds" << std::endl;
	start = clock();
	SurfDescriptor sd(IImg, fh.IPoints);
	sd.GetOrientation();
	sd.GetDescriptor();
	end = clock();
	std::cout << "Descriptor took: " << float(end - start) / CLOCKS_PER_SEC << " seconds" << std::endl;
	return fh.IPoints;
}

Mat ReadFloatImg(char * szFilename)
{
	Mat iImg = imread(szFilename, 0);
	Mat fImg;
	iImg.convertTo(fImg, CV_32FC1);
	fImg /= 255.0;
	return fImg;
}

int main()
{
	//load the images
	Mat colorImg1 = imread("1.jpg");
	Mat img1 = ReadFloatImg("1.jpg");
	Mat colorImg2 = imread("2.jpg");
	Mat img2 = ReadFloatImg("2.jpg");

	//compute the SURF features
	Surf surf;
	Visualize v;
	vector< pair<IPoint, IPoint> > matches;
	vector<IPoint> ips1 = surf.GetAllFeatures(img1);
	vector<IPoint> ips2 = surf.GetAllFeatures(img2);

	//find the matched feature points
	IPoint::GetMatches(ips1, ips2, matches);
	v.DrawMatchStep(colorImg1, colorImg2, matches);	
	waitKey(0);
	return 0;
}

 
