Stitching two or more images that share overlapping regions breaks down into four main steps: feature detection, feature matching, estimation of the transform between the images, and finally warping and blending.
At present, SIFT and SURF are the only widely used image-registration algorithms built on scale-invariant features; even ORB, which combines the strengths of the FAST detector and the BRIEF descriptor, offers only limited robustness to scale change. Feature detection and feature matching were covered in detail in the second part of this series, "Feature Detection, Feature Matching, and Image Stitching with opencv3.1 (Part 2)", and are not repeated here.
At this point we have a large set of matched point pairs between the two images, each pair encoding a correspondence between them.
Stitching images by their overlapping regions is, in essence, still image matching: some region of image A reappears at some position in image B after a certain combination of rotation, translation, and affine distortion. In the patrol-vehicle scenario considered here, the overlapping regions in the captured images fall in different places in different images because of differences in shooting angle and viewpoint. If the transform produced by this change of viewpoint can be computed as a single, definite matrix, the relationship can be extended from the overlap to the entire image, i.e. we assume that images A and B are related by that transform everywhere. In this way, several images with limited fields of view can be stitched into one.
Computing this transform matrix is therefore the most critical step, and indeed the ultimate goal of the whole stitching stage.
In real operating conditions, the images captured by multiple cameras are typically related by some combination of scaling, translation, projective distortion, and a degree of affine distortion. An affine transform can be viewed as the composition of a linear transform and a translation, and all such linear transforms fit within the projective model. A projective transform is described by a 3x3 matrix; because this matrix acts on homogeneous coordinates (an (N+1)-dimensional vector representing an N-dimensional point), it can be normalized to 8 independent parameters. In theory, four matched point pairs determine a unique projective matrix, but since the matches carry error, a large number of pairs is normally used for the fit. OpenCV provides the following interface, which applies such a transform to a set of points:
perspectiveTransform
void perspectiveTransform(InputArray src, OutputArray dst, InputArray m)
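To make the model concrete: in homogeneous coordinates, a point (x, y) maps to (x', y') under the homography H as

s·[x' y' 1]^T = H·[x y 1]^T,  H = [h11 h12 h13; h21 h22 h23; h31 h32 1]

and fixing the bottom-right entry to 1 is exactly the normalization that leaves 8 independent parameters. A minimal usage sketch of perspectiveTransform follows; the matrix here is a made-up scale-and-shift, not one estimated from real images:

#include <opencv2/core.hpp>
#include <iostream>
#include <vector>

int main()
{
    // Hypothetical homography: scale by 2, then shift by (10, 20).
    cv::Mat H = (cv::Mat_<double>(3, 3) <<
        2, 0, 10,
        0, 2, 20,
        0, 0, 1);
    std::vector<cv::Point2f> src = { {0, 0}, {100, 0}, {100, 100}, {0, 100} };
    std::vector<cv::Point2f> dst;
    cv::perspectiveTransform(src, dst, H);  // applies H to every point in src
    for (const auto& p : dst)
        std::cout << p << std::endl;        // [10, 20], [210, 20], ...
}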
When the homography is estimated from more than four matched pairs, the goal is to minimize the reprojection error, and fitting the matrix becomes a nonlinear optimization problem. OpenCV offers another interface that can be called directly to perform this computation:
findHomography
Mat findHomography(InputArray srcPoints, InputArray dstPoints, int method=0, double ransacReprojThreshold=3, OutputArray mask=noArray() )
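Before the full example, here is a self-contained sketch of the call on synthetic correspondences (a pure shift of (5, 5) plus one deliberate outlier), showing how the RANSAC inlier mask comes back; all numbers are illustrative only:

#include <opencv2/calib3d.hpp>
#include <iostream>
#include <vector>

int main()
{
    std::vector<cv::Point2f> obj   = { {0, 0}, {100, 0}, {100, 100}, {0, 100}, {50, 50} };
    std::vector<cv::Point2f> scene = { {5, 5}, {105, 5}, {105, 105}, {5, 105}, {300, 300} };
    std::vector<uchar> inlier_mask;      // 1 = RANSAC inlier, 0 = rejected as an outlier
    cv::Mat H = cv::findHomography(obj, scene, cv::RANSAC,
                                   3.0,  // reprojection threshold in pixels
                                   inlier_mask);
    std::cout << "H =\n" << H << std::endl;
    std::cout << "inliers: " << cv::countNonZero(inlier_mask)
              << " of " << inlier_mask.size() << std::endl;
}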
Here is the official sample source code for these two functions as published for OpenCV 3.0 and above:
#include <stdio.h>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
void readme();
/* @function main */
int main( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
Mat img_object = imread( argv[1], IMREAD_GRAYSCALE );
Mat img_scene = imread( argv[2], IMREAD_GRAYSCALE );
if( !img_object.data || !img_scene.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector.detect( img_object, keypoints_object );
detector.detect( img_scene, keypoints_scene );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute( img_object, keypoints_object, descriptors_object );
extractor.compute( img_scene, keypoints_scene, descriptors_scene );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_object.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{ if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
Mat img_matches;
drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( int i = 0; i < good_matches.size(); i++ )
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( obj, scene, RANSAC );
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
std::vector<Point2f> scene_corners(4);
perspectiveTransform( obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
//-- Show detected matches
imshow( "Good Matches & Object detection", img_matches );
waitKey(0);
return 0;
}
/* @function readme */
void readme()
{ std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl; }
Building this, we are surprised to find that it fails: the official site has not yet fully updated this sample for OpenCV 3.0 and above. As we know by now, in 3.x the SURF descriptor extractor and the Hessian-threshold detector can no longer be created the old way (see the previous two articles in this series if this is unclear). The corrected code is:
#include <stdio.h>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
void readme();
/* @function main */
int main(int argc, char** argv)
{
Mat img_object = imread("box.png", IMREAD_GRAYSCALE);
Mat img_scene = imread("box_in_scene.png", IMREAD_GRAYSCALE);
if (!img_object.data || !img_scene.data)
{
std::cout << " --(!) Error reading images " << std::endl; return -1;
}
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 2000;
Ptr<SURF> detector = SURF::create(minHessian);
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector->detect(img_object, keypoints_object);
detector->detect(img_scene, keypoints_scene);
//-- Step 2: Calculate descriptors (feature vectors)
Ptr<SURF>extractor = SURF::create();
Mat descriptors_object, descriptors_scene;
extractor->compute(img_object, keypoints_object, descriptors_object);
extractor->compute(img_scene, keypoints_scene, descriptors_scene);
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match(descriptors_object, descriptors_scene, matches);
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_object.rows; i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for (int i = 0; i < descriptors_object.rows; i++)
{
if (matches[i].distance < 3 * min_dist)
{
good_matches.push_back(matches[i]);
}
}
Mat img_matches;
drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for (size_t i = 0; i < good_matches.size(); i++)
{
//-- Get the keypoints from the good matches
obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
}
Mat H = findHomography(obj, scene, RANSAC);
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = Point2f(0, 0); obj_corners[1] = Point2f(img_object.cols, 0);  // Point2f replaces the legacy cvPoint
obj_corners[2] = Point2f(img_object.cols, img_object.rows); obj_corners[3] = Point2f(0, img_object.rows);
std::vector<Point2f> scene_corners(4);
perspectiveTransform(obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line(img_matches, scene_corners[0] + Point2f(img_object.cols, 0), scene_corners[1] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[1] + Point2f(img_object.cols, 0), scene_corners[2] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[2] + Point2f(img_object.cols, 0), scene_corners[3] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[3] + Point2f(img_object.cols, 0), scene_corners[0] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
//-- Show detected matches
imshow("Good Matches & Object detection", img_matches);
waitKey(0);
return 0;
}
/* @function readme */
void readme()
{
std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl;
}
Note the key changes for OpenCV 3.x:
1. Ptr<SURF> detector = SURF::create(minHessian); — detectors and extractors are created through the Ptr smart pointer and a factory method.
2. Calls go through that pointer: detector->detect, extractor->compute.
3. #include "opencv2/imgproc.hpp" is required.
4. The InputArray/OutputArray proxy types seen in the signatures above accept Mat, Mat_<T>, Matx<T,m,n>, vector<T>, vector<vector<T>>, vector<Mat> and the like, and are used only as function formal parameters.
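Finally, although the sample above only draws the located object, the homography is all that is needed to actually stitch. The following is a minimal sketch, not part of the official sample: it assumes H maps img_object coordinates into img_scene coordinates (the direction produced by findHomography(obj, scene, RANSAC) above), warps img_object onto a canvas wide enough for both images, then copies img_scene in; seam blending is omitted.

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

// img_object, img_scene and H come from the code above.
void stitchNaive(const cv::Mat& img_object, const cv::Mat& img_scene, const cv::Mat& H)
{
    // Canvas sized to hold the scene plus the warped object image.
    cv::Mat result;
    cv::warpPerspective(img_object, result, H,
                        cv::Size(img_object.cols + img_scene.cols, img_scene.rows));
    // Overwrite the scene region; a real pipeline would blend the overlap.
    img_scene.copyTo(result(cv::Rect(0, 0, img_scene.cols, img_scene.rows)));
    cv::imshow("Stitched", result);
    cv::waitKey(0);
}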