This Stitching series started after I read zhaocj's series (https://blog.csdn.net/zhaocj/article/details/78798687); I decided to learn from it and write my own version against OpenCV 4.0.0.
I am still a beginner in image stitching, so if anything here is wrong, corrections are very welcome.
Topic of this post: feature point extraction.
1.1 Principle
For the theory behind SIFT, see my GitHub notes: https://github.com/CeciliaPYY/road2beEngineer/blob/master/duNiang/Sift.md
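In one sentence, for readers who do not want to click through: SIFT builds a Gaussian scale space, looks for extrema in the difference-of-Gaussians (DoG) pyramid, refines and filters those candidate keypoints, assigns each one a dominant orientation, and finally computes a 128-dimensional descriptor per keypoint. The source code below follows exactly these steps.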
1.2 Source code
Feature2D is the common interface class for finding image features:
class CV_EXPORTS_W Feature2D : public virtual Algorithm
{
public:
virtual ~Feature2D();
/** @brief Detects keypoints in an image (first variant) or image set (second variant).
@param image Input image (or image set) in which to look for features.
@param keypoints The detected keypoints. In the second variant of the method keypoints[i] is the set
of keypoints detected in images[i].
@param mask Mask specifying where to look for keypoints (optional). It must be an 8-bit integer
matrix whose non-zero values mark the region of interest.
*/
CV_WRAP virtual void detect( InputArray image,
CV_OUT std::vector<KeyPoint>& keypoints,
InputArray mask=noArray() );
/** @overload
@param images Image set.
@param keypoints The detected keypoints. In the second variant of the method keypoints[i] is a set
of keypoints detected in images[i] .
@param masks Masks for each input image specifying where to look for keypoints (optional).
masks[i] is a mask for images[i].
*/
CV_WRAP virtual void detect( InputArrayOfArrays images,
CV_OUT std::vector<std::vector<KeyPoint> >& keypoints,
InputArrayOfArrays masks=noArray() );
/** @brief Computes the descriptors for a set of keypoints detected in an image (first variant) or image set
(second variant).
@param image Image.
@param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be
computed are removed. Sometimes new keypoints can be added, for example: SIFT duplicates a keypoint
with several dominant orientations (one copy per orientation).
@param descriptors Computed descriptors. In the second variant of the method descriptors[i] are the
descriptors computed for keypoints[i]. Row j of descriptors (or descriptors[i]) is the
descriptor for the j-th keypoint.
*/
CV_WRAP virtual void compute( InputArray image,
CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints,
OutputArray descriptors );
/** @overload
@param images Image set.
@param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be
computed are removed. Sometimes new keypoints can be added, for example: SIFT duplicates a keypoint
with several dominant orientations (one copy per orientation). Note that, besides the keypoints the
detector finds on its own, the caller may add keypoints of their own here.
@param descriptors Computed descriptors. In the second variant of the method descriptors[i] are the
descriptors computed for keypoints[i]. Row j of descriptors (or descriptors[i]) is the
descriptor for the j-th keypoint.
*/
CV_WRAP virtual void compute( InputArrayOfArrays images,
CV_OUT CV_IN_OUT std::vector<std::vector<KeyPoint> >& keypoints,
OutputArrayOfArrays descriptors );
/** Detects keypoints and computes the descriptors */
// detectAndCompute() locates the keypoints and computes their descriptors in a single call
CV_WRAP virtual void detectAndCompute( InputArray image, InputArray mask,
CV_OUT std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints=false );
CV_WRAP virtual int descriptorSize() const;
CV_WRAP virtual int descriptorType() const;
CV_WRAP virtual int defaultNorm() const;
CV_WRAP void write( const String& fileName ) const;
CV_WRAP void read( const String& fileName );
virtual void write( FileStorage&) const CV_OVERRIDE;
// see corresponding cv::Algorithm method
CV_WRAP virtual void read( const FileNode&) CV_OVERRIDE;
//! Return true if detector object is empty
CV_WRAP virtual bool empty() const CV_OVERRIDE;
CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
// see corresponding cv::Algorithm method
CV_WRAP inline void write(const Ptr<FileStorage>& fs, const String& name = String()) const { Algorithm::write(fs, name); }
};
Hmm, judging from the code above, several of the closed-source stitching tools I surveyed earlier may well have been built on OpenCV, since they also expose an interface for user-supplied keypoints.
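To make that concrete, here is a small sketch of my own (not from the original post) showing how keypoints you pick yourself can be handed to compute(). I use ORB only because it ships with the core features2d module, and the image path is just a placeholder:
#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>
#include <vector>
int main() {
    // "some_image.jpg" is a placeholder path
    cv::Mat img = cv::imread("some_image.jpg", cv::IMREAD_GRAYSCALE);
    if (img.empty()) { std::cout << "could not read image" << std::endl; return -1; }
    // Hand-picked keypoints: (x, y) position plus the diameter of the patch used for the descriptor
    std::vector<cv::KeyPoint> my_kpts;
    my_kpts.emplace_back(100.f, 150.f, 31.f);
    my_kpts.emplace_back(200.f, 80.f, 31.f);
    cv::Ptr<cv::Feature2D> orb = cv::ORB::create();
    cv::Mat desc;
    // compute() only describes the keypoints handed to it (no detection is run);
    // keypoints for which no descriptor can be computed are removed from my_kpts.
    orb->compute(img, my_kpts, desc);
    std::cout << "descriptors: " << desc.rows << " x " << desc.cols << std::endl;
    return 0;
}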
The SIFT_Impl class
/*!
SIFT implementation.
The class implements SIFT algorithm by D. Lowe.
*/
class SIFT_Impl : public SIFT
{
public:
explicit SIFT_Impl( int nfeatures = 0, int nOctaveLayers = 3,
double contrastThreshold = 0.04, double edgeThreshold = 10,
double sigma = 1.6);
//! returns the descriptor size in floats (128)
int descriptorSize() const CV_OVERRIDE;
//! returns the descriptor type
int descriptorType() const CV_OVERRIDE;
//! returns the default norm type
int defaultNorm() const CV_OVERRIDE;
//! finds the keypoints and computes descriptors for them using SIFT algorithm.
//! Optionally it can compute descriptors for the user-provided keypoints
// SIFT detects keypoints and can compute descriptors both for the keypoints it finds
// and for keypoints supplied by the user
void detectAndCompute(InputArray img, InputArray mask,
std::vector<KeyPoint>& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints = false) CV_OVERRIDE;
// Build the Gaussian pyramid
void buildGaussianPyramid( const Mat& base, std::vector<Mat>& pyr, int nOctaves ) const;
// Build the difference-of-Gaussians (DoG) pyramid
void buildDoGPyramid( const std::vector<Mat>& pyr, std::vector<Mat>& dogpyr ) const;
// Find extrema in scale space
void findScaleSpaceExtrema( const std::vector<Mat>& gauss_pyr, const std::vector<Mat>& dog_pyr,
std::vector<KeyPoint>& keypoints ) const;
protected:
CV_PROP_RW int nfeatures;
CV_PROP_RW int nOctaveLayers;
CV_PROP_RW double contrastThreshold;
CV_PROP_RW double edgeThreshold;
CV_PROP_RW double sigma;
};
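A small note on the pyramid sizes, based on my reading of the implementation: with the default nOctaveLayers = 3, buildGaussianPyramid produces nOctaveLayers + 3 = 6 blurred images per octave and buildDoGPyramid produces nOctaveLayers + 2 = 5 DoG images per octave, which is exactly the range that findScaleSpaceExtrema scans.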
Let us look at the implementation of each member function in turn.
SIFT::create
Ptr<SIFT> SIFT::create( int _nfeatures, int _nOctaveLayers,
double _contrastThreshold, double _edgeThreshold, double _sigma )
{
// Construct a SIFT_Impl with the given parameters and return it behind a smart pointer
return makePtr<SIFT_Impl>(_nfeatures, _nOctaveLayers, _contrastThreshold, _edgeThreshold, _sigma);
}
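As a quick usage sketch of my own (not from the original post): in OpenCV 4.0.0 SIFT still lives in the xfeatures2d contrib module and needs a build with OPENCV_ENABLE_NONFREE; the five parameters of create() are exactly the ones forwarded to the SIFT_Impl constructor above.
#include <opencv2/core.hpp>
#include <opencv2/xfeatures2d/nonfree.hpp>  // SIFT is in the contrib (nonfree) module in 4.0.0
int main()
{
    // Same values as the constructor defaults: keep all features, 3 layers per octave,
    // contrast threshold 0.04, edge threshold 10, base sigma 1.6
    cv::Ptr<cv::xfeatures2d::SIFT> sift =
        cv::xfeatures2d::SIFT::create(0, 3, 0.04, 10, 1.6);
    return sift.empty() ? 1 : 0;
}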
SIFT_Impl::detectAndCompute
void SIFT_Impl::detectAndCompute(InputArray _image, InputArray _mask,
std::vector<KeyPoint>& keypoints,
OutputArray _descriptors,
bool useProvidedKeypoints)
{
int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0;
Mat image = _image.getMat(), mask = _mask.getMat();
if( image.empty() || image.depth() != CV_8U )
CV_Error( Error::StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" );
if( !mask.empty() && mask.type() != CV_8UC1 )
CV_Error( Error::StsBadArg, "mask has incorrect type (!=CV_8UC1)" );
if( useProvidedKeypoints )
{
// Use the keypoints provided by the caller instead of running detection
firstOctave = 0;
int maxOctave = INT_MIN;
for( size_t i = 0; i < keypoints.size(); i++ )
{
int octave, layer;
float scale;
unpackOctave(keypoints[i], octave, layer, scale);
firstOctave = std::min(firstOctave, octave);
maxOctave = std::max(maxOctave, octave);
actualNLayers = std::max(actualNLayers, layer-2);
}
firstOctave = std::min(firstOctave, 0);
CV_Assert( firstOctave >= -1 && actualNLayers <= nOctaveLayers );
actualNOctaves = maxOctave - firstOctave + 1;
}
// Build the base image: if firstOctave < 0 the input is first upsampled by 2x, then Gaussian-blurred
Mat base = createInitialImage(image, firstOctave < 0, (float)sigma);
std::vector<Mat> gpyr, dogpyr;
int nOctaves = actualNOctaves > 0 ? actualNOctaves : cvRound(std::log( (double)std::min( base.cols, base.rows ) ) / std::log(2.) - 2) - firstOctave;
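// For example, a 640x480 input upsampled to 1280x960 gives cvRound(log2(960) - 2) - (-1) = 8 + 1 = 9 octaves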
//double t, tf = getTickFrequency();
//t = (double)getTickCount();
// Build the Gaussian pyramid
buildGaussianPyramid(base, gpyr, nOctaves);
// Build the DoG (difference-of-Gaussians) pyramid
buildDoGPyramid(gpyr, dogpyr);
//t = (double)getTickCount() - t;
//printf("pyramid construction time: %g\n", t*1000./tf);
if( !useProvidedKeypoints )
{
//t = (double)getTickCount();
// Search the DoG pyramid for local extrema, reject low-contrast points and edge responses,
// then build the gradient orientation histogram and assign each keypoint its dominant orientation(s)
findScaleSpaceExtrema(gpyr, dogpyr, keypoints);
KeyPointsFilter::removeDuplicatedSorted( keypoints );
if( nfeatures > 0 )
KeyPointsFilter::retainBest(keypoints, nfeatures);
//t = (double)getTickCount() - t;
//printf("keypoint detection time: %g\n", t*1000./tf);
if( firstOctave < 0 )
for( size_t i = 0; i < keypoints.size(); i++ )
{
KeyPoint& kpt = keypoints[i];
float scale = 1.f/(float)(1 << -firstOctave);
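// With firstOctave = -1 this gives scale = 1/(1<<1) = 0.5: coordinates and sizes found on the
// 2x-upsampled base image are mapped back to the original resolution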
kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255);
kpt.pt *= scale;
kpt.size *= scale;
}
if( !mask.empty() )
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
else
{
// filter keypoints by mask (left disabled in the original source for user-provided keypoints)
//KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
if( _descriptors.needed() )
{
//t = (double)getTickCount();
int dsize = descriptorSize();
_descriptors.create((int)keypoints.size(), dsize, CV_32F);
Mat descriptors = _descriptors.getMat();
calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave);
//t = (double)getTickCount() - t;
//printf("descriptor extraction time: %g\n", t*1000./tf);
}
}
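The rescaling loop above only makes sense once you know how SIFT packs the octave index and the intra-octave layer into KeyPoint::octave. The helper below is my own paraphrase of the unpackOctave logic called at the top of detectAndCompute; treat it as an illustration rather than the verbatim source.
#include <opencv2/core.hpp>
// Sketch of how sift.cpp decodes KeyPoint::octave: the low byte stores the octave index
// (re-interpreted as a signed value, so -1 means "detected on the 2x-upsampled image"),
// the next byte stores the layer inside that octave.
static void unpackOctaveSketch(const cv::KeyPoint& kpt, int& octave, int& layer, float& scale)
{
    octave = kpt.octave & 255;                          // low byte
    layer  = (kpt.octave >> 8) & 255;                   // second byte
    octave = octave < 128 ? octave : (-128 | octave);   // sign-extend the octave index
    scale  = octave >= 0 ? 1.f / (1 << octave) : (float)(1 << -octave);
}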
1.3 Application
//
// main.cpp
// learn_from_scratch
//
// Created by Peng,Yuyan on 2019/1/18.
// Copyright © 2019 Peng,Yuyan. All rights reserved.
//
#include <iostream>
#include <vector>
#include "opencv2/opencv_modules.hpp"
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/xfeatures2d/nonfree.hpp"
#endif
using namespace cv;
using namespace cv::xfeatures2d;
using std::cout;
using std::endl;
#define Error -1
int main(){
Mat img1, img2;
img1 = imread("/Users/pengyuyan/Desktop/test_csdn/GSP_test_result/baidu-girl-1/pic_4.jpeg", IMREAD_GRAYSCALE);
img2 = imread("/Users/pengyuyan/Desktop/test_csdn/GSP_test_result/baidu-girl-1/pic_5.jpeg", IMREAD_GRAYSCALE);
if (img1.empty() || img2.empty())
{
cout << "Error reading image" << endl;
return Error;
}
// 1. Try displaying the original image
// namedWindow("Image show");
// imshow("Image show", img_org);
// waitKey(0);
// 2. Try detectAndCompute and drawKeypoints
// Ptr<SIFT> detector = SIFT::create();
Ptr<SURF> detector = SURF::create();
std::vector<KeyPoint> keypoints1, keypoints2;
Mat descriptors1, descriptors2;
// 2.1 Detect keypoints and compute their descriptors
detector->detectAndCompute(img1, noArray(), keypoints1, descriptors1);
detector->detectAndCompute(img2, noArray(), keypoints2, descriptors2);
// 2.2 Draw the detected keypoints
Mat img_keypoints;
drawKeypoints(img1, keypoints1, img_keypoints);
namedWindow("Keypoints show");
imshow("Keypoints show", img_keypoints);
waitKey(0);
// 3. Match keypoints between the two images
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
std::vector<std::vector<DMatch> > knn_matches;
matcher->knnMatch(descriptors1, descriptors2, knn_matches, 2);
// 4. Filter out matches that fail Lowe's ratio test
const float ratio_thresh = 0.7f;
std::vector<DMatch> good_matches;
for(size_t i = 0; i < knn_matches.size(); i++)
{
if(knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance)
{
good_matches.push_back(knn_matches[i][0]);
}
}
// 5. Draw the matches that passed the test
Mat img_matches;
drawMatches(img1, keypoints1, img2, keypoints2, good_matches, img_matches, Scalar::all(-1),
Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
imshow("Good Matches", img_matches);
waitKey();
//
return 0;
}
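One build note from my side: since the demo uses SURF (and optionally SIFT) from xfeatures2d, it needs an OpenCV 4.0.0 build that includes opencv_contrib with OPENCV_ENABLE_NONFREE=ON, and the program has to be linked against the opencv_xfeatures2d module in addition to core, features2d, imgcodecs and highgui.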