#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <vector>
#include <iterator>
#include "ORBextractor.h"
#include <iostream>
using namespace cv;
using namespace std;
namespace ORB_SLAM2
{
const int PATCH_SIZE = 31;
const int HALF_PATCH_SIZE = 15;
const int EDGE_THRESHOLD = 19; // border threshold
// Compute the orientation of a feature with the intensity centroid (IC) method
static float IC_Angle(const Mat& image, Point2f pt, const vector<int>& u_max)
{
int m_01 = 0, m_10 = 0;
const uchar* center = &image.at<uchar>(cvRound(pt.y), cvRound(pt.x)); // cvRound returns the integer closest to its argument
// m10 and m01 are accumulated over a circular patch: first the contribution of the center row to m10,
// then the rows parallel to the x axis are processed in mirrored pairs, so one iteration handles two
// lines of the patch at once (a naive cross-check follows the function).
// Treat the center line differently, v=0; u runs from -15 to +15
for (int u = -HALF_PATCH_SIZE; u <= HALF_PATCH_SIZE; ++u)
m_10 += u * center[u];
// Go line by line in the circular patch
int step = (int)image.step1(); // OpenCV: number of elements per image row
for (int v = 1; v <= HALF_PATCH_SIZE; ++v)
{
// Proceed over the two lines
int v_sum = 0;
int d = u_max[v];
for (int u = -d; u <= d; ++u)
{
int val_plus = center[u + v*step], val_minus = center[u - v*step];
v_sum += (val_plus - val_minus);
m_10 += u * (val_plus + val_minus);
}
m_01 += v * v_sum;
}
// Return the computed angle (fastAtan2 returns degrees)
return fastAtan2((float)m_01, (float)m_10);
}
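// In formula form the intensity centroid is m10 = sum u*I(u,v), m01 = sum v*I(u,v) over the circular
// patch, and theta = atan2(m01, m10). IC_Angle above folds the mirrored rows +v/-v into one pass. As
// an illustrative cross-check (this helper is not part of ORB-SLAM2), the same angle can be computed
// naively over the whole patch:
static float IC_Angle_Naive(const Mat& image, Point2f pt, const vector<int>& u_max)
{
    float m_01 = 0.f, m_10 = 0.f;
    const int cx = cvRound(pt.x), cy = cvRound(pt.y);
    for (int v = -HALF_PATCH_SIZE; v <= HALF_PATCH_SIZE; ++v)
    {
        const int d = u_max[v < 0 ? -v : v]; // half-width of the circular patch at row v
        for (int u = -d; u <= d; ++u)
        {
            const uchar val = image.at<uchar>(cy + v, cx + u);
            m_10 += u * val;
            m_01 += v * val;
        }
    }
    return fastAtan2(m_01, m_10); // should give the same angle (in degrees) as IC_Angle
}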
// Degree-to-radian conversion factor
const float factorPI = (float)(CV_PI/180.f);
// Compute the ORB descriptor of one keypoint
static void computeOrbDescriptor(const KeyPoint& kpt,
const Mat& img, const Point* pattern,
uchar* desc)
{
float angle = (float)kpt.angle*factorPI;
float a = (float)cos(angle), b = (float)sin(angle);
const uchar* center = &img.at<uchar>(cvRound(kpt.pt.y), cvRound(kpt.pt.x));
const int step = (int)img.step;
#define GET_VALUE(idx) \
center[cvRound(pattern[idx].x*b + pattern[idx].y*a)*step + \
cvRound(pattern[idx].x*a - pattern[idx].y*b)]
for (int i = 0; i < 32; ++i, pattern += 16)
{
// ... (loop body omitted here; see the sketch after the function)
}
#undef GET_VALUE
}
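// The loop body elided above packs 8 binary tests per iteration into one descriptor byte: each test
// compares the intensities at a rotated pair of pattern points (via GET_VALUE) and contributes one
// bit. A sketch of the idea (illustrative, not the verbatim ORB-SLAM2 loop body):
//
//     int t0, t1, val;
//     t0 = GET_VALUE(0); t1 = GET_VALUE(1);
//     val = t0 < t1;                  // bit 0
//     t0 = GET_VALUE(2); t1 = GET_VALUE(3);
//     val |= (t0 < t1) << 1;          // bit 1
//     ...                             // bits 2..7 use pattern indices 4..15
//     desc[i] = (uchar)val;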
static int bit_pattern_31_[256*4] =
{
// ... (the 256 sampling-pattern point pairs are omitted here)
};
ORBextractor::ORBextractor(int _nfeatures, float _scaleFactor, int _nlevels,
int _iniThFAST, int _minThFAST):
nfeatures(_nfeatures), scaleFactor(_scaleFactor), nlevels(_nlevels),
iniThFAST(_iniThFAST), minThFAST(_minThFAST)
{
/*********** Determine the number of features per level: a geometric series (see the sketch after the constructor) ***********/
// Per-level scale factors and squared scales (the inverses are filled below)
mvScaleFactor.resize(nlevels);
mvLevelSigma2.resize(nlevels);
mvScaleFactor[0]=1.0f;
mvLevelSigma2[0]=1.0f;
for(int i=1; i<nlevels; i++)
{
mvScaleFactor[i]=mvScaleFactor[i-1]*scaleFactor;
mvLevelSigma2[i]=mvScaleFactor[i]*mvScaleFactor[i];
}
// The constructor also fills mvInvScaleFactor/mvInvLevelSigma2 (the reciprocals of the values above),
// resizes mvImagePyramid, sets the per-level feature budget mnFeaturesPerLevel (a geometric series,
// see the sketch after the constructor) and copies the 256 BRIEF point pairs from bit_pattern_31_
// into pattern.
// Pre-compute the row extents (umax) of the circular patch used by IC_Angle
umax.resize(HALF_PATCH_SIZE + 1);
int v, v0, vmax = cvFloor(HALF_PATCH_SIZE * sqrt(2.f) / 2 + 1);
int vmin = cvCeil(HALF_PATCH_SIZE * sqrt(2.f) / 2);
const double hp2 = HALF_PATCH_SIZE*HALF_PATCH_SIZE;
for (v = 0; v <= vmax; ++v)
umax[v] = cvRound(sqrt(hp2 - v * v));
// Make sure we are symmetric
for (v = HALF_PATCH_SIZE, v0 = 0; v >= vmin; --v)
{
while (umax[v0] == umax[v0 + 1])
++v0;
umax[v] = v0;
++v0;
}
}
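// The per-level feature budget mentioned above follows a geometric series: each level is asked for
// 1/scaleFactor times the features of the previous level, and the counts sum to nfeatures. A minimal
// sketch of that distribution (illustrative helper, not part of ORB-SLAM2; it mirrors what the
// constructor stores in mnFeaturesPerLevel):
static vector<int> GeometricFeatureBudget(int nfeatures, float scaleFactor, int nlevels)
{
    vector<int> featuresPerLevel(nlevels);
    const float factor = 1.0f / scaleFactor;
    // First term a of the series a*(1 + q + ... + q^(nlevels-1)) = nfeatures, with q = factor
    float nDesiredFeaturesPerScale = nfeatures*(1 - factor)/(1 - (float)pow((double)factor, (double)nlevels));
    int sumFeatures = 0;
    for(int level = 0; level < nlevels-1; level++)
    {
        featuresPerLevel[level] = cvRound(nDesiredFeaturesPerScale);
        sumFeatures += featuresPerLevel[level];
        nDesiredFeaturesPerScale *= factor;
    }
    // The coarsest level takes whatever is left so the total is exactly nfeatures
    featuresPerLevel[nlevels-1] = max(nfeatures - sumFeatures, 0);
    return featuresPerLevel;
}
// For typical settings (nfeatures=1000, scaleFactor=1.2, nlevels=8) this yields roughly
// 217, 181, 151, 126, 105, 87, 73 and 60 features per level.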
// Compute the orientation of every keypoint
static void computeOrientation(const Mat& image, vector<KeyPoint>& keypoints, const vector<int>& umax)
{
for (vector<KeyPoint>::iterator keypoint = keypoints.begin(),
keypointEnd = keypoints.end(); keypoint != keypointEnd; ++keypoint)
{
keypoint->angle = IC_Angle(image, keypoint->pt, umax);
}
}
void ExtractorNode::DivideNode(ExtractorNode &n1, ExtractorNode &n2, ExtractorNode &n3, ExtractorNode &n4)
{
const int halfX = ceil(static_cast<float>(UR.x-UL.x)/2);
const int halfY = ceil(static_cast<float>(BR.y-UL.y)/2);
//Define boundaries of childs
n1.UL = UL;
n1.UR = cv::Point2i(UL.x+halfX,UL.y);
n1.BL = cv::Point2i(UL.x,UL.y+halfY);
n1.BR = cv::Point2i(UL.x+halfX,UL.y+halfY);
n1.vKeys.reserve(vKeys.size());
n2.UL = n1.UR;
n2.UR = UR;
n2.BL = n1.BR;
n2.BR = cv::Point2i(UR.x,UL.y+halfY);
n2.vKeys.reserve(vKeys.size());
n3.UL = n1.BL;
n3.UR = n1.BR;
n3.BL = BL;
n3.BR = cv::Point2i(n1.BR.x,BL.y);
n3.vKeys.reserve(vKeys.size());
n4.UL = n3.UR;
n4.UR = n2.BR;
n4.BL = n3.BR;
n4.BR = BR;
n4.vKeys.reserve(vKeys.size());
//Associate points to childs
for(size_t i=0;i<vKeys.size();i++)
{
const cv::KeyPoint &kp = vKeys[i];
if(kp.pt.x<n1.UR.x)
{
if(kp.pt.y<n1.BR.y) n1.vKeys.push_back(kp);
else n3.vKeys.push_back(kp);
}
else if(kp.pt.y<n1.BR.y) n2.vKeys.push_back(kp);
else n4.vKeys.push_back(kp);
}
// A child that ends up with a single keypoint cannot be split any further
if(n1.vKeys.size()==1) n1.bNoMore = true;
if(n2.vKeys.size()==1) n2.bNoMore = true;
if(n3.vKeys.size()==1) n3.bNoMore = true;
if(n4.vKeys.size()==1) n4.bNoMore = true;
}
// DistributeOctTree: if lNodes.size()+nToExpand*3 > N, one more round of splitting is enough to reach
// the required number of nodes. vSizeAndPointerToNode stores the children (n1..n4) from the previous
// round that can still be split; they are split starting with the nodes holding the most keypoints,
// and the results keep accumulating in lNodes. After every split the count is checked: once lNodes
// holds at least N nodes, or a full pass adds no new node, the while(!bFinish) loop ends. Finally the
// keypoint with the highest response is kept from each node (each region); a numeric example follows
// the function.
vector<cv::KeyPoint> ORBextractor::DistributeOctTree(const vector<cv::KeyPoint>& vToDistributeKeys, const int &minX,
const int &maxX, const int &minY, const int &maxY, const int &N, const int &level)
{
// Compute how many initial nodes there are
const int nIni = round(static_cast<float>(maxX-minX)/(maxY-minY)); // number of horizontal cells (rounded to nearest)
const float hX = static_cast<float>(maxX-minX)/nIni; // width of each initial cell
list<ExtractorNode> lNodes;
vector<ExtractorNode*> vpIniNodes;
vpIniNodes.resize(nIni);
for(int i=0; i<nIni; i++)
{
ExtractorNode ni;
ni.UL = cv::Point2i(hX*static_cast<float>(i),0);
ni.UR = cv::Point2i(hX*static_cast<float>(i+1),0);
ni.BL = cv::Point2i(ni.UL.x,maxY-minY);
ni.BR = cv::Point2i(ni.UR.x,maxY-minY);
ni.vKeys.reserve(vToDistributeKeys.size());
lNodes.push_back(ni);
vpIniNodes[i] = &lNodes.back();
}
//Associate points to childs
for(size_t i=0;i<vToDistributeKeys.size();i++)
{
const cv::KeyPoint &kp = vToDistributeKeys[i];
vpIniNodes[kp.pt.x/hX]->vKeys.push_back(kp);
}
list<ExtractorNode>::iterator lit = lNodes.begin();
while(lit!=lNodes.end())
{
if(lit->vKeys.size()==1)
{
lit->bNoMore=true;
lit++;
}
else if(lit->vKeys.empty())
lit = lNodes.erase(lit);
else
lit++;
}
bool bFinish = false;
int iteration = 0;
vector<pair<int,ExtractorNode*> > vSizeAndPointerToNode;
vSizeAndPointerToNode.reserve(lNodes.size()*4);
// Partition the image into regions with a quadtree, according to the keypoint distribution
while(!bFinish)
{
iteration++;
int prevSize = lNodes.size();
lit = lNodes.begin();
int nToExpand = 0;
vSizeAndPointerToNode.clear();
// Subdivide the current regions
while(lit!=lNodes.end())
{
if(lit->bNoMore)
{
// If node only contains one point do not subdivide and continue
lit++;
continue;
}
else
{
// If more than one point, subdivide
ExtractorNode n1,n2,n3,n4;
lit->DivideNode(n1,n2,n3,n4); // subdivide into four child regions
// Add childs if they contain points
if(n1.vKeys.size()>0)
{
lNodes.push_front(n1);
if(n1.vKeys.size()>1)
{
nToExpand++;
vSizeAndPointerToNode.push_back(make_pair(n1.vKeys.size(),&lNodes.front()));
lNodes.front().lit = lNodes.begin();
}
}
if(n2.vKeys.size()>0)
{
lNodes.push_front(n2);
if(n2.vKeys.size()>1)
{
nToExpand++;
vSizeAndPointerToNode.push_back(make_pair(n2.vKeys.size(),&lNodes.front()));
lNodes.front().lit = lNodes.begin();
}
}
if(n3.vKeys.size()>0)
{
lNodes.push_front(n3);
if(n3.vKeys.size()>1)
{
nToExpand++;
vSizeAndPointerToNode.push_back(make_pair(n3.vKeys.size(),&lNodes.front()));
lNodes.front().lit = lNodes.begin();
}
}
if(n4.vKeys.size()>0)
{
lNodes.push_front(n4);
if(n4.vKeys.size()>1)
{
nToExpand++;
vSizeAndPointerToNode.push_back(make_pair(n4.vKeys.size(),&lNodes.front()));
lNodes.front().lit = lNodes.begin();
}
}
lit=lNodes.erase(lit);
continue;
}
}
// Finish if there are more nodes than required features
// or all nodes contain just one point
if((int)lNodes.size()>=N || (int)lNodes.size()==prevSize)
{
bFinish = true;
}
// When one more round of splitting would push the node count above the required number
else if(((int)lNodes.size()+nToExpand*3)>N)
{
while(!bFinish)
{
prevSize = lNodes.size();
vector<pair<int,ExtractorNode*> > vPrevSizeAndPointerToNode = vSizeAndPointerToNode;
vSizeAndPointerToNode.clear();
// Sort the splittable nodes so the regions with more keypoints are split first
sort(vPrevSizeAndPointerToNode.begin(),vPrevSizeAndPointerToNode.end());
for(int j=vPrevSizeAndPointerToNode.size()-1;j>=0;j--)
{
ExtractorNode n1,n2,n3,n4;
vPrevSizeAndPointerToNode[j].second->DivideNode(n1,n2,n3,n4);
// Add childs if they contain points
if(n1.vKeys.size()>0)
{
lNodes.push_front(n1);
if(n1.vKeys.size()>1)
{
vSizeAndPointerToNode.push_back(make_pair(n1.vKeys.size(),&lNodes.front()));
lNodes.front().lit = lNodes.begin();
}
}
if(n2.vKeys.size()>0)
{
lNodes.push_front(n2);
if(n2.vKeys.size()>1)
{
vSizeAndPointerToNode.push_back(make_pair(n2.vKeys.size(),&lNodes.front()));
lNodes.front().lit = lNodes.begin();
}
}
if(n3.vKeys.size()>0)
{
lNodes.push_front(n3);
if(n3.vKeys.size()>1)
{
vSizeAndPointerToNode.push_back(make_pair(n3.vKeys.size(),&lNodes.front()));
lNodes.front().lit = lNodes.begin();
}
}
if(n4.vKeys.size()>0)
{
lNodes.push_front(n4);
if(n4.vKeys.size()>1)
{
vSizeAndPointerToNode.push_back(make_pair(n4.vKeys.size(),&lNodes.front()));
lNodes.front().lit = lNodes.begin();
}
}
lNodes.erase(vPrevSizeAndPointerToNode[j].second->lit);
if((int)lNodes.size()>=N)
break;
}
if((int)lNodes.size()>=N || (int)lNodes.size()==prevSize)
bFinish = true;
}
}
}
// Retain the best point in each node
// Keep only the keypoint with the highest response in each region
vector<cv::KeyPoint> vResultKeys;
vResultKeys.reserve(nfeatures);
for(list<ExtractorNode>::iterator lit=lNodes.begin(); lit!=lNodes.end(); lit++)
{
vector<cv::KeyPoint> &vNodeKeys = lit->vKeys;
cv::KeyPoint* pKP = &vNodeKeys[0];
float maxResponse = pKP->response;
for(size_t k=1;k<vNodeKeys.size();k++)
{
if(vNodeKeys[k].response>maxResponse)
{
pKP = &vNodeKeys[k];
maxResponse = vNodeKeys[k].response;
}
}
vResultKeys.push_back(*pKP);
}
return vResultKeys;
}
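// Note on the termination test above: each split removes one node from lNodes and inserts at most
// four, a net gain of at most 3 nodes per split. So if nToExpand nodes are still splittable, one more
// full round can raise the count to at most lNodes.size()+3*nToExpand. Illustrative numbers: with
// N = 100, 40 nodes of which 25 are splittable, 40 + 3*25 = 115 >= 100, so the refinement loop is
// entered, the most populated nodes are split first, and splitting stops as soon as the node count
// reaches N.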
// Compute keypoints for every level of the image pyramid. Each level is divided into a grid of small
// cells and FAST corners are detected independently in each cell; the corners are then filtered with
// DistributeOctTree so that the keypoints end up evenly distributed over the image.
void ORBextractor::ComputeKeyPointsOctTree(vector<vector<KeyPoint> >& allKeypoints)
{
allKeypoints.resize(nlevels);
const float W = 30; // cell (window) size in pixels
// 对每一层图像做处理
for (int level = 0; level < nlevels; ++level) // compute the detection borders of each level
{
const int minBorderX = EDGE_THRESHOLD-3; // cropped border: 19-3=16
const int minBorderY = minBorderX;
const int maxBorderX = mvImagePyramid[level].cols-EDGE_THRESHOLD+3;
const int maxBorderY = mvImagePyramid[level].rows-EDGE_THRESHOLD+3;
vector<cv::KeyPoint> vToDistributeKeys;
vToDistributeKeys.reserve(nfeatures*10);
// width and height of the detection area
const float width = (maxBorderX-minBorderX);
const float height = (maxBorderY-minBorderY);
// number of grid cells in each direction
const int nCols = width/W;
const int nRows = height/W;
// width and height of each cell
const int wCell = ceil(width/nCols);
const int hCell = ceil(height/nRows);
// Iterate over the rows of the cell grid
for(int i=0; i<nRows; i++)
{
const float iniY = minBorderY+i*hCell;
float maxY = iniY+hCell+6;
// skip rows that start outside the detection area
if(iniY>=maxBorderY-3)
continue;
// clamp maxY to the border
if(maxY>maxBorderY)
maxY = maxBorderY;
// Iterate over the columns of the cell grid
for(int j=0; j<nCols; j++)
{
const float iniX = minBorderX+j*wCell;
float maxX = iniX+wCell+6;
if(iniX>=maxBorderX-6)
continue;
if(maxX>maxBorderX)
maxX = maxBorderX;
// FAST corner detection with an adaptive threshold
vector<cv::KeyPoint> vKeysCell;
FAST(mvImagePyramid[level].rowRange(iniY,maxY).colRange(iniX,maxX),
vKeysCell,iniThFAST,true);
// If no FAST corners were found, retry with the lower threshold
if(vKeysCell.empty())
{
FAST(mvImagePyramid[level].rowRange(iniY,maxY).colRange(iniX,maxX),
vKeysCell,minThFAST,true);
}
// Shift the corners from cell coordinates to level coordinates and collect them in vToDistributeKeys
if(!vKeysCell.empty())
{
for(vector<cv::KeyPoint>::iterator vit=vKeysCell.begin(); vit!=vKeysCell.end();vit++)
{
(*vit).pt.x+=j*wCell;
(*vit).pt.y+=i*hCell;
vToDistributeKeys.push_back(*vit);
}
}
}
}
vector<KeyPoint> & keypoints = allKeypoints[level];
keypoints.reserve(nfeatures);
// Prune to mnFeaturesPerLevel[level] keypoints for this level; DistributeOctTree keeps the corner with the highest response in each region
keypoints = DistributeOctTree(vToDistributeKeys, minBorderX, maxBorderX,
minBorderY, maxBorderY,mnFeaturesPerLevel[level], level);
const int scaledPatchSize = PATCH_SIZE*mvScaleFactor[level];
// Add border to coordinates and scale information
const int nkps = keypoints.size();
for(int i=0; i<nkps ; i++)
{
// shift back to level coordinates and record octave and patch size
keypoints[i].pt.x+=minBorderX;
keypoints[i].pt.y+=minBorderY;
keypoints[i].octave=level;
keypoints[i].size = scaledPatchSize;
}
}
// compute orientations for all levels
for (int level = 0; level < nlevels; ++level)
computeOrientation(mvImagePyramid[level], allKeypoints[level], umax);
}
// Older grid-based keypoint extraction (kept for reference; not called by default, see operator())
void ORBextractor::ComputeKeyPointsOld(std::vector<std::vector<KeyPoint> > &allKeypoints)
{
allKeypoints.resize(nlevels);
float imageRatio = (float)mvImagePyramid[0].cols/mvImagePyramid[0].rows; // image aspect ratio
for (int level = 0; level < nlevels; ++level)
{
const int nDesiredFeatures = mnFeaturesPerLevel[level];
const int levelCols = sqrt((float)nDesiredFeatures/(5*imageRatio)); // about 5 points per cell, as mentioned in the paper?
const int levelRows = imageRatio*levelCols;
// borders of the detection area for this level
const int minBorderX = EDGE_THRESHOLD;
const int minBorderY = minBorderX;
const int maxBorderX = mvImagePyramid[level].cols-EDGE_THRESHOLD;
const int maxBorderY = mvImagePyramid[level].rows-EDGE_THRESHOLD;
// number of rows and columns the detection area is divided into
const int W = maxBorderX - minBorderX;
const int H = maxBorderY - minBorderY;
const int cellW = ceil((float)W/levelCols);
const int cellH = ceil((float)H/levelRows);
const int nCells = levelRows*levelCols;
const int nfeaturesCell = ceil((float)nDesiredFeatures/nCells); // desired number of keypoints per cell
// cellKeyPoints is a levelRows x levelCols grid of keypoint vectors; the per-cell counters below are
// all initialized to 0/false
vector<vector<vector<KeyPoint> > > cellKeyPoints(levelRows, vector<vector<KeyPoint> >(levelCols));
vector<vector<int> > nToRetain(levelRows,vector<int>(levelCols,0));
vector<vector<int> > nTotal(levelRows,vector<int>(levelCols,0));
vector<vector<bool> > bNoMore(levelRows,vector<bool>(levelCols,false));
vector<int> iniXCol(levelCols);
vector<int> iniYRow(levelRows);
int nNoMore = 0;
int nToDistribute = 0;
float hY = cellH + 6;
for(int i=0; i<levelRows; i++)
{
const float iniY = minBorderY + i*cellH - 3;
iniYRow[i] = iniY;
if(i == levelRows-1) // last row
{
hY = maxBorderY+3-iniY; // = 6+(maxBorderY-minBorderY)-(levelRows-1)*cellH, roughly cellH+6
if(hY<=0) // hY is the height of the cell image (rows iniY..iniY+hY) and must stay positive
continue; // by the formula above this should not normally happen
}
float hX = cellW + 6;
for(int j=0; j<levelCols; j++)
{
float iniX;
if(i==0)
{
iniX = minBorderX + j*cellW - 3;
iniXCol[j] = iniX;
}
else
{
iniX = iniXCol[j];
}
if(j == levelCols-1) // last column: clamp the cell width
{
hX = maxBorderX+3-iniX;
if(hX<=0)
continue;
}
Mat cellImage = mvImagePyramid[level].rowRange(iniY,iniY+hY).colRange(iniX,iniX+hX);
cellKeyPoints[i][j].reserve(nfeaturesCell*5);
// FAST with the high threshold first; retry with the low threshold if too few corners are found
FAST(cellImage,cellKeyPoints[i][j],iniThFAST,true);
if(cellKeyPoints[i][j].size()<=3)
{
cellKeyPoints[i][j].clear();
FAST(cellImage,cellKeyPoints[i][j],minThFAST,true);
}
const int nKeys = cellKeyPoints[i][j].size();
nTotal[i][j] = nKeys;
if(nKeys>nfeaturesCell) // the cell produced more corners than its quota
{
nToRetain[i][j] = nfeaturesCell; // keep only the per-cell quota
bNoMore[i][j] = false;
}
else
{
nToRetain[i][j] = nKeys;
nToDistribute += nfeaturesCell-nKeys;
bNoMore[i][j] = true;
nNoMore++;
}
}
}
// Retain by score: while there is still an undistributed budget and some cells are not yet saturated,
// raise the per-cell quota and try again (a worked numeric example follows the function)
while(nToDistribute>0 && nNoMore<nCells)
{
// spread the leftover budget evenly over the cells that can still take more points
int nNewFeaturesCell = nfeaturesCell + ceil((float)nToDistribute/(nCells-nNoMore));
nToDistribute = 0;
for(int i=0; i<levelRows; i++)
{
for(int j=0; j<levelCols; j++)
{
if(!bNoMore[i][j])
{
if(nTotal[i][j]>nNewFeaturesCell) // the cell already has more than the raised quota
{
nToRetain[i][j] = nNewFeaturesCell; // keep only the raised quota
bNoMore[i][j] = false;
}
else
{
nToRetain[i][j] = nTotal[i][j];
nToDistribute += nNewFeaturesCell-nTotal[i][j]; // deficit that still has to be redistributed
bNoMore[i][j] = true; // this cell is saturated: it has given everything it has
nNoMore++;
}
}
}
}
}
vector<KeyPoint> & keypoints = allKeypoints[level];
keypoints.reserve(nDesiredFeatures*2);
const int scaledPatchSize = PATCH_SIZE*mvScaleFactor[level];
// Retain by score and transform coordinates: shift the keypoints back by the border offsets and add their scale information
for(int i=0; i<levelRows; i++)
{
for(int j=0; j<levelCols; j++)
{
vector<KeyPoint> &keysCell = cellKeyPoints[i][j];
KeyPointsFilter::retainBest(keysCell,nToRetain[i][j]); // keep the best-scoring corners
if((int)keysCell.size()>nToRetain[i][j])
keysCell.resize(nToRetain[i][j]);
for(size_t k=0, kend=keysCell.size(); k<kend; k++)
{
keysCell[k].pt.x+=iniXCol[j];
keysCell[k].pt.y+=iniYRow[i];
keysCell[k].octave=level;
keysCell[k].size = scaledPatchSize;
keypoints.push_back(keysCell[k]);
}
}
}
if((int)keypoints.size()>nDesiredFeatures)
{
KeyPointsFilter::retainBest(keypoints,nDesiredFeatures);
keypoints.resize(nDesiredFeatures);
}
}
// and compute orientations
for (int level = 0; level < nlevels; ++level)
computeOrientation(mvImagePyramid[level], allKeypoints[level], umax);
}
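// Worked example (illustrative numbers) for the redistribution loop above: with nCells = 10 and
// nfeaturesCell = 20, suppose two cells yield only 5 corners each. Then nToDistribute = 2*(20-5) = 30
// and nNoMore = 2, so the next pass raises the quota of the remaining 8 cells to
// nNewFeaturesCell = 20 + ceil(30/8) = 24. Cells that still cannot reach the new quota push their
// deficit back into nToDistribute, and the loop repeats until nothing is left to redistribute or
// every cell is saturated (nNoMore == nCells).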
// Compute the descriptors of all keypoints of one image
static void computeDescriptors(const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors,
const vector<Point>& pattern)
{
descriptors = Mat::zeros((int)keypoints.size(), 32, CV_8UC1);
for (size_t i = 0; i < keypoints.size(); i++)
computeOrbDescriptor(keypoints[i], image, &pattern[0], descriptors.ptr((int)i));
}
// Inputs:
// _image: grayscale input image
// _mask: mask (not used here)
// _keypoints: output keypoints
// _descriptors: output descriptors
// operator() takes the image and fills the reference parameters _keypoints and _descriptors with the
// detected features and their descriptors. With this design a single ORBextractor only needs to be
// constructed once to extract features for all images (a usage sketch follows the function).
void ORBextractor::operator()( InputArray _image, InputArray _mask, vector<KeyPoint>& _keypoints,
OutputArray _descriptors)
{
if(_image.empty())
return;
Mat image = _image.getMat();
assert(image.type() == CV_8UC1 ); // abort if the image is not 8-bit grayscale
// Pre-compute the scale pyramid
// build the image pyramid
ComputePyramid(image);
// detect keypoints on every pyramid level
vector<vector<KeyPoint> > allKeypoints; // one vector of keypoints per level
ComputeKeyPointsOctTree(allKeypoints);
//ComputeKeyPointsOld(allKeypoints);
Mat descriptors;
int nkeypoints = 0;
for (int level = 0; level < nlevels; ++level)
nkeypoints += (int)allKeypoints[level].size();
if( nkeypoints == 0 )
_descriptors.release();
else
{
_descriptors.create(nkeypoints, 32, CV_8U);
descriptors = _descriptors.getMat();
}
_keypoints.clear();
_keypoints.reserve(nkeypoints);
int offset = 0;
for (int level = 0; level < nlevels; ++level)
{
vector<KeyPoint>& keypoints = allKeypoints[level];
int nkeypointsLevel = (int)keypoints.size();
if(nkeypointsLevel==0)
continue;
// preprocess the resized image: Gaussian blur
Mat workingMat = mvImagePyramid[level].clone();
GaussianBlur(workingMat, workingMat, Size(7, 7), 2, 2, BORDER_REFLECT_101);
// Compute the descriptors using the sampling pattern above (the point pairs follow a Gaussian distribution)
Mat desc = descriptors.rowRange(offset, offset + nkeypointsLevel);
computeDescriptors(workingMat, keypoints, desc, pattern);
offset += nkeypointsLevel;
// Scale keypoint coordinates back to the resolution of the original image
if (level != 0)
{
float scale = mvScaleFactor[level]; //getScale(level, firstLevel, scaleFactor);
for (vector<KeyPoint>::iterator keypoint = keypoints.begin(),
keypointEnd = keypoints.end(); keypoint != keypointEnd; ++keypoint)
keypoint->pt *= scale;
}
// append this level's keypoints (keypoints.begin()..keypoints.end()) to the output vector
// And add the keypoints to the output
_keypoints.insert(_keypoints.end(), keypoints.begin(), keypoints.end());
}
}
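// A minimal usage sketch (illustrative: the file name, the parameter values and this helper are
// hypothetical, not part of ORB-SLAM2):
static void ExampleExtractORB()
{
    Mat im = imread("frame.png", 0);              // load as 8-bit grayscale
    ORBextractor extractor(1000, 1.2f, 8, 20, 7); // nfeatures, scaleFactor, nlevels, iniThFAST, minThFAST
    vector<KeyPoint> keys;
    Mat descriptors;
    extractor(im, cv::Mat(), keys, descriptors);  // operator() fills keys and descriptors
}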
/**
* Build the image pyramid
* @param image input image
*/
void ORBextractor::ComputePyramid(cv::Mat image)
{
for (int level = 0; level < nlevels; ++level)
{
float scale = mvInvScaleFactor[level];
Size sz(cvRound((float)image.cols*scale), cvRound((float)image.rows*scale));
Size wholeSize(sz.width + EDGE_THRESHOLD*2, sz.height + EDGE_THRESHOLD*2);
Mat temp(wholeSize, image.type()), masktemp;
mvImagePyramid[level] = temp(Rect(EDGE_THRESHOLD, EDGE_THRESHOLD, sz.width, sz.height));
// Compute the resized image
if( level != 0 )
{
resize(mvImagePyramid[level-1], mvImagePyramid[level], sz, 0, 0, cv::INTER_LINEAR);
copyMakeBorder(mvImagePyramid[level], temp, EDGE_THRESHOLD, EDGE_THRESHOLD, EDGE_THRESHOLD, EDGE_THRESHOLD,
BORDER_REFLECT_101+BORDER_ISOLATED);
}
else
{
copyMakeBorder(image, temp, EDGE_THRESHOLD, EDGE_THRESHOLD, EDGE_THRESHOLD, EDGE_THRESHOLD,
BORDER_REFLECT_101);
}
}
}
} //namespace ORB_SLAM