My email is [email protected], feel free to get in touch!
Let's continue with tracking.cc.
| Function | Decide whether a new keyframe needs to be created, i.e. the criteria for inserting keyframes |
| -------- | ---------------------------------------------------------------------------------------------- |
| Steps | 1. Enough frames (mMaxFrames) have passed since the last relocalization, or the map still holds only a few keyframes; otherwise no keyframe is created. 2. Enough frames have passed since the last keyframe insertion, or local mapping is idle; otherwise no keyframe is created. 3. The current frame tracks more than a minimum number of points; otherwise no keyframe is created. 4. The current frame tracks fewer than 90% of the points tracked by the reference keyframe, and still tracks more than 15 points; otherwise no keyframe is created. |
bool Tracking::NeedNewKeyFrame()
{
// Step 1: if the user has selected localization-only mode in the viewer, do not insert keyframes
// Inserting a keyframe creates new MapPoints, so in this mode neither the point cloud nor the keyframes in the map grow
if(mbOnlyTracking)// tracking-only mode: never create keyframes
return false;
// If Local Mapping is frozen by a Loop Closure (stopped or stop requested), do not insert keyframes
if(mpLocalMapper->isStopped() || mpLocalMapper->stopRequested())
return false;
const int nKFs = mpMap->KeyFramesInMap();// number of keyframes in the map
// Do not insert keyframes if not enough frames have passed from last relocalisation
// Step 2: check whether too little time has passed since the last relocalization
// mCurrentFrame.mnId is the id of the current frame
// mnLastRelocFrameId is the id of the frame at which the last relocalization happened
// mMaxFrames is set to the input frame rate
// insertion is considered if the map still holds few keyframes,
// or if more than 1s (mMaxFrames frames) has passed since the last relocalization
if(mCurrentFrame.mnId<mnLastRelocFrameId+mMaxFrames && nKFs>mMaxFrames)
return false;
// Tracked MapPoints in the reference keyframe
// Step 3: get the number of MapPoints tracked by the reference keyframe
// UpdateLocalKeyFrames sets the keyframe with the highest covisibility with the current frame as its reference keyframe
int nMinObs = 3;
if(nKFs<=2)
nMinObs=2;
int nRefMatches = mpReferenceKF->TrackedMapPoints(nMinObs);// number of MapPoints tracked by the reference keyframe (with at least nMinObs observations)
// Local Mapping accept keyframes?
// Step 4: query whether the local mapper is busy
bool bLocalMappingIdle = mpLocalMapper->AcceptKeyFrames();
// Stereo & RGB-D: Ratio of close "matches to map"/"total matches"
// "total matches = matches to map + visual odometry matches"
// Visual odometry matches will become MapPoints if we insert a keyframe.
// This ratio measures how many MapPoints we could create if we insert a keyframe.
// Step 5: for stereo/RGB-D, count how many close points could become MapPoints (nTotal)
// and how many of them are already tracked as MapPoints in the map (nMap)
int nMap = 0;// points already matched to MapPoints in the map
int nTotal= 0;// total matches
if(mSensor!=System::MONOCULAR)// stereo or RGB-D
{
for(int i =0; i<mCurrentFrame.N; i++)
{
if(mCurrentFrame.mvDepth[i]>0 && mCurrentFrame.mvDepth[i]<mThDepth)
{
nTotal++;// a MapPoint could be created from this close point
if(mCurrentFrame.mvpMapPoints[i])
if(mCurrentFrame.mvpMapPoints[i]->Observations()>0)// the MapPoint is observed by at least one keyframe
nMap++;// number of points already tracked as MapPoints in the map
}
}
}
else
{
// There are no visual odometry matches in the monocular case
nMap=1;
nTotal=1;
}
const float ratioMap = (float)nMap/(float)(std::max(1,nTotal));
// Step 6: decide whether to insert a keyframe
// Thresholds
// inlier-ratio threshold relative to the matches of the reference keyframe
float thRefRatio = 0.75f;
if(nKFs<2)
thRefRatio = 0.4f;// with a single keyframe in the map, use a very permissive insertion threshold
if(mSensor==System::MONOCULAR)
thRefRatio = 0.9f;
// threshold on the ratio of matches that are already associated to MapPoints in the map
float thMapRatio = 0.35f;
if(mnMatchesInliers>300)
thMapRatio = 0.20f;
// Condition 1a: More than "MaxFrames" have passed from last keyframe insertion
// a long time has passed since the last keyframe insertion
const bool c1a = mCurrentFrame.mnId>=mnLastKeyFrameId+mMaxFrames;
// Condition 1b: More than "MinFrames" have passed and Local Mapping is idle
// local mapping is idle
const bool c1b = (mCurrentFrame.mnId>=mnLastKeyFrameId+mMinFrames && bLocalMappingIdle);
// Condition 1c: tracking is weak
// tracking is close to failing; 0.25 and 0.3 are deliberately low thresholds
const bool c1c = mSensor!=System::MONOCULAR && (mnMatchesInliers<nRefMatches*0.25 || ratioMap<0.3f);
// Condition 2: few tracked points compared to the reference keyframe, or few matches to the map, but more than 15 inliers
const bool c2 = ((mnMatchesInliers<nRefMatches*thRefRatio || ratioMap<thMapRatio) && mnMatchesInliers>15);
if((c1a||c1b||c1c)&&c2)
{
// If the mapping accepts keyframes, insert keyframe.
// Otherwise send a signal to interrupt BA
if(bLocalMappingIdle)
{
return true;
}
else
{
mpLocalMapper->InterruptBA();// interrupt bundle adjustment
if(mSensor!=System::MONOCULAR)
{
// do not let too many keyframes pile up in the queue
// tracking does not insert the keyframe directly: it is first pushed into mlNewKeyFrames,
// and the local mapper later pops them one by one and inserts them into mspKeyFrames
if(mpLocalMapper->KeyframesInQueue()<3)// fewer than 3 keyframes waiting in the queue
return true;
else
return false;
}
else
return false;
}
}
else
return false;
}
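To see the decision logic in isolation, here is a minimal sketch of the same conditions restated on plain inputs. This is my own illustration, not part of ORB-SLAM2; all parameter names are invented for the example, and the keyframe-queue handling above is left out.

// Hypothetical, simplified restatement of the NeedNewKeyFrame decision.
// All names are illustrative; the real values are member variables of Tracking.
bool NeedKeyFrameSketch(long currentId, long lastKFId, long lastRelocId,
                        int maxFrames, int minFrames, int nKFs,
                        int matchesInliers, int refMatches,
                        float ratioMap, bool mappingIdle, bool monocular)
{
    // too soon after a relocalization while the map already has many keyframes
    if(currentId < lastRelocId + maxFrames && nKFs > maxFrames)
        return false;

    float thRefRatio = monocular ? 0.9f : (nKFs < 2 ? 0.4f : 0.75f);
    float thMapRatio = matchesInliers > 300 ? 0.20f : 0.35f;

    bool c1a = currentId >= lastKFId + maxFrames;                     // long time without a keyframe
    bool c1b = currentId >= lastKFId + minFrames && mappingIdle;      // local mapping is idle
    bool c1c = !monocular &&
               (matchesInliers < refMatches*0.25 || ratioMap < 0.3f); // tracking is weak
    bool c2  = (matchesInliers < refMatches*thRefRatio || ratioMap < thMapRatio)
               && matchesInliers > 15;                                // few matches vs. reference KF

    return (c1a || c1b || c1c) && c2;
}

Note that the monocular threshold assignment comes last, so a monocular run still uses 0.9 even when the map has fewer than two keyframes, matching the order of the assignments in the function above.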
| Function | Create a new keyframe |
| -------- | --------------------- |
| Steps | 1. Wrap the current frame into a KeyFrame 2. Set this keyframe as the reference keyframe of the current frame 3. For stereo or RGB-D cameras, create new MapPoints for the current frame |
void Tracking::CreateNewKeyFrame()
{
if(!mpLocalMapper->SetNotStop(true))
return;
// Step 1: wrap the current frame into a keyframe
KeyFrame* pKF = new KeyFrame(mCurrentFrame,mpMap,mpKeyFrameDB);
// Step 2: set this keyframe as the reference keyframe of the current frame
// UpdateLocalKeyFrames sets the keyframe with the highest covisibility with the current frame as its reference keyframe
mpReferenceKF = pKF;
mCurrentFrame.mpReferenceKF = pKF;
// this block does the same job as the corresponding part of UpdateLastFrame
// Step 3: for stereo or RGB-D cameras, create new MapPoints for the current frame
if(mSensor!=System::MONOCULAR)
{
// compute mRcw, mtcw, mRwc and mOw from Tcw
mCurrentFrame.UpdatePoseMatrices();
// We sort points by the measured depth by the stereo/RGBD sensor.
// We create all those MapPoints whose depth < mThDepth.
// If there are less than 100 close points we create the 100 closest.
// Step 3.1: collect the keypoints of the current frame whose depth is below the threshold
// new MapPoints are created for depth < mThDepth
vector<pair<float,int> > vDepthIdx;
vDepthIdx.reserve(mCurrentFrame.N);
for(int i=0; i<mCurrentFrame.N; i++)
{
float z = mCurrentFrame.mvDepth[i];
if(z>0)
{
vDepthIdx.push_back(make_pair(z,i));
}
}
if(!vDepthIdx.empty())
{
// Step 3.2: sort by depth, nearest points first
sort(vDepthIdx.begin(),vDepthIdx.end());
// Step 3.3: wrap the close points into MapPoints
int nPoints = 0;
for(size_t j=0; j<vDepthIdx.size();j++)
{
int i = vDepthIdx[j].second;
bool bCreateNew = false;
// create a new MapPoint if this keypoint has no associated MapPoint yet, or its MapPoint has no observations
MapPoint* pMP = mCurrentFrame.mvpMapPoints[i];
if(!pMP)
bCreateNew = true;
else if(pMP->Observations()<1)
{
bCreateNew = true;
mCurrentFrame.mvpMapPoints[i] = static_cast<MapPoint*>(NULL);
}
if(bCreateNew)
{
cv::Mat x3D = mCurrentFrame.UnprojectStereo(i);
MapPoint* pNewMP = new MapPoint(x3D,pKF,mpMap);
// these attribute updates must be done every time a new MapPoint is created
pNewMP->AddObservation(pKF,i);
pKF->AddMapPoint(pNewMP,i);
pNewMP->ComputeDistinctiveDescriptors();
pNewMP->UpdateNormalAndDepth();
mpMap->AddMapPoint(pNewMP);
mCurrentFrame.mvpMapPoints[i]=pNewMP;
nPoints++;
}
else
{
nPoints++;
}
// these thresholds determine how dense the stereo/RGB-D point cloud of the map becomes,
// but changing them just to get a denser map is not a good idea,
// because these MapPoints take part in the whole SLAM pipeline afterwards
if(vDepthIdx[j].first>mThDepth && nPoints>100)
break;
}
}
}
mpLocalMapper->InsertKeyFrame(pKF);
mpLocalMapper->SetNotStop(false);
mnLastKeyFrameId = mCurrentFrame.mnId;
mpLastKeyFrame = pKF;
}
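The depth-sorting pattern in steps 3.2/3.3 can be looked at on its own. Below is a small, self-contained sketch of just that pattern; depths stands in for mCurrentFrame.mvDepth and thDepth for mThDepth, and the function only returns the selected keypoint indices instead of creating MapPoints.

#include <algorithm>
#include <utility>
#include <vector>

// Illustration of the "sort by depth, take the closest points" rule used above.
std::vector<int> CloseKeypointIndices(const std::vector<float>& depths, float thDepth)
{
    std::vector<std::pair<float,int> > vDepthIdx;
    for(size_t i = 0; i < depths.size(); i++)
        if(depths[i] > 0)
            vDepthIdx.push_back(std::make_pair(depths[i], (int)i));

    std::sort(vDepthIdx.begin(), vDepthIdx.end());   // nearest points come first

    std::vector<int> selected;
    for(size_t j = 0; j < vDepthIdx.size(); j++)
    {
        selected.push_back(vDepthIdx[j].second);
        // same stopping rule as above: once past the depth threshold,
        // stop as soon as more than 100 points have been collected
        if(vDepthIdx[j].first > thDepth && (int)selected.size() > 100)
            break;
    }
    return selected;
}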
| Function | Find the local MapPoints that fall inside the current frame's field of view and match them against the current frame's features by projection |
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------ |
| Steps | 1. Iterate over the current frame's mvpMapPoints and mark them so they are excluded from the later search 2. Project all local MapPoints into the current frame and keep those inside the field of view 3. Match the visible local MapPoints against the current frame's features by projection (SearchByProjection) |
void Tracking::SearchLocalPoints()
{
// Do not search map points already matched
// Step 1: iterate over mCurrentFrame.mvpMapPoints and mark these MapPoints so they are skipped by the search below,
// because the MapPoints already in mvpMapPoints are necessarily visible in the current frame
for(vector<MapPoint*>::iterator vit=mCurrentFrame.mvpMapPoints.begin(), vend=mCurrentFrame.mvpMapPoints.end(); vit!=vend; vit++)
{
MapPoint* pMP = *vit;
if(pMP)
{
if(pMP->isBad())
{
*vit = static_cast<MapPoint*>(NULL);
}
else
{
// increase the counter of frames in which this point is visible
pMP->IncreaseVisible();
// mark the point as seen by the current frame
pMP->mnLastFrameSeen = mCurrentFrame.mnId;
// do not project this point later on, it has already been matched
pMP->mbTrackInView = false;
}
}
}
int nToMatch=0;
// Project points in frame and check its visibility
// Step 2: project all local MapPoints into the current frame, keep those inside the field of view, then match by projection
for(vector<MapPoint*>::iterator vit=mvpLocalMapPoints.begin(), vend=mvpLocalMapPoints.end(); vit!=vend; vit++)
{
MapPoint* pMP = *vit;
// MapPoints already seen by the current frame do not need the visibility check
if(pMP->mnLastFrameSeen == mCurrentFrame.mnId)
continue;
if(pMP->isBad())
continue;
// Project (this fills MapPoint variables for matching)
// Step 2.1: check whether this local MapPoint lies inside the field of view of the current frame
if(mCurrentFrame.isInFrustum(pMP,0.5))
{
// the point is inside the frustum of the current frame, increase its visible counter
pMP->IncreaseVisible();
// only MapPoints inside the field of view take part in the projection matching below
nToMatch++;
}
}
if(nToMatch>0)
{
ORBmatcher matcher(0.8);
int th = 1;
if(mSensor==System::RGBD)
th=3;
// If the camera has been relocalised recently, perform a coarser search
// if a relocalization happened recently, search with a wider window (larger threshold)
if(mCurrentFrame.mnId<mnLastRelocFrameId+2)
th=5;
// match the MapPoints inside the field of view against the current frame by projection
matcher.SearchByProjection(mCurrentFrame,mvpLocalMapPoints,th);
}
}
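Frame::isInFrustum performs the visibility test used in step 2.1. The sketch below is my simplification of what such a test checks at its core: transform the point into the camera frame, require positive depth, and require the projection to fall inside the image. The real function additionally checks the viewing angle against the point's mean viewing direction and the scale-invariance distance range.

#include <opencv2/core.hpp>

// Simplified frustum test (illustration only). Rcw is a 3x3 CV_32F rotation,
// tcw a 3x1 CV_32F translation, x3Dw the 3x1 world position of the point.
bool InFrustumSketch(const cv::Mat& Rcw, const cv::Mat& tcw, const cv::Mat& x3Dw,
                     float fx, float fy, float cx, float cy,
                     float minX, float maxX, float minY, float maxY)
{
    cv::Mat x3Dc = Rcw*x3Dw + tcw;                 // world -> camera frame
    const float z = x3Dc.at<float>(2);
    if(z <= 0.0f)
        return false;                              // behind the camera
    const float u = fx*x3Dc.at<float>(0)/z + cx;   // pinhole projection
    const float v = fy*x3Dc.at<float>(1)/z + cy;
    return u >= minX && u < maxX && v >= minY && v < maxY;
}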
/**
* @brief Update the local map
*
* The local map consists of: \n
* - K1 keyframes, their K2 neighbouring keyframes and the reference keyframe
* - the MapPoints observed by these keyframes
*/
void Tracking::UpdateLocalMap()
{
// This is for visualization
// (it might be cleaner to place this call after UpdateLocalPoints)
mpMap->SetReferenceMapPoints(mvpLocalMapPoints);
// Update
// update the local keyframes and the local MapPoints
UpdateLocalKeyFrames();
UpdateLocalPoints();
}
/**
* @brief Update the local map points, called by UpdateLocalMap()
*
* Collects the MapPoints observed by the local keyframes mvpLocalKeyFrames into mvpLocalMapPoints
*/
void Tracking::UpdateLocalPoints()
{
// Step 1: clear the local MapPoints
mvpLocalMapPoints.clear();
// Step 2: iterate over the local keyframes mvpLocalKeyFrames
for(vector<KeyFrame*>::const_iterator itKF=mvpLocalKeyFrames.begin(), itEndKF=mvpLocalKeyFrames.end(); itKF!=itEndKF; itKF++)
{
KeyFrame* pKF = *itKF;
const vector<MapPoint*> vpMPs = pKF->GetMapPointMatches();
// add the MapPoints of this local keyframe to mvpLocalMapPoints
for(vector<MapPoint*>::const_iterator itMP=vpMPs.begin(), itEndMP=vpMPs.end(); itMP!=itEndMP; itMP++)
{
MapPoint* pMP = *itMP;
if(!pMP)
continue;
// mnTrackReferenceForFrame prevents adding the same local MapPoint twice
if(pMP->mnTrackReferenceForFrame==mCurrentFrame.mnId)
continue;
if(!pMP->isBad())
{
mvpLocalMapPoints.push_back(pMP);
pMP->mnTrackReferenceForFrame=mCurrentFrame.mnId;
}
}
}
}
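One detail worth noting is the mnTrackReferenceForFrame idiom used here and in UpdateLocalKeyFrames: instead of keeping a std::set of already-collected elements, each element stores the id of the frame that last collected it, so duplicates are rejected with a single integer comparison. A toy sketch of the idiom with hypothetical types (not ORB-SLAM2 code):

#include <vector>

// Each item remembers which "frame id" last collected it; collecting it again
// for the same id is a no-op. This mirrors how mnTrackReferenceForFrame is used.
struct Item
{
    long trackReferenceForFrame;
    Item() : trackReferenceForFrame(-1) {}
};

void CollectUnique(const std::vector<std::vector<Item*> >& perKeyFrameItems,
                   long currentFrameId, std::vector<Item*>& out)
{
    out.clear();
    for(size_t k = 0; k < perKeyFrameItems.size(); k++)
        for(size_t i = 0; i < perKeyFrameItems[k].size(); i++)
        {
            Item* p = perKeyFrameItems[k][i];
            if(!p || p->trackReferenceForFrame == currentFrameId)
                continue;                 // null or already collected for this frame
            p->trackReferenceForFrame = currentFrameId;
            out.push_back(p);
        }
}

Compared to a std::set, this avoids per-frame allocations and lookups at the cost of storing one bookkeeping field in every point.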
/**
* @brief Update the local keyframes, called by UpdateLocalMap()
*
* Iterates over the MapPoints of the current frame, collects the keyframes observing them together with their neighbours, and updates mvpLocalKeyFrames
*/
void Tracking::UpdateLocalKeyFrames()
{
// Each map point vote for the keyframes in which it has been observed
// Step 1: for every MapPoint of the current frame, record all keyframes that observe it
map<KeyFrame*,int> keyframeCounter;
for(int i=0; i<mCurrentFrame.N; i++)
{
if(mCurrentFrame.mvpMapPoints[i])
{
MapPoint* pMP = mCurrentFrame.mvpMapPoints[i];
if(!pMP->isBad())
{
// keyframes that observe this MapPoint of the current frame
const map<KeyFrame*,size_t> observations = pMP->GetObservations();
for(map<KeyFrame*,size_t>::const_iterator it=observations.begin(), itend=observations.end(); it!=itend; it++)
keyframeCounter[it->first]++;
}
else
{
mCurrentFrame.mvpMapPoints[i]=NULL;
}
}
}
if(keyframeCounter.empty())
return;
int max=0;
KeyFrame* pKFmax= static_cast<KeyFrame*>(NULL);
// Step 2: update the local keyframes (mvpLocalKeyFrames); there are three strategies for adding them
// clear the local keyframes first
mvpLocalKeyFrames.clear();
mvpLocalKeyFrames.reserve(3*keyframeCounter.size());
// All keyframes that observe a map point are included in the local map. Also check which keyframe shares most points
// V-D K1: shares the map points with current frame
// Strategy 1: every keyframe that observes a MapPoint of the current frame becomes a local keyframe
for(map<KeyFrame*,int>::const_iterator it=keyframeCounter.begin(), itEnd=keyframeCounter.end(); it!=itEnd; it++)
{
KeyFrame* pKF = it->first;
if(pKF->isBad())
continue;
if(it->second>max)
{
max=it->second;
pKFmax=pKF;
}
mvpLocalKeyFrames.push_back(it->first);
// mnTrackReferenceForFrame prevents adding the same local keyframe twice
pKF->mnTrackReferenceForFrame = mCurrentFrame.mnId;
}
// Include also some not-already-included keyframes that are neighbors to already-included keyframes
// V-D K2: neighbors to K1 in the covisibility graph
// Strategy 2: keyframes with high covisibility with the keyframes found by strategy 1 also become local keyframes
for(vector<KeyFrame*>::const_iterator itKF=mvpLocalKeyFrames.begin(), itEndKF=mvpLocalKeyFrames.end(); itKF!=itEndKF; itKF++)
{
// Limit the number of keyframes
if(mvpLocalKeyFrames.size()>80)
break;
KeyFrame* pKF = *itKF;
// Strategy 2.1: the 10 keyframes with the best covisibility
const vector<KeyFrame*> vNeighs = pKF->GetBestCovisibilityKeyFrames(10);
for(vector<KeyFrame*>::const_iterator itNeighKF=vNeighs.begin(), itEndNeighKF=vNeighs.end(); itNeighKF!=itEndNeighKF; itNeighKF++)
{
KeyFrame* pNeighKF = *itNeighKF;
if(!pNeighKF->isBad())
{
// mnTrackReferenceForFrame prevents adding the same local keyframe twice
if(pNeighKF->mnTrackReferenceForFrame!=mCurrentFrame.mnId)
{
mvpLocalKeyFrames.push_back(pNeighKF);
pNeighKF->mnTrackReferenceForFrame=mCurrentFrame.mnId;
break;
}
}
}
// Strategy 2.2: the children of this keyframe in the spanning tree
const set<KeyFrame*> spChilds = pKF->GetChilds();
for(set<KeyFrame*>::const_iterator sit=spChilds.begin(), send=spChilds.end(); sit!=send; sit++)
{
KeyFrame* pChildKF = *sit;
if(!pChildKF->isBad())
{
if(pChildKF->mnTrackReferenceForFrame!=mCurrentFrame.mnId)
{
mvpLocalKeyFrames.push_back(pChildKF);
pChildKF->mnTrackReferenceForFrame=mCurrentFrame.mnId;
break;
}
}
}
// Strategy 2.3: the parent of this keyframe
KeyFrame* pParent = pKF->GetParent();
if(pParent)
{
// mnTrackReferenceForFrame prevents adding the same local keyframe twice
if(pParent->mnTrackReferenceForFrame!=mCurrentFrame.mnId)
{
mvpLocalKeyFrames.push_back(pParent);
pParent->mnTrackReferenceForFrame=mCurrentFrame.mnId;
break;
}
}
}
// V-D Kref: shares the most map points with current frame
// Step 3: the keyframe that shares the most MapPoints with the current frame becomes its reference keyframe
if(pKFmax)
{
mpReferenceKF = pKFmax;
mCurrentFrame.mpReferenceKF = mpReferenceKF;
}
}
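The voting in steps 1 and 2 boils down to counting, for every keyframe, how many of the current frame's MapPoints it observes and keeping the maximum. A stripped-down sketch of just that counting step, with KF as a placeholder type:

#include <map>

struct KF; // placeholder for KeyFrame in this illustration

// Given the vote count per keyframe (as built in step 1), return the keyframe
// that shares the most MapPoints with the current frame.
KF* MostSharedKeyFrame(const std::map<KF*,int>& keyframeCounter)
{
    int max = 0;
    KF* pKFmax = 0;
    for(std::map<KF*,int>::const_iterator it = keyframeCounter.begin();
        it != keyframeCounter.end(); ++it)
    {
        if(it->second > max)
        {
            max = it->second;
            pKFmax = it->first;
        }
    }
    return pKFmax;
}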
| Function | Relocalization: find candidate keyframes that share enough matches with the current frame, then estimate the pose with PnP inside a RANSAC loop |
| -------- | --------------------------------------------------------------------------------------------------------------------------------------------------- |
| Steps | 1. Compute the BoW vector of the current frame and query the keyframe database for relocalization candidates 2. Build a PnP solver for each candidate, discard bad candidates, and prepare the matched point set between each candidate keyframe and the current frame 3. Solve for the pose with PnP, running several P4P RANSAC iterations and refining with nonlinear least-squares optimization, until a pose supported by enough inliers is found 4. Return success or failure |
bool Tracking::Relocalization()
{
// Compute Bag of Words Vector
// Step 1: compute the BoW representation of the feature points of the current frame
mCurrentFrame.ComputeBoW();
// Relocalization is performed when tracking is lost
// Track Lost: Query KeyFrame Database for keyframe candidates for relocalisation
// Step 2: find candidate keyframes similar to the current frame
vector<KeyFrame*> vpCandidateKFs = mpKeyFrameDB->DetectRelocalizationCandidates(&mCurrentFrame);
if(vpCandidateKFs.empty())// if no candidate keyframes were found, fail
return false;
const int nKFs = vpCandidateKFs.size();// number of candidate keyframes
// We perform first an ORB matching with each candidate
// If enough matches are found we setup a PnP solver
ORBmatcher matcher(0.75,true);
vector<PnPsolver*> vpPnPsolvers;
vpPnPsolvers.resize(nKFs);
vector<vector<MapPoint*> > vvpMapPointMatches;
vvpMapPointMatches.resize(nKFs);
vector<bool> vbDiscarded;
vbDiscarded.resize(nKFs);
int nCandidates=0;
for(int i=0; i<nKFs; i++)
{
KeyFrame* pKF = vpCandidateKFs[i];
if(pKF->isBad())
vbDiscarded[i] = true;// discard bad candidate keyframes
else
{
// Step 3: match against the candidate keyframe via BoW
int nmatches = matcher.SearchByBoW(pKF,mCurrentFrame,vvpMapPointMatches[i]);
if(nmatches<15)// discard the candidate if there are fewer than 15 matches
{
vbDiscarded[i] = true;
continue;
}
else// otherwise solve with PnP
{
// initialize the PnPsolver
PnPsolver* pSolver = new PnPsolver(mCurrentFrame,vvpMapPointMatches[i]);
pSolver->SetRansacParameters(0.99,10,300,4,0.5,5.991);
vpPnPsolvers[i] = pSolver;
nCandidates++;
}
}
}
// Alternatively perform some iterations of P4P RANSAC
// Until we find a camera pose supported by enough inliers
bool bMatch = false;
ORBmatcher matcher2(0.9,true);
while(nCandidates>0 && !bMatch)
{
for(int i=0; i<nKFs; i++)
{
if(vbDiscarded[i])
continue;
// Perform 5 Ransac Iterations
vector<bool> vbInliers;
int nInliers;
bool bNoMore;
// Step 4: estimate the pose with the EPnP algorithm
PnPsolver* pSolver = vpPnPsolvers[i];
cv::Mat Tcw = pSolver->iterate(5,bNoMore,vbInliers,nInliers);
// If Ransac reaches max. iterations discard keyframe
if(bNoMore)
{
vbDiscarded[i]=true;
nCandidates--;
}
// If a Camera Pose is computed, optimize
if(!Tcw.empty())
{
Tcw.copyTo(mCurrentFrame.mTcw);
set<MapPoint*> sFound;
const int np = vbInliers.size();// size of the inlier mask
for(int j=0; j<np; j++)
{
if(vbInliers[j])
{
mCurrentFrame.mvpMapPoints[j]=vvpMapPointMatches[i][j];
sFound.insert(vvpMapPointMatches[i][j]);
}
else
mCurrentFrame.mvpMapPoints[j]=NULL;
}
// Step 5: optimize the pose with PoseOptimization
int nGood = Optimizer::PoseOptimization(&mCurrentFrame);
if(nGood<10)
continue;
// discard the outliers found by the optimization
for(int io =0; io<mCurrentFrame.N; io++)
if(mCurrentFrame.mvbOutlier[io])
mCurrentFrame.mvpMapPoints[io]=static_cast<MapPoint*>(NULL);
// If few inliers, search by projection in a coarse window and optimize again
// Step 6: if there are few inliers, match previously unmatched points by projection and optimize again
if(nGood<50)
{
int nadditional =matcher2.SearchByProjection(mCurrentFrame,vpCandidateKFs[i],sFound,10,100);
if(nadditional+nGood>=50)
{
nGood = Optimizer::PoseOptimization(&mCurrentFrame);// optimize again
// If many inliers but still not enough, search by projection again in a narrower window
// the camera has been already optimized with many points
if(nGood>30 && nGood<50)
{
sFound.clear();
for(int ip =0; ip<mCurrentFrame.N; ip++)
if(mCurrentFrame.mvpMapPoints[ip])
sFound.insert(mCurrentFrame.mvpMapPoints[ip]);
nadditional = matcher2.SearchByProjection(mCurrentFrame,vpCandidateKFs[i],sFound,3,64);
// Final optimization
if(nGood+nadditional>=50)
{
nGood = Optimizer::PoseOptimization(&mCurrentFrame);
for(int io =0; io<mCurrentFrame.N; io++)
if(mCurrentFrame.mvbOutlier[io])
mCurrentFrame.mvpMapPoints[io]=NULL;
}
}
}
}
// If the pose is supported by enough inliers stop ransacs and continue
if(nGood>=50)
{
bMatch = true;
break;
}
}
}
}
if(!bMatch)
{
return false;
}
else
{
mnLastRelocFrameId = mCurrentFrame.mnId;
return true;
}
}
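ORB-SLAM2 estimates the pose with its own EPnP-based PnPsolver inside the RANSAC loop above, followed by Optimizer::PoseOptimization. Conceptually this is a standard PnP-with-RANSAC step; the sketch below uses OpenCV's cv::solvePnPRansac only to illustrate the idea and is not what Relocalization actually calls. The iteration count and confidence echo the SetRansacParameters call above, while the 4-pixel reprojection threshold is an assumption made for the sketch.

#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#include <vector>

// Conceptual analogue of the PnP+RANSAC stage: estimate a camera pose from
// 3D-2D correspondences and recover the inlier set.
bool EstimatePoseRansac(const std::vector<cv::Point3f>& mapPoints3d,
                        const std::vector<cv::Point2f>& keypoints2d,
                        const cv::Mat& K, cv::Mat& rvec, cv::Mat& tvec,
                        std::vector<int>& inliers)
{
    if(mapPoints3d.size() < 4 || mapPoints3d.size() != keypoints2d.size())
        return false;
    // 300 iterations, 0.99 confidence; no lens distortion assumed for the sketch
    return cv::solvePnPRansac(mapPoints3d, keypoints2d, K, cv::Mat(),
                              rvec, tvec, false, 300, 4.0f, 0.99, inliers);
}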