The Tracking class mainly requires understanding its four tracking modes: TrackWithMotionModel, TrackReferenceKeyFrame, TrackLocalMap, and Relocalization.
Tracking.h
/**
* This file is part of ORB-SLAM2.
*
* Copyright (C) 2014-2016 Raúl Mur-Artal (University of Zaragoza)
* For more information see <https://github.com/raulmur/ORB_SLAM2>
*
* ORB-SLAM2 is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ORB-SLAM2 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ORB-SLAM2. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TRACKING_H
#define TRACKING_H
#include<opencv2/core/core.hpp>
#include<opencv2/features2d/features2d.hpp>
#include"Viewer.h"
#include"FrameDrawer.h"
#include"Map.h"
#include"LocalMapping.h"
#include"LoopClosing.h"
#include"Frame.h"
#include "ORBVocabulary.h"
#include"KeyFrameDatabase.h"
#include"ORBextractor.h"
#include "Initializer.h"
#include "MapDrawer.h"
#include "System.h"
#include <mutex>
namespace ORB_SLAM2
{
class Viewer;
class FrameDrawer;
class Map;
class LocalMapping;
class LoopClosing;
class System;
class Tracking
{
public:
Tracking(System* pSys, ORBVocabulary* pVoc, FrameDrawer* pFrameDrawer, MapDrawer* pMapDrawer, Map* pMap,
KeyFrameDatabase* pKFDB, const string &strSettingPath, const int sensor);
// Preprocess the input and call Track(). Extracts features and performs stereo matching.
cv::Mat GrabImageStereo(const cv::Mat &imRectLeft,const cv::Mat &imRectRight, const double &timestamp);
cv::Mat GrabImageRGBD(const cv::Mat &imRGB,const cv::Mat &imD, const double &timestamp);
cv::Mat GrabImageMonocular(const cv::Mat &im, const double &timestamp);
void SetLocalMapper(LocalMapping* pLocalMapper);
void SetLoopClosing(LoopClosing* pLoopClosing);
void SetViewer(Viewer* pViewer);
// Load new settings
// The focal length should be similar, or scale prediction will fail when projecting points
// TODO: Modify MapPoint::PredictScale to take into account focal length
void ChangeCalibration(const string &strSettingPath);
// Use this function if you have deactivated local mapping and you only want to localize the camera.
/** Localize only against the existing map, without running LocalMapping */
void InformOnlyTracking(const bool &flag);
public:
// Tracking states
enum eTrackingState{
SYSTEM_NOT_READY=-1,
NO_IMAGES_YET=0,
NOT_INITIALIZED=1,
OK=2,
LOST=3
};
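// Typical transitions (a summary of Track() below): NO_IMAGES_YET -> NOT_INITIALIZED -> OK,
// then OK <-> LOST; a LOST state recovers to OK only through Relocalization().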
eTrackingState mState;
eTrackingState mLastProcessedState;
// Input sensor
int mSensor;
// Current Frame
Frame mCurrentFrame;
cv::Mat mImGray;
// Initialization Variables (Monocular)
std::vector<int> mvIniLastMatches;
/// stores, for each keypoint of the initial frame, the index of its matching keypoint found during initialization
std::vector<int> mvIniMatches;
std::vector<cv::Point2f> mvbPrevMatched;
std::vector<cv::Point3f> mvIniP3D;
Frame mInitialFrame;
// Lists used to recover the full camera trajectory at the end of the execution.
// Basically we store the reference keyframe for each frame and its relative transformation
/// Stores each frame's reference keyframe and its pose relative to that keyframe, so the full camera trajectory can be recovered
list<cv::Mat> mlRelativeFramePoses;
list<KeyFrame*> mlpReferences;
list<double> mlFrameTimes;
list<bool> mlbLost;
// True if local mapping is deactivated and we are performing only localization
bool mbOnlyTracking;
void Reset();
protected:
// Main tracking function. It is independent of the input sensor.
void Track();
// Map initialization for stereo and RGB-D
void StereoInitialization();
// Map initialization for monocular
void MonocularInitialization();
void CreateInitialMapMonocular();
void CheckReplacedInLastFrame();
bool TrackReferenceKeyFrame();
void UpdateLastFrame();
bool TrackWithMotionModel();
bool Relocalization();
void UpdateLocalMap();
void UpdateLocalPoints();
void UpdateLocalKeyFrames();
bool TrackLocalMap();
void SearchLocalPoints();
bool NeedNewKeyFrame();
void CreateNewKeyFrame();
// In case of performing only localization, this flag is true when there are no matches to
// points in the map. Still tracking will continue if there are enough matches with temporal points.
// In that case we are doing visual odometry. The system will try to do relocalization to recover
// "zero-drift" localization to the map.
bool mbVO;
//Other Thread Pointers
LocalMapping* mpLocalMapper;
LoopClosing* mpLoopClosing;
//ORB
ORBextractor* mpORBextractorLeft, *mpORBextractorRight;
ORBextractor* mpIniORBextractor;
//BoW
ORBVocabulary* mpORBVocabulary;
KeyFrameDatabase* mpKeyFrameDB;
// Initialization (only for monocular)
Initializer* mpInitializer;
//Local Map
KeyFrame* mpReferenceKF;
std::vector<KeyFrame*> mvpLocalKeyFrames;
std::vector<MapPoint*> mvpLocalMapPoints;
// System
System* mpSystem;
//Drawers
Viewer* mpViewer;
FrameDrawer* mpFrameDrawer;
MapDrawer* mpMapDrawer;
//Map
Map* mpMap;
//Calibration matrix
cv::Mat mK;
cv::Mat mDistCoef;
float mbf;
//New KeyFrame rules (according to fps)
int mMinFrames;
int mMaxFrames;
// Threshold close/far points
// Points seen as close by the stereo/RGBD sensor are considered reliable
// and inserted from just one frame. Far points require a match in two keyframes.
float mThDepth;
// For RGB-D inputs only. For some datasets (e.g. TUM) the depthmap values are scaled.
float mDepthMapFactor;
//Current matches in frame
int mnMatchesInliers;
//Last Frame, KeyFrame and Relocalisation Info
KeyFrame* mpLastKeyFrame;
Frame mLastFrame;
unsigned int mnLastKeyFrameId;
unsigned int mnLastRelocFrameId;
//Motion Model
cv::Mat mVelocity;
//Color order (true RGB, false BGR, ignored if grayscale)
bool mbRGB;
list<MapPoint*> mlpTemporalPoints;
};
} //namespace ORB_SLAM
#endif // TRACKING_H
Tracking.cpp
/**
* This file is part of ORB-SLAM2.
*
* Copyright (C) 2014-2016 Raúl Mur-Artal (University of Zaragoza)
* For more information see <https://github.com/raulmur/ORB_SLAM2>
*
* ORB-SLAM2 is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ORB-SLAM2 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ORB-SLAM2. If not, see <http://www.gnu.org/licenses/>.
*/
#include "Tracking.h"
#include<opencv2/core/core.hpp>
#include<opencv2/features2d/features2d.hpp>
#include"ORBmatcher.h"
#include"FrameDrawer.h"
#include"Converter.h"
#include"Map.h"
#include"Initializer.h"
#include"Optimizer.h"
#include"PnPsolver.h"
#include<iostream>
#include<mutex>
using namespace std;
namespace ORB_SLAM2
{
/**
* 1. Load the camera parameters from the settings file;
* 2. Create the ORB extractors
*/
Tracking::Tracking(System *pSys, ORBVocabulary* pVoc, FrameDrawer *pFrameDrawer, MapDrawer *pMapDrawer, Map *pMap, KeyFrameDatabase* pKFDB, const string &strSettingPath, const int sensor):
mState(NO_IMAGES_YET), mSensor(sensor), mbOnlyTracking(false), mbVO(false), mpORBVocabulary(pVoc),
mpKeyFrameDB(pKFDB), mpInitializer(static_cast<Initializer*>(NULL)), mpSystem(pSys), mpViewer(NULL),
mpFrameDrawer(pFrameDrawer), mpMapDrawer(pMapDrawer), mpMap(pMap), mnLastRelocFrameId(0)
{
// Load camera parameters from settings file
cv::FileStorage fSettings(strSettingPath, cv::FileStorage::READ);
float fx = fSettings["Camera.fx"];
float fy = fSettings["Camera.fy"];
float cx = fSettings["Camera.cx"];
float cy = fSettings["Camera.cy"];
cv::Mat K = cv::Mat::eye(3,3,CV_32F);
K.at<float>(0,0) = fx;
K.at<float>(1,1) = fy;
K.at<float>(0,2) = cx;
K.at<float>(1,2) = cy;
K.copyTo(mK);
cv::Mat DistCoef(4,1,CV_32F);
DistCoef.at<float>(0) = fSettings["Camera.k1"];
DistCoef.at<float>(1) = fSettings["Camera.k2"];
DistCoef.at<float>(2) = fSettings["Camera.p1"];
DistCoef.at<float>(3) = fSettings["Camera.p2"];
const float k3 = fSettings["Camera.k3"];
if(k3!=0)
{
DistCoef.resize(5);
DistCoef.at<float>(4) = k3;
}
DistCoef.copyTo(mDistCoef);
mbf = fSettings["Camera.bf"];
float fps = fSettings["Camera.fps"];
if(fps==0)
fps=30;
// Max/Min Frames to insert keyframes and to check relocalisation
mMinFrames = 0;
mMaxFrames = fps;
cout << endl << "Camera Parameters: " << endl;
cout << "- fx: " << fx << endl;
cout << "- fy: " << fy << endl;
cout << "- cx: " << cx << endl;
cout << "- cy: " << cy << endl;
cout << "- k1: " << DistCoef.at<float>(0) << endl;
cout << "- k2: " << DistCoef.at<float>(1) << endl;
if(DistCoef.rows==5)
cout << "- k3: " << DistCoef.at<float>(4) << endl;
cout << "- p1: " << DistCoef.at<float>(2) << endl;
cout << "- p2: " << DistCoef.at<float>(3) << endl;
cout << "- fps: " << fps << endl;
int nRGB = fSettings["Camera.RGB"];
mbRGB = nRGB;
if(mbRGB)
cout << "- color order: RGB (ignored if grayscale)" << endl;
else
cout << "- color order: BGR (ignored if grayscale)" << endl;
// Load ORB parameters
int nFeatures = fSettings["ORBextractor.nFeatures"];
float fScaleFactor = fSettings["ORBextractor.scaleFactor"];
int nLevels = fSettings["ORBextractor.nLevels"];
int fIniThFAST = fSettings["ORBextractor.iniThFAST"];
int fMinThFAST = fSettings["ORBextractor.minThFAST"];
mpORBextractorLeft = new ORBextractor(nFeatures,fScaleFactor,nLevels,fIniThFAST,fMinThFAST);
if(sensor==System::STEREO)
mpORBextractorRight = new ORBextractor(nFeatures,fScaleFactor,nLevels,fIniThFAST,fMinThFAST);
if(sensor==System::MONOCULAR)
mpIniORBextractor = new ORBextractor(2*nFeatures,fScaleFactor,nLevels,fIniThFAST,fMinThFAST);
cout << endl << "ORB Extractor Parameters: " << endl;
cout << "- Number of Features: " << nFeatures << endl;
cout << "- Scale Levels: " << nLevels << endl;
cout << "- Scale Factor: " << fScaleFactor << endl;
cout << "- Initial Fast Threshold: " << fIniThFAST << endl;
cout << "- Minimum Fast Threshold: " << fMinThFAST << endl;
if(sensor==System::STEREO || sensor==System::RGBD)
{
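// Camera.bf is the stereo baseline multiplied by fx, so mThDepth = baseline * ThDepth:
// points closer than ThDepth stereo baselines are treated as reliable "close" points.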
mThDepth = mbf*(float)fSettings["ThDepth"]/fx;
cout << endl << "Depth Threshold (Close/Far Points): " << mThDepth << endl;
}
if(sensor==System::RGBD)
{
mDepthMapFactor = fSettings["DepthMapFactor"];
if(fabs(mDepthMapFactor)<1e-5)
mDepthMapFactor=1;
else
mDepthMapFactor = 1.0f/mDepthMapFactor;
}
}
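// A minimal settings file read by this constructor via cv::FileStorage might look as
// follows for a monocular run (illustrative values only, not from any particular dataset):
//   %YAML:1.0
//   Camera.fx: 517.3
//   Camera.fy: 516.5
//   Camera.cx: 318.6
//   Camera.cy: 255.3
//   Camera.k1: 0.26
//   Camera.k2: -0.95
//   Camera.p1: -0.005
//   Camera.p2: 0.002
//   Camera.fps: 30.0
//   Camera.RGB: 1
//   ORBextractor.nFeatures: 1000
//   ORBextractor.scaleFactor: 1.2
//   ORBextractor.nLevels: 8
//   ORBextractor.iniThFAST: 20
//   ORBextractor.minThFAST: 7
// Stereo/RGB-D runs additionally need Camera.bf, ThDepth and (for RGB-D) DepthMapFactor.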
void Tracking::SetLocalMapper(LocalMapping *pLocalMapper)
{
mpLocalMapper=pLocalMapper;
}
void Tracking::SetLoopClosing(LoopClosing *pLoopClosing)
{
mpLoopClosing=pLoopClosing;
}
void Tracking::SetViewer(Viewer *pViewer)
{
mpViewer=pViewer;
}
cv::Mat Tracking::GrabImageStereo(const cv::Mat &imRectLeft, const cv::Mat &imRectRight, const double &timestamp)
{
mImGray = imRectLeft;
cv::Mat imGrayRight = imRectRight;
if(mImGray.channels()==3)
{
if(mbRGB)
{
cvtColor(mImGray,mImGray,CV_RGB2GRAY);
cvtColor(imGrayRight,imGrayRight,CV_RGB2GRAY);
}
else
{
cvtColor(mImGray,mImGray,CV_BGR2GRAY);
cvtColor(imGrayRight,imGrayRight,CV_BGR2GRAY);
}
}
else if(mImGray.channels()==4)
{
if(mbRGB)
{
cvtColor(mImGray,mImGray,CV_RGBA2GRAY);
cvtColor(imGrayRight,imGrayRight,CV_RGBA2GRAY);
}
else
{
cvtColor(mImGray,mImGray,CV_BGRA2GRAY);
cvtColor(imGrayRight,imGrayRight,CV_BGRA2GRAY);
}
}
mCurrentFrame = Frame(mImGray,imGrayRight,timestamp,mpORBextractorLeft,mpORBextractorRight,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth);
Track();
return mCurrentFrame.mTcw.clone();
}
cv::Mat Tracking::GrabImageRGBD(const cv::Mat &imRGB,const cv::Mat &imD, const double &timestamp)
{
mImGray = imRGB;
cv::Mat imDepth = imD;
if(mImGray.channels()==3)
{
if(mbRGB)
cvtColor(mImGray,mImGray,CV_RGB2GRAY);
else
cvtColor(mImGray,mImGray,CV_BGR2GRAY);
}
else if(mImGray.channels()==4)
{
if(mbRGB)
cvtColor(mImGray,mImGray,CV_RGBA2GRAY);
else
cvtColor(mImGray,mImGray,CV_BGRA2GRAY);
}
if((fabs(mDepthMapFactor-1.0f)>1e-5) || imDepth.type()!=CV_32F)
imDepth.convertTo(imDepth,CV_32F,mDepthMapFactor);
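// mDepthMapFactor is the reciprocal of the dataset's depth scaling (e.g. TUM stores
// depth in units of 1/5000 m), so convertTo() yields a CV_32F depth map in meters.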
mCurrentFrame = Frame(mImGray,imDepth,timestamp,mpORBextractorLeft,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth);
Track();
return mCurrentFrame.mTcw.clone();
}
/**
* 1. Convert the image to a single-channel grayscale image;
* 2. Construct a Frame object;
* 3. Call Track() to track its features
*/
cv::Mat Tracking::GrabImageMonocular(const cv::Mat &im, const double &timestamp)
{
mImGray = im;
if(mImGray.channels()==3)
{
if(mbRGB)
cvtColor(mImGray,mImGray,CV_RGB2GRAY);
else
cvtColor(mImGray,mImGray,CV_BGR2GRAY);
}
else if(mImGray.channels()==4)
{
if(mbRGB)
cvtColor(mImGray,mImGray,CV_RGBA2GRAY);
else
cvtColor(mImGray,mImGray,CV_BGRA2GRAY);
}
if(mState==NOT_INITIALIZED || mState==NO_IMAGES_YET)
mCurrentFrame = Frame(mImGray,timestamp,mpIniORBextractor,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth);
else
mCurrentFrame = Frame(mImGray,timestamp,mpORBextractorLeft,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth);
Track();
return mCurrentFrame.mTcw.clone();
}
/**
* Track the current frame.
* The tracking modes:
* 1. TrackWithMotionModel
* 2. TrackReferenceKeyFrame
* 3. TrackLocalMap
* 4. Relocalization
*/
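// Rough decision flow inside Track() (a summary of the code below):
//   NOT_INITIALIZED                      -> StereoInitialization() / MonocularInitialization()
//   OK, motion model available           -> TrackWithMotionModel(), falling back to TrackReferenceKeyFrame()
//   OK, no motion model / just relocalized -> TrackReferenceKeyFrame()
//   LOST                                 -> Relocalization()
// Any successful initial estimate is then refined by TrackLocalMap().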
void Tracking::Track()
{
if(mState==NO_IMAGES_YET)
{
mState = NOT_INITIALIZED;
}
mLastProcessedState=mState;
// Get Map Mutex -> Map cannot be changed
unique_lock<mutex> lock(mpMap->mMutexMapUpdate);
///Initialize the SLAM system; monocular initialization needs the first two frames
if(mState==NOT_INITIALIZED)
{
if(mSensor==System::STEREO || mSensor==System::RGBD)
StereoInitialization();
else
MonocularInitialization();
mpFrameDrawer->Update(this);
if(mState!=OK)
return;
}
else ///initialization is done
{
// System is initialized. Track Frame.
bool bOK;
// Initial camera pose estimation using motion model or relocalization (if tracking is lost)
///Not in tracking-only mode: use the constant-velocity motion model, or relocalization if tracking was lost
if(!mbOnlyTracking)
{
// Local Mapping is activated. This is the normal behaviour, unless
// you explicitly activate the "only tracking" mode.
///Local Mapping is active; this is the normal case unless tracking-only mode is used
if(mState==OK)
{
// Local Mapping might have changed some MapPoints tracked in last frame
CheckReplacedInLastFrame();
///If there is no motion model yet, or the current frame is within two frames of the last relocalization, track the reference keyframe
if(mVelocity.empty() || mCurrentFrame.mnId<mnLastRelocFrameId+2)
{
bOK = TrackReferenceKeyFrame();
}
else///track with the motion model
{
bOK = TrackWithMotionModel();
if(!bOK)
bOK = TrackReferenceKeyFrame();
}
}
else
{
bOK = Relocalization();
}
}
else ///tracking-only mode
{
// Localization Mode: Local Mapping is deactivated
if(mState==LOST)
{
bOK = Relocalization();
}
else
{
if(!mbVO)
{
// In last frame we tracked enough MapPoints in the map
if(!mVelocity.empty())
{
bOK = TrackWithMotionModel();
}
else
{
bOK = TrackReferenceKeyFrame();
}
}
else
{
// In last frame we tracked mainly "visual odometry" points.
// We compute two camera poses, one from motion model and one doing relocalization.
// If relocalization is successful we choose that solution, otherwise we retain
// the "visual odometry" solution.
bool bOKMM = false;
bool bOKReloc = false;
vector<MapPoint*> vpMPsMM;
vector<bool> vbOutMM;
cv::Mat TcwMM;
if(!mVelocity.empty())
{
bOKMM = TrackWithMotionModel();
vpMPsMM = mCurrentFrame.mvpMapPoints;
vbOutMM = mCurrentFrame.mvbOutlier;
TcwMM = mCurrentFrame.mTcw.clone();
}
bOKReloc = Relocalization();
if(bOKMM && !bOKReloc)
{
mCurrentFrame.SetPose(TcwMM);
mCurrentFrame.mvpMapPoints = vpMPsMM;
mCurrentFrame.mvbOutlier = vbOutMM;
if(mbVO)
{
for(int i =0; i<mCurrentFrame.N; i++)
{
if(mCurrentFrame.mvpMapPoints[i] && !mCurrentFrame.mvbOutlier[i])
{
mCurrentFrame.mvpMapPoints[i]->IncreaseFound();
}
}
}
}
else if(bOKReloc)
{
mbVO = false;
}
bOK = bOKReloc || bOKMM;
}
}
}
mCurrentFrame.mpReferenceKF = mpReferenceKF;
// If we have an initial estimation of the camera pose and matching. Track the local map.
///With an initial estimate of the camera pose, track the local map
if(!mbOnlyTracking)
{
if(bOK)
bOK = TrackLocalMap();
}
else
{
// mbVO true means that there are few matches to MapPoints in the map. We cannot retrieve
// a local map and therefore we do not perform TrackLocalMap(). Once the system relocalizes
// the camera we will use the local map again.
if(bOK && !mbVO)
bOK = TrackLocalMap();
}
if(bOK)
mState = OK;
else
mState=LOST;
// Update drawer
mpFrameDrawer->Update(this);
// If tracking was good, check whether we should insert a keyframe
if(bOK)
{
// Update motion model
if(!mLastFrame.mTcw.empty())
{
cv::Mat LastTwc = cv::Mat::eye(4,4,CV_32F);
mLastFrame.GetRotationInverse().copyTo(LastTwc.rowRange(0,3).colRange(0,3));
mLastFrame.GetCameraCenter().copyTo(LastTwc.rowRange(0,3).col(3));
mVelocity = mCurrentFrame.mTcw*LastTwc;
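// LastTwc is the inverse of the last frame's pose, so
//   mVelocity = Tcw_cur * Twc_last = T(cur <- last),
// the relative motion between the last two frames. TrackWithMotionModel() predicts
// the next pose as mVelocity * mLastFrame.mTcw (constant-velocity assumption).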
}
else
mVelocity = cv::Mat();
mpMapDrawer->SetCurrentCameraPose(mCurrentFrame.mTcw);
// Clean VO matches
for(int i=0; i<mCurrentFrame.N; i++)
{
MapPoint* pMP = mCurrentFrame.mvpMapPoints[i];
if(pMP)
if(pMP->Observations()<1)
{
mCurrentFrame.mvbOutlier[i] = false;
mCurrentFrame.mvpMapPoints[i]=static_cast<MapPoint*>(NULL);
}
}
// Delete temporal MapPoints
for(list<MapPoint*>::iterator lit = mlpTemporalPoints.begin(), lend = mlpTemporalPoints.end(); lit!=lend; lit++)
{
MapPoint* pMP = *lit;
delete pMP;
}
mlpTemporalPoints.clear();
// Check if we need to insert a new keyframe
if(NeedNewKeyFrame())
CreateNewKeyFrame();
// We allow points with high innovation (considered outliers by the Huber function)
// pass to the new keyframe, so that bundle adjustment will finally decide
// if they are outliers or not. We don't want next frame to estimate its position
// with those points so we discard them in the frame.
for(int i=0; i<mCurrentFrame.N;i++)
{
if(mCurrentFrame.mvpMapPoints[i] && mCurrentFrame.mvbOutlier[i])
mCurrentFrame.mvpMapPoints[i]=static_cast<MapPoint*>(NULL);
}
}
// Reset if the camera gets lost soon after initialization
if(mState==LOST)
{
if(mpMap->KeyFramesInMap()<=5)
{
cout << "Track lost soon after initialisation, reseting..." << endl;
mpSystem->Reset();
return;
}
}
if(!mCurrentFrame.mpReferenceKF)
mCurrentFrame.mpReferenceKF = mpReferenceKF;
mLastFrame = Frame(mCurrentFrame);
}
// Store frame pose information to retrieve the complete camera trajectory afterwards.
///If the current frame has a pose, store it so the full camera trajectory can be recovered afterwards
if(!mCurrentFrame.mTcw.empty())
{
///Tcr = Tcw * Trw^{-1}
cv::Mat Tcr = mCurrentFrame.mTcw*mCurrentFrame.mpReferenceKF->GetPoseInverse();
mlRelativeFramePoses.push_back(Tcr);//pose of the current frame relative to its reference keyframe
mlpReferences.push_back(mpReferenceKF);///the current frame's reference keyframe
mlFrameTimes.push_back(mCurrentFrame.mTimeStamp);//the current frame's timestamp
mlbLost.push_back(mState==LOST);
}
else///the current frame has no pose, i.e. it could not be localized
{
// This can happen if tracking is lost
mlRelativeFramePoses.push_back(mlRelativeFramePoses.back());
mlpReferences.push_back(mlpReferences.back());
mlFrameTimes.push_back(mlFrameTimes.back());
mlbLost.push_back(mState==LOST);
}
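// These four lists are consumed after the run (e.g. by System::SaveTrajectoryTUM) to
// rebuild each frame's absolute pose as Tcw = Tcr * Trw from the final, possibly
// loop-corrected keyframe poses.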
}
void Tracking::StereoInitialization()
{
if(mCurrentFrame.N>500)
{
// Set Frame pose to the origin
mCurrentFrame.SetPose(cv::Mat::eye(4,4,CV_32F));
// Create KeyFrame
KeyFrame* pKFini = new KeyFrame(mCurrentFrame,mpMap,mpKeyFrameDB);
// Insert KeyFrame in the map
mpMap->AddKeyFrame(pKFini);
// Create MapPoints and associate them to the KeyFrame
for(int i=0; i<mCurrentFrame.N;i++)
{
float z = mCurrentFrame.mvDepth[i];
if(z>0)
{
cv::Mat x3D = mCurrentFrame.UnprojectStereo(i);
MapPoint* pNewMP = new MapPoint(x3D,pKFini,mpMap);
pNewMP->AddObservation(pKFini,i);
pKFini->AddMapPoint(pNewMP,i);
pNewMP->ComputeDistinctiveDescriptors();
pNewMP->UpdateNormalAndDepth();
mpMap->AddMapPoint(pNewMP);
mCurrentFrame.mvpMapPoints[i]=pNewMP;
}
}
cout << "New map created with " << mpMap->MapPointsInMap() << " points" << endl;
mpLocalMapper->InsertKeyFrame(pKFini);
mLastFrame = Frame(mCurrentFrame);
mnLastKeyFrameId=mCurrentFrame.mnId;
mpLastKeyFrame = pKFini;
mvpLocalKeyFrames.push_back(pKFini);
mvpLocalMapPoints=mpMap->GetAllMapPoints();
mpReferenceKF = pKFini;
mCurrentFrame.mpReferenceKF = pKFini;
mpMap->SetReferenceMapPoints(mvpLocalMapPoints);
mpMap->mvpKeyFrameOrigins.push_back(pKFini);
mpMapDrawer->SetCurrentCameraPose(mCurrentFrame.mTcw);
mState=OK;
}
}
/// Monocular map initialization
void Tracking::MonocularInitialization()
{
/// the Initializer has not been created yet
if(!mpInitializer)
{
// Set Reference Frame
/// only start if the current frame has more than 100 keypoints
if(mCurrentFrame.mvKeys.size()>100)
{
/// set init frame
mInitialFrame = Frame(mCurrentFrame);
/// set last frame
mLastFrame = Frame(mCurrentFrame);
/// record the undistorted keypoint positions in mvbPrevMatched
mvbPrevMatched.resize(mCurrentFrame.mvKeysUn.size());
for(size_t i=0; i<mCurrentFrame.mvKeysUn.size(); i++)
mvbPrevMatched[i]=mCurrentFrame.mvKeysUn[i].pt;
/// make sure no stale Initializer is left over
if(mpInitializer)
delete mpInitializer;
/// new Initializer, [const Frame &ReferenceFrame, float sigma, int iterations]
mpInitializer = new Initializer(mCurrentFrame,1.0,200);
fill(mvIniMatches.begin(),mvIniMatches.end(),-1);
return;
}
}
else /// the Initializer exists; try to initialize
{
// Try to initialize
/// if the frame has too few keypoints, discard the Initializer and start over
if((int)mCurrentFrame.mvKeys.size()<=100)
{
delete mpInitializer;
mpInitializer = static_cast<Initializer*>(NULL);
fill(mvIniMatches.begin(),mvIniMatches.end(),-1);
return;
}
// Find correspondences
ORBmatcher matcher(0.9,true);
int nmatches = matcher.SearchForInitialization(mInitialFrame,mCurrentFrame,mvbPrevMatched,mvIniMatches,100);
// Check if there are enough correspondences
/// not enough correspondences; initialization failed
if(nmatches<100)
{
delete mpInitializer;
mpInitializer = static_cast<Initializer*>(NULL);
return;
}
cv::Mat Rcw; // Current Camera Rotation
cv::Mat tcw; // Current Camera Translation
vector<bool> vbTriangulated; // Triangulated Correspondences (mvIniMatches)
if(mpInitializer->Initialize(mCurrentFrame, mvIniMatches, Rcw, tcw, mvIniP3D, vbTriangulated))
{
for(size_t i=0, iend=mvIniMatches.size(); i<iend;i++)
{
if(mvIniMatches[i]>=0 && !vbTriangulated[i])
{
mvIniMatches[i]=-1;
nmatches--;
}
}
// Set Frame Poses
mInitialFrame.SetPose(cv::Mat::eye(4,4,CV_32F));
cv::Mat Tcw = cv::Mat::eye(4,4,CV_32F);
Rcw.copyTo(Tcw.rowRange(0,3).colRange(0,3));
tcw.copyTo(Tcw.rowRange(0,3).col(3));
mCurrentFrame.SetPose(Tcw);
CreateInitialMapMonocular();
}
}
}
void Tracking::CreateInitialMapMonocular()
{
// Create KeyFrames
KeyFrame* pKFini = new KeyFrame(mInitialFrame,mpMap,mpKeyFrameDB);
KeyFrame* pKFcur = new KeyFrame(mCurrentFrame,mpMap,mpKeyFrameDB);
pKFini->ComputeBoW();
pKFcur->ComputeBoW();
// Insert KFs in the map
mpMap->AddKeyFrame(pKFini);
mpMap->AddKeyFrame(pKFcur);
// Create MapPoints and associate them to the keyframes
/// loop over all initial matches and their triangulated points
for(size_t i=0; i<mvIniMatches.size();i++)
{
/// skip keypoints without a match
if(mvIniMatches[i]<0)
continue;
//Create MapPoint.
cv::Mat worldPos(mvIniP3D[i]);
/// create a new MapPoint with pKFcur as its reference keyframe
MapPoint* pMP = new MapPoint(worldPos,pKFcur,mpMap);
/// add the map point to each keyframe, so the keyframe knows which map points it sees
pKFini->AddMapPoint(pMP,i);
pKFcur->AddMapPoint(pMP,mvIniMatches[i]);
/// let the map point know it is observed by pKFini and pKFcur
pMP->AddObservation(pKFini,i);
pMP->AddObservation(pKFcur,mvIniMatches[i]);
/// compute its distinctive descriptor
pMP->ComputeDistinctiveDescriptors();
/// update the map point's mean viewing direction and scale-invariance depth range
pMP->UpdateNormalAndDepth();
//Fill Current Frame structure
mCurrentFrame.mvpMapPoints[mvIniMatches[i]] = pMP;
mCurrentFrame.mvbOutlier[mvIniMatches[i]] = false;
//Add to Map
mpMap->AddMapPoint(pMP);
}
// Update Connections
/// update covisibility graph, essential graph and spanning tree
pKFini->UpdateConnections();
pKFcur->UpdateConnections();
// Bundle Adjustment
cout << "New Map created with " << mpMap->MapPointsInMap() << " points" << endl;
/// run a global bundle adjustment
Optimizer::GlobalBundleAdjustemnt(mpMap,20);
// Set median depth to 1: monocular scale is arbitrary, so rescale the map so that the median scene depth of the first keyframe is 1
float medianDepth = pKFini->ComputeSceneMedianDepth(2);
float invMedianDepth = 1.0f/medianDepth;
if(medianDepth<0 || pKFcur->TrackedMapPoints(1)<100)
{
cout << "Wrong initialization, reseting..." << endl;
Reset();
return;
}
// Scale initial baseline
cv::Mat Tc2w = pKFcur->GetPose();
Tc2w.col(3).rowRange(0,3) = Tc2w.col(3).rowRange(0,3)*invMedianDepth;
pKFcur->SetPose(Tc2w);
// Scale points
vector<MapPoint*> vpAllMapPoints = pKFini->GetMapPointMatches();
for(size_t iMP=0; iMP<vpAllMapPoints.size(); iMP++)
{
if(vpAllMapPoints[iMP])
{
MapPoint* pMP = vpAllMapPoints[iMP];
pMP->SetWorldPos(pMP->GetWorldPos()*invMedianDepth);
}
}
mpLocalMapper->InsertKeyFrame(pKFini);
mpLocalMapper->InsertKeyFrame(pKFcur);
mCurrentFrame.SetPose(pKFcur->GetPose());
mnLastKeyFrameId=mCurrentFrame.mnId;
mpLastKeyFrame = pKFcur;
mvpLocalKeyFrames.push_back(pKFcur);
mvpLocalKeyFrames.push_back(pKFini);
mvpLocalMapPoints=mpMap->GetAllMapPoints();
mpReferenceKF = pKFcur;
mCurrentFrame.mpReferenceKF = pKFcur;
mLastFrame = Frame(mCurrentFrame);
mpMap->SetReferenceMapPoints(mvpLocalMapPoints);
mpMapDrawer->SetCurrentCameraPose(pKFcur->GetPose());
mpMap->mvpKeyFrameOrigins.push_back(pKFini);
mState=OK;
}
///Update map points in the last frame that have been replaced
void Tracking::CheckReplacedInLastFrame()
{
for(int i =0; i<mLastFrame.N; i++)
{
///a map point from the last frame
MapPoint* pMP = mLastFrame.mvpMapPoints[i];
if(pMP)
{
///the map point's replacement, if it was replaced
MapPoint* pRep = pMP->GetReplaced();
if(pRep)
{
///repoint to the replacement
mLastFrame.mvpMapPoints[i] = pRep;
}
}
}
}
///Track against the reference keyframe
bool Tracking::TrackReferenceKeyFrame()
{
// Compute Bag of Words vector
///1. Compute the current frame's BoW vector
mCurrentFrame.ComputeBoW();
// We perform first an ORB matching with the reference keyframe
// If enough matches are found we setup a PnP solver
///2. Match ORB features against the reference keyframe, using the BoW vocabulary to speed up matching
ORBmatcher matcher(0.7,true);
vector<MapPoint*> vpMapPointMatches;
int nmatches = matcher.SearchByBoW(mpReferenceKF,mCurrentFrame,vpMapPointMatches);
if(nmatches<15)
return false;
mCurrentFrame.mvpMapPoints = vpMapPointMatches;
///Seed the optimization with the last frame's pose so it converges faster
mCurrentFrame.SetPose(mLastFrame.mTcw);
///3. Optimize the current frame's pose
Optimizer::PoseOptimization(&mCurrentFrame);
// Discard outliers
///4. Discard the 3D points marked as outliers by the optimization
int nmatchesMap = 0;
for(int i =0; i<mCurrentFrame.N; i++)
{
if(mCurrentFrame.mvpMapPoints[i])
{
if(mCurrentFrame.mvbOutlier[i])
{
MapPoint* pMP = mCurrentFrame.mvpMapPoints[i];
mCurrentFrame.mvpMapPoints[i]=static_cast<MapPoint*>(NULL);
mCurrentFrame.mvbOutlier[i]=false;
pMP->mbTrackInView = false;
pMP->mnLastFrameSeen = mCurrentFrame.mnId;
nmatches--;
}
else if(mCurrentFrame.mvpMapPoints[i]->Observations()>0)
nmatchesMap++;
}
}
return nmatchesMap>=10;
}
/**
* Update the last frame's pose from its reference keyframe and, in localization mode
* with a stereo/RGB-D sensor, create new "visual odometry" MapPoints for it
* (queued in mlpTemporalPoints).
* */
void Tracking::UpdateLastFrame()
{
//Retrieve the last frame's reference keyframe and the relative pose Tlr
// Update pose according to reference keyframe
KeyFrame* pRef = mLastFrame.mpReferenceKF;
/// last frame's pose relative to reference frame
cv::Mat Tlr = mlRelativeFramePoses.back();
///the last frame's pose in the world frame: Tlw = Tlr * Trw
mLastFrame.SetPose(Tlr*pRef->GetPose());
if(mnLastKeyFrameId==mLastFrame.mnId || mSensor==System::MONOCULAR || !mbOnlyTracking)
return;
// Create "visual odometry" MapPoints
// We sort points according to their measured depth by the stereo/RGB-D sensor
vector<pair<float,int> > vDepthIdx;
vDepthIdx.reserve(mLastFrame.N);
for(int i=0; i<mLastFrame.N;i++)
{
float z = mLastFrame.mvDepth[i];
if(z>0)
{
vDepthIdx.push_back(make_pair(z,i));
}
}
if(vDepthIdx.empty())
return;
///sort the points by depth, ascending
sort(vDepthIdx.begin(),vDepthIdx.end());
// We insert all close points (depth<mThDepth).
// If less than 100 close points, we insert the 100 closest ones.
///newly created map points are also queued in mlpTemporalPoints so they can be deleted later
int nPoints = 0;
for(size_t j=0; j<vDepthIdx.size();j++)
{
int i = vDepthIdx[j].second;
bool bCreateNew = false;
MapPoint* pMP = mLastFrame.mvpMapPoints[i];
if(!pMP)
bCreateNew = true;
else if(pMP->Observations()<1)
{
bCreateNew = true;
}
///create a new 3D map point
if(bCreateNew)
{
cv::Mat x3D = mLastFrame.UnprojectStereo(i);
MapPoint* pNewMP = new MapPoint(x3D,mpMap,&mLastFrame,i);
mLastFrame.mvpMapPoints[i]=pNewMP;
mlpTemporalPoints.push_back(pNewMP);
nPoints++;
}
else
{
nPoints++;
}
if(vDepthIdx[j].first>mThDepth && nPoints>100)
break;
}//for
}
/// Track the current frame with a constant-velocity motion model:
/// project the last frame's MapPoints into the current frame and search the current
/// frame for the best match to each projected point
bool Tracking::TrackWithMotionModel()
{
ORBmatcher matcher(0.9,true);
// Update last frame pose according to its reference keyframe
// Create "visual odometry" points if in Localization Mode
///update the last frame's pose from its reference keyframe, then create new map points if needed
UpdateLastFrame();
///use the velocity between the last two frames to predict an initial pose for the current frame
mCurrentFrame.SetPose(mVelocity*mLastFrame.mTcw);
///clear the current frame's map point associations
fill(mCurrentFrame.mvpMapPoints.begin(),mCurrentFrame.mvpMapPoints.end(),static_cast<MapPoint*>(NULL));
// Project points seen in previous frame
int th;
if(mSensor!=System::STEREO)
th=15;
else
th=7;
///project the last frame's MapPoints into the current frame, match them, and return the number of matched pairs
int nmatches = matcher.SearchByProjection(mCurrentFrame,mLastFrame,th,mSensor==System::MONOCULAR);
// If there are few matches, use a wider search window
if(nmatches<20)
{
fill(mCurrentFrame.mvpMapPoints.begin(),mCurrentFrame.mvpMapPoints.end(),static_cast<MapPoint*>(NULL));
nmatches = matcher.SearchByProjection(mCurrentFrame,mLastFrame,2*th,mSensor==System::MONOCULAR);
}
if(nmatches<20)
return false;
// Optimize frame pose with all matches
Optimizer::PoseOptimization(&mCurrentFrame);
// Discard outliers
int nmatchesMap = 0;
for(int i =0; i<mCurrentFrame.N; i++)
{
if(mCurrentFrame.mvpMapPoints[i])
{
if(mCurrentFrame.mvbOutlier[i])
{
MapPoint* pMP = mCurrentFrame.mvpMapPoints[i];
mCurrentFrame.mvpMapPoints[i]=static_cast<MapPoint*>(NULL);
mCurrentFrame.mvbOutlier[i]=false;
pMP->mbTrackInView = false;
pMP->mnLastFrameSeen = mCurrentFrame.mnId;
nmatches--;
}
else if(mCurrentFrame.mvpMapPoints[i]->Observations()>0)
nmatchesMap++;
}
}
if(mbOnlyTracking)
{
//in tracking-only mode, set mbVO to true when fewer than 10 map points were matched
mbVO = nmatchesMap<10;
return nmatches>20;
}
return nmatchesMap>=10;
}
/**
* Track the local map:
* 1. update the local map;
* 2. search the local map for MapPoints matching the current frame;
* 3. optimize the current frame's pose;
* 4. update the MapPoints' statistics
*/
bool Tracking::TrackLocalMap()
{
// We have an estimation of the camera pose and some map points tracked in the frame.
// We retrieve the local map and try to find matches to points in the local map.
//Update the local map
UpdateLocalMap();
//Search the local map for MapPoints matching the current frame
SearchLocalPoints();
// Optimize Pose
Optimizer::PoseOptimization(&mCurrentFrame);
mnMatchesInliers = 0;
// Update MapPoints Statistics
for(int i=0; i<mCurrentFrame.N; i++)
{
if(mCurrentFrame.mvpMapPoints[i])
{
if(!mCurrentFrame.mvbOutlier[i])
{
mCurrentFrame.mvpMapPoints[i]->IncreaseFound();
if(!mbOnlyTracking)
{
if(mCurrentFrame.mvpMapPoints[i]->Observations()>0)
mnMatchesInliers++;
}
else
mnMatchesInliers++;
}
else if(mSensor==System::STEREO)
mCurrentFrame.mvpMapPoints[i] = static_cast<MapPoint*>(NULL);
}
}
// Decide if the tracking was successful
// More restrictive if there was a relocalization recently
//if a relocalization happened recently and there are few inlier matches, tracking fails
if(mCurrentFrame.mnId<mnLastRelocFrameId+mMaxFrames && mnMatchesInliers<50)
return false;
if(mnMatchesInliers<30)
return false;
else
return true;
}
/**
* Decide whether a new keyframe is needed. A keyframe is never inserted when:
* 1. the system is in tracking-only mode;
* 2. Local Mapping is stopped or a stop has been requested (e.g. by loop closing);
* 3. a relocalization happened recently and the map already has enough keyframes.
* Otherwise a keyframe is considered when one of these holds:
* 4. more than mMaxFrames frames have passed since the last keyframe;
* 5. more than mMinFrames frames have passed and Local Mapping is idle;
* 6. tracking is weak (stereo/RGB-D: few inliers or few close points tracked);
* and, in addition, the current frame must track clearly fewer points than the
* reference keyframe while still keeping more than 15 inliers.
*/
bool Tracking::NeedNewKeyFrame()
{
if(mbOnlyTracking)
return false;
// If Local Mapping is freezed by a Loop Closure do not insert keyframes
if(mpLocalMapper->isStopped() || mpLocalMapper->stopRequested())
return false;
const int nKFs = mpMap->KeyFramesInMap();
// Do not insert keyframes if not enough frames have passed from last relocalisation
if(mCurrentFrame.mnId<mnLastRelocFrameId+mMaxFrames && nKFs>mMaxFrames)
return false;
// Tracked MapPoints in the reference keyframe
//count the map points tracked in the reference keyframe
int nMinObs = 3;
if(nKFs<=2)
nMinObs=2;
int nRefMatches = mpReferenceKF->TrackedMapPoints(nMinObs);
// Local Mapping accept keyframes?
bool bLocalMappingIdle = mpLocalMapper->AcceptKeyFrames();
// Check how many "close" points are being tracked and how many could be potentially created.
int nNonTrackedClose = 0;
int nTrackedClose= 0;
if(mSensor!=System::MONOCULAR)
{
for(int i =0; i<mCurrentFrame.N; i++)
{
if(mCurrentFrame.mvDepth[i]>0 && mCurrentFrame.mvDepth[i]<mThDepth)
{
if(mCurrentFrame.mvpMapPoints[i] && !mCurrentFrame.mvbOutlier[i])
nTrackedClose++;
else
nNonTrackedClose++;
}
}
}
bool bNeedToInsertClose = (nTrackedClose<100) && (nNonTrackedClose>70);
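// Heuristic: fewer than 100 close points currently tracked, but more than 70 close
// points that could still be created -> a new keyframe would usefully densify the map.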
// Thresholds
float thRefRatio = 0.75f;
if(nKFs<2)
thRefRatio = 0.4f;
if(mSensor==System::MONOCULAR)
thRefRatio = 0.9f;
// Condition 1a: More than "MaxFrames" have passed from last keyframe insertion
const bool c1a = mCurrentFrame.mnId>=mnLastKeyFrameId+mMaxFrames;
// Condition 1b: More than "MinFrames" have passed and Local Mapping is idle
const bool c1b = (mCurrentFrame.mnId>=mnLastKeyFrameId+mMinFrames && bLocalMappingIdle);
//Condition 1c: tracking is weak
const bool c1c = mSensor!=System::MONOCULAR && (mnMatchesInliers<nRefMatches*0.25 || bNeedToInsertClose) ;
// Condition 2: Few tracked points compared to reference keyframe. Lots of visual odometry compared to map matches.
const bool c2 = ((mnMatchesInliers<nRefMatches*thRefRatio|| bNeedToInsertClose) && mnMatchesInliers>15);
if((c1a||c1b||c1c)&&c2)
{
// If the mapping accepts keyframes, insert keyframe.
// Otherwise send a signal to interrupt BA
//if Local Mapping cannot accept keyframes, interrupt the BA
if(bLocalMappingIdle)
{
return true;
}
else
{
mpLocalMapper->InterruptBA();
if(mSensor!=System::MONOCULAR)
{
//fewer than 3 keyframes waiting in the Local Mapping queue
if(mpLocalMapper->KeyframesInQueue()<3)
return true;
else
return false;
}
else
return false;
}
}
else
return false;
}
/**
* Create a new keyframe and insert it into LocalMapping
*/
void Tracking::CreateNewKeyFrame()
{
//1. While the Tracking thread inserts a keyframe, LocalMapping must not be stopped, so mbNotStop must be true
if(!mpLocalMapper->SetNotStop(true))
return;
//2. Create the new keyframe
KeyFrame* pKF = new KeyFrame(mCurrentFrame,mpMap,mpKeyFrameDB);
//3. Make the new keyframe the reference keyframe
mpReferenceKF = pKF;
mCurrentFrame.mpReferenceKF = pKF;
if(mSensor!=System::MONOCULAR)
{
mCurrentFrame.UpdatePoseMatrices();
// We sort points by the measured depth by the stereo/RGBD sensor.
// We create all those MapPoints whose depth < mThDepth.
// If there are less than 100 close points we create the 100 closest.
vector<pair<float,int> > vDepthIdx;
vDepthIdx.reserve(mCurrentFrame.N);
for(int i=0; i<mCurrentFrame.N; i++)
{
float z = mCurrentFrame.mvDepth[i];
if(z>0)
{
vDepthIdx.push_back(make_pair(z,i));
}
}
if(!vDepthIdx.empty())
{
sort(vDepthIdx.begin(),vDepthIdx.end());
int nPoints = 0;
for(size_t j=0; j<vDepthIdx.size();j++)
{
int i = vDepthIdx[j].second;
bool bCreateNew = false;
MapPoint* pMP = mCurrentFrame.mvpMapPoints[i];
if(!pMP)
bCreateNew = true;
else if(pMP->Observations()<1)
{
bCreateNew = true;
mCurrentFrame.mvpMapPoints[i] = static_cast<MapPoint*>(NULL);
}
if(bCreateNew)
{
cv::Mat x3D = mCurrentFrame.UnprojectStereo(i);
MapPoint* pNewMP = new MapPoint(x3D,pKF,mpMap);
pNewMP->AddObservation(pKF,i);
pKF->AddMapPoint(pNewMP,i);
pNewMP->ComputeDistinctiveDescriptors();
pNewMP->UpdateNormalAndDepth();
mpMap->AddMapPoint(pNewMP);
mCurrentFrame.mvpMapPoints[i]=pNewMP;
nPoints++;
}
else
{
nPoints++;
}
if(vDepthIdx[j].first>mThDepth && nPoints>100)
break;
}
}
}
//4. Insert the keyframe
mpLocalMapper->InsertKeyFrame(pKF);
//5. Allow LocalMapping to be stopped again
mpLocalMapper->SetNotStop(false);
//6. Record this as the last keyframe
mnLastKeyFrameId = mCurrentFrame.mnId;
mpLastKeyFrame = pKF;
}
/// Search for correspondences between the current frame's keypoints and the local map points
void Tracking::SearchLocalPoints()
{
// Do not search map points already matched
/// map points already associated to the current frame (matched in TrackWithMotionModel,
/// TrackReferenceKeyFrame or Relocalization) are marked so they are not matched again
for(vector<MapPoint*>::iterator vit=mCurrentFrame.mvpMapPoints.begin(), vend=mCurrentFrame.mvpMapPoints.end(); vit!=vend; vit++)
{
MapPoint* pMP = *vit;
if(pMP)
{
if(pMP->isBad())
{
*vit = static_cast<MapPoint*>(NULL);
}
else
{
/// increase the count of frames that observe this map point
pMP->IncreaseVisible();
/// record that the map point was last seen in the current frame
pMP->mnLastFrameSeen = mCurrentFrame.mnId;
/// mark the map point to be skipped during matching, since it is already matched
pMP->mbTrackInView = false;
}
}
}
int nToMatch=0;
// Project points in frame and check its visibility
/// project each local MapPoint into the current frame and check whether it is visible
for(vector<MapPoint*>::iterator vit=mvpLocalMapPoints.begin(), vend=mvpLocalMapPoints.end(); vit!=vend; vit++)
{
MapPoint* pMP = *vit;
if(pMP->mnLastFrameSeen == mCurrentFrame.mnId)
continue;
if(pMP->isBad())
continue;
// Project (this fills MapPoint variables for matching)
/// the map point falls inside the current frame's frustum
if(mCurrentFrame.isInFrustum(pMP,0.5))
{
pMP->IncreaseVisible();
nToMatch++;
}
}
if(nToMatch>0)
{
ORBmatcher matcher(0.8);
int th = 1;
if(mSensor==System::RGBD)
th=3;
// If the camera has been relocalised recently, perform a coarser search
if(mCurrentFrame.mnId<mnLastRelocFrameId+2)
th=5;
/// match the current frame's keypoints against the local map points
matcher.SearchByProjection(mCurrentFrame,mvpLocalMapPoints,th);
}
}
///Update the local map: refresh the local keyframes and the local map points
void Tracking::UpdateLocalMap()
{
// This is for visualization
mpMap->SetReferenceMapPoints(mvpLocalMapPoints);
// Update
UpdateLocalKeyFrames();
UpdateLocalPoints();
}
/**
* Update the local map points:
* add the map points of every local keyframe to mvpLocalMapPoints
*/
void Tracking::UpdateLocalPoints()
{
mvpLocalMapPoints.clear();
//iterate over all local keyframes
for(vector<KeyFrame*>::const_iterator itKF=mvpLocalKeyFrames.begin(), itEndKF=mvpLocalKeyFrames.end(); itKF!=itEndKF; itKF++)
{
//get the keyframe's associated map points
KeyFrame* pKF = *itKF;
const vector<MapPoint*> vpMPs = pKF->GetMapPointMatches();
//iterate over the map points and add each MapPoint to mvpLocalMapPoints
for(vector<MapPoint*>::const_iterator itMP=vpMPs.begin(), itEndMP=vpMPs.end(); itMP!=itEndMP; itMP++)
{
MapPoint* pMP = *itMP;
if(!pMP)
continue;
//avoid adding the same point twice
if(pMP->mnTrackReferenceForFrame==mCurrentFrame.mnId)
continue;
if(!pMP->isBad())
{
mvpLocalMapPoints.push_back(pMP);
pMP->mnTrackReferenceForFrame=mCurrentFrame.mnId;
}
}
}
}
/**
* Update the local map's keyframes:
* from the current frame's map points, collect the keyframes with the highest
* covisibility to form the local map's keyframe set
*/
void Tracking::UpdateLocalKeyFrames()
{
// Each map point vote for the keyframes in which it has been observed
///<keyframe covisible with the current frame, number of map points it shares with the current frame>
map<KeyFrame*,int> keyframeCounter;
///iterate over the current frame's map points
for(int i=0; i<mCurrentFrame.N; i++)
{
if(mCurrentFrame.mvpMapPoints[i])
{
///take the map point
MapPoint* pMP = mCurrentFrame.mvpMapPoints[i];
if(!pMP->isBad())
{
///get the keyframes observing this map point, i.e. the current frame's covisible keyframes
const map<KeyFrame*,size_t> observations = pMP->GetObservations();
///vote: for each observing keyframe, count the map points it shares with the current frame
for(map<KeyFrame*,size_t>::const_iterator it=observations.begin(), itend=observations.end(); it!=itend; it++)
keyframeCounter[it->first]++;
}
else
{
mCurrentFrame.mvpMapPoints[i]=NULL;
}
}
}
if(keyframeCounter.empty())
return;
int max=0;
KeyFrame* pKFmax= static_cast<KeyFrame*>(NULL);
mvpLocalKeyFrames.clear();
mvpLocalKeyFrames.reserve(3*keyframeCounter.size());
// All keyframes that observe a map point are included in the local map. Also check which keyframe shares most points
///iterate over all covisible keyframes and find the one with the highest covisibility
for(map<KeyFrame*,int>::const_iterator it=keyframeCounter.begin(), itEnd=keyframeCounter.end(); it!=itEnd; it++)
{
KeyFrame* pKF = it->first;
if(pKF->isBad())
continue;
if(it->second>max)
{
max=it->second;
pKFmax=pKF;
}
///add the covisible keyframe to mvpLocalKeyFrames
mvpLocalKeyFrames.push_back(it->first);
///mark pKF with the current frame's id; every keyframe in the local map carries this mark, so duplicates can be detected below
pKF->mnTrackReferenceForFrame = mCurrentFrame.mnId;
}
// Include also some not-already-included keyframes that are neighbors to already-included keyframes
///iterate over the local keyframes and also add some keyframes that are not yet in the local map but neighbor ones that are
for(vector<KeyFrame*>::const_iterator itKF=mvpLocalKeyFrames.begin(), itEndKF=mvpLocalKeyFrames.end(); itKF!=itEndKF; itKF++)
{
// Limit the number of keyframes
//cap the local map at 80 keyframes; a weakness, ORB-SLAM is full of hard-coded numbers like this
if(mvpLocalKeyFrames.size()>80)
break;
//1. take the keyframe
KeyFrame* pKF = *itKF;
//2. get its 10 most covisible keyframes from the covisibility graph
const vector<KeyFrame*> vNeighs = pKF->GetBestCovisibilityKeyFrames(10);
//3. iterate over those neighbors and add the first one not yet in the local map
for(vector<KeyFrame*>::const_iterator itNeighKF=vNeighs.begin(), itEndNeighKF=vNeighs.end(); itNeighKF!=itEndNeighKF; itNeighKF++)
{
KeyFrame* pNeighKF = *itNeighKF;
if(!pNeighKF->isBad())
{
//this is what the mark set above is for: skipping keyframes already in the local map
if(pNeighKF->mnTrackReferenceForFrame!=mCurrentFrame.mnId)
{
mvpLocalKeyFrames.push_back(pNeighKF);
pNeighKF->mnTrackReferenceForFrame=mCurrentFrame.mnId;
break;
}
}
}
///4. likewise, add the first child keyframe not yet in the local map
const set<KeyFrame*> spChilds = pKF->GetChilds();
for(set<KeyFrame*>::const_iterator sit=spChilds.begin(), send=spChilds.end(); sit!=send; sit++)
{
KeyFrame* pChildKF = *sit;
if(!pChildKF->isBad())
{
if(pChildKF->mnTrackReferenceForFrame!=mCurrentFrame.mnId)
{
mvpLocalKeyFrames.push_back(pChildKF);
pChildKF->mnTrackReferenceForFrame=mCurrentFrame.mnId;
break;
}
}
}
///5. add the parent keyframe to the local map
KeyFrame* pParent = pKF->GetParent();
if(pParent)
{
if(pParent->mnTrackReferenceForFrame!=mCurrentFrame.mnId)
{
mvpLocalKeyFrames.push_back(pParent);
pParent->mnTrackReferenceForFrame=mCurrentFrame.mnId;
break;
}
}
}
///take the keyframe with the highest covisibility as the reference keyframe
if(pKFmax)
{
mpReferenceKF = pKFmax;
mCurrentFrame.mpReferenceKF = mpReferenceKF;
}
}
/**
* Relocalization:
* select candidate keyframes from the keyframe database, run iterative PnP against
* each candidate, then refine with nonlinear optimization, until a camera pose
* supported by enough inlier MapPoints is found
*/
bool Tracking::Relocalization()
{
// Compute Bag of Words Vector
//1. Compute the current frame's BoW vector
mCurrentFrame.ComputeBoW();
// Relocalization is performed when tracking is lost
// Track Lost: Query KeyFrame Database for keyframe candidates for relocalisation
//2. Query the keyframe database for relocalization candidates
vector<KeyFrame*> vpCandidateKFs = mpKeyFrameDB->DetectRelocalizationCandidates(&mCurrentFrame);
if(vpCandidateKFs.empty())
return false;
const int nKFs = vpCandidateKFs.size();
// We perform first an ORB matching with each candidate
// If enough matches are found we setup a PnP solver
//3. Run ORB matching between the current frame and each candidate
ORBmatcher matcher(0.75,true);
vector<PnPsolver*> vpPnPsolvers;
vpPnPsolvers.resize(nKFs);
vector<vector<MapPoint*> > vvpMapPointMatches;
vvpMapPointMatches.resize(nKFs);
vector<bool> vbDiscarded;
vbDiscarded.resize(nKFs);
int nCandidates=0;
for(int i=0; i<nKFs; i++)
{
KeyFrame* pKF = vpCandidateKFs[i];
if(pKF->isBad())
vbDiscarded[i] = true;
else
{
//search ORB matches between pKF and mCurrentFrame; the matched MapPoints go into vvpMapPointMatches
int nmatches = matcher.SearchByBoW(pKF,mCurrentFrame,vvpMapPointMatches[i]);
if(nmatches<15)
{
vbDiscarded[i] = true;
continue;
}
else
{
//4. if the matching succeeds, set up a PnP solver
PnPsolver* pSolver = new PnPsolver(mCurrentFrame,vvpMapPointMatches[i]);
pSolver->SetRansacParameters(0.99,10,300,4,0.5,5.991);
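// The arguments above are, in the order of PnPsolver::SetRansacParameters: RANSAC
// success probability, minimum inliers, maximum iterations, minimal sample size (P4P),
// expected inlier ratio epsilon, and the chi-squared threshold th2
// (5.991 = 95% quantile for a 2-DoF reprojection error).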
vpPnPsolvers[i] = pSolver;
nCandidates++;
}
}
}
// Alternatively perform some iterations of P4P RANSAC
// Until we find a camera pose supported by enough inliers
bool bMatch = false;
ORBmatcher matcher2(0.9,true);
while(nCandidates>0 && !bMatch)
{
for(int i=0; i<nKFs; i++)
{
if(vbDiscarded[i])
continue;
// Perform 5 Ransac Iterations
//run 5 PnP RANSAC iterations to obtain a preliminary pose Tcw for the current frame
vector<bool> vbInliers;
int nInliers;
bool bNoMore;
PnPsolver* pSolver = vpPnPsolvers[i];
cv::Mat Tcw = pSolver->iterate(5,bNoMore,vbInliers,nInliers);
// If RANSAC reaches max. iterations, discard the keyframe
if(bNoMore)
{
vbDiscarded[i]=true;
nCandidates--;
}
// If a Camera Pose is computed, optimize
//optimize the pose
if(!Tcw.empty())
{
Tcw.copyTo(mCurrentFrame.mTcw);
set<MapPoint*> sFound;
//update the current frame's MapPoint correspondences
const int np = vbInliers.size();
for(int j=0; j<np; j++)
{
if(vbInliers[j])
{
mCurrentFrame.mvpMapPoints[j]=vvpMapPointMatches[i][j];
sFound.insert(vvpMapPointMatches[i][j]);
}
else
mCurrentFrame.mvpMapPoints[j]=NULL;
}
//run the optimization
int nGood = Optimizer::PoseOptimization(&mCurrentFrame);
if(nGood<10)
continue;
for(int io =0; io<mCurrentFrame.N; io++)
if(mCurrentFrame.mvbOutlier[io])
mCurrentFrame.mvpMapPoints[io]=static_cast<MapPoint*>(NULL);
// If few inliers, search by projection in a coarse window and optimize again
//if there are too few inliers, search in a coarser window and optimize again
if(nGood<50)
{
//run ORB matching again, this time by projection
int nadditional =matcher2.SearchByProjection(mCurrentFrame,vpCandidateKFs[i],sFound,10,100);
if(nadditional+nGood>=50)
{
//optimize again
nGood = Optimizer::PoseOptimization(&mCurrentFrame);
// If many inliers but still not enough, search by projection again in a narrower window
// the camera has been already optimized with many points
//if there are a fair number of inliers but still not enough, search again in a narrower window and re-optimize
if(nGood>30 && nGood<50)
{
sFound.clear();
for(int ip =0; ip<mCurrentFrame.N; ip++)
if(mCurrentFrame.mvpMapPoints[ip])
sFound.insert(mCurrentFrame.mvpMapPoints[ip]);
nadditional =matcher2.SearchByProjection(mCurrentFrame,vpCandidateKFs[i],sFound,3,64);
// Final optimization
if(nGood+nadditional>=50)
{
nGood = Optimizer::PoseOptimization(&mCurrentFrame);
for(int io =0; io<mCurrentFrame.N; io++)
if(mCurrentFrame.mvbOutlier[io])
mCurrentFrame.mvpMapPoints[io]=NULL;
}
}
}
}
// If the pose is supported by enough inliers stop ransacs and continue
//relocalization succeeded; stop
if(nGood>=50)
{
bMatch = true;
break;
}
}
} //for
}//while
if(!bMatch)
{
return false;
}
else
{
//record the frame id of this relocalization
mnLastRelocFrameId = mCurrentFrame.mnId;
return true;
}
}
void Tracking::Reset()
{
cout << "System Reseting" << endl;
if(mpViewer)
{
mpViewer->RequestStop();
while(!mpViewer->isStopped())
usleep(3000);
}
// Reset Local Mapping
cout << "Reseting Local Mapper...";
mpLocalMapper->RequestReset();
cout << " done" << endl;
// Reset Loop Closing
cout << "Reseting Loop Closing...";
mpLoopClosing->RequestReset();
cout << " done" << endl;
// Clear BoW Database
cout << "Reseting Database...";
mpKeyFrameDB->clear();
cout << " done" << endl;
// Clear Map (this erases MapPoints and KeyFrames)
mpMap->clear();
KeyFrame::nNextId = 0;
Frame::nNextId = 0;
mState = NO_IMAGES_YET;
if(mpInitializer)
{
delete mpInitializer;
mpInitializer = static_cast<Initializer*>(NULL);
}
mlRelativeFramePoses.clear();
mlpReferences.clear();
mlFrameTimes.clear();
mlbLost.clear();
if(mpViewer)
mpViewer->Release();
}
void Tracking::ChangeCalibration(const string &strSettingPath)
{
cv::FileStorage fSettings(strSettingPath, cv::FileStorage::READ);
float fx = fSettings["Camera.fx"];
float fy = fSettings["Camera.fy"];
float cx = fSettings["Camera.cx"];
float cy = fSettings["Camera.cy"];
cv::Mat K = cv::Mat::eye(3,3,CV_32F);
K.at<float>(0,0) = fx;
K.at<float>(1,1) = fy;
K.at<float>(0,2) = cx;
K.at<float>(1,2) = cy;
K.copyTo(mK);
cv::Mat DistCoef(4,1,CV_32F);
DistCoef.at<float>(0) = fSettings["Camera.k1"];
DistCoef.at<float>(1) = fSettings["Camera.k2"];
DistCoef.at<float>(2) = fSettings["Camera.p1"];
DistCoef.at<float>(3) = fSettings["Camera.p2"];
const float k3 = fSettings["Camera.k3"];
if(k3!=0)
{
DistCoef.resize(5);
DistCoef.at<float>(4) = k3;
}
DistCoef.copyTo(mDistCoef);
mbf = fSettings["Camera.bf"];
Frame::mbInitialComputations = true;
}
void Tracking::InformOnlyTracking(const bool &flag)
{
mbOnlyTracking = flag;
}
} //namespace ORB_SLAM
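As a usage sketch (assuming a standard ORB-SLAM2 build; the vocabulary and settings paths are placeholders), note that a client never calls Tracking directly: System owns the Tracking instance and forwards each image to the GrabImage* functions above.

#include <opencv2/opencv.hpp>
#include "System.h"

int main()
{
    // System constructs Tracking internally and routes TrackMonocular()
    // to Tracking::GrabImageMonocular().
    ORB_SLAM2::System SLAM("ORBvoc.txt", "camera.yaml", ORB_SLAM2::System::MONOCULAR, true);
    cv::VideoCapture cap(0);
    cv::Mat im;
    double t = 0;
    while (cap.read(im))
    {
        // Returns the camera pose Tcw for this frame (empty if tracking is lost).
        cv::Mat Tcw = SLAM.TrackMonocular(im, t);
        t += 1.0 / 30.0; // assume a 30 fps stream
    }
    SLAM.Shutdown();
    return 0;
}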