The front end of VSLAM, also known as visual odometry, estimates the camera pose using multi-view geometry. From our earlier reading and from studying the "14 Lectures on Visual SLAM", it is clear that accurate pixel-level matching between frames is critical, and that the sparse feature-point approach is what makes real-time VSLAM feasible. We therefore begin our study of the front end with ORB feature matching, entering as usual through the function below.
cv::Mat Tracking::GrabImageMonocular(const cv::Mat &im, const double &timestamp)
{
mImGray = im;
if(mImGray.channels()==3)
{
if(mbRGB)
cvtColor(mImGray,mImGray,CV_RGB2GRAY);
else
cvtColor(mImGray,mImGray,CV_BGR2GRAY);
}
else if(mImGray.channels()==4)
{
if(mbRGB)
cvtColor(mImGray,mImGray,CV_RGBA2GRAY);
else
cvtColor(mImGray,mImGray,CV_BGRA2GRAY);
}
if(mState==NOT_INITIALIZED || mState==NO_IMAGES_YET)
mCurrentFrame = Frame(mImGray,timestamp,mpIniORBextractor,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth);//ORB features are extracted for the current frame inside the Frame constructor
else
mCurrentFrame = Frame(mImGray,timestamp,mpORBextractorLeft,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth);
Track();
return mCurrentFrame.mTcw.clone();
}
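For context, client code does not call GrabImageMonocular directly; in ORB-SLAM2, images normally reach it through System::TrackMonocular. A minimal sketch of a driver loop in the style of the bundled examples follows; the filename and timestamp vectors are hypothetical placeholders that the examples load from a dataset index beforehand.

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <string>
#include <vector>
#include "System.h"   // ORB_SLAM2::System

// Feed a list of image files to the SLAM system, one frame at a time.
void FeedImages(ORB_SLAM2::System &SLAM,
                const std::vector<std::string> &filenames,
                const std::vector<double> &timestamps)
{
    for(size_t ni = 0; ni < filenames.size(); ni++)
    {
        cv::Mat im = cv::imread(filenames[ni], cv::IMREAD_UNCHANGED);
        // TrackMonocular forwards the frame to Tracking::GrabImageMonocular
        // and returns the estimated camera pose Tcw.
        cv::Mat Tcw = SLAM.TrackMonocular(im, timestamps[ni]);
    }
}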
I. ORB Feature Matching
Correct feature matching is the premise of this post. ORB stands for Oriented FAST and Rotated BRIEF: FAST corners are detected on an image pyramid (which provides scale invariance) and assigned an orientation, and the BRIEF descriptor is steered by that orientation so that it becomes rotation invariant. The focus of this post, however, is the camera's initial pose estimation, so to avoid being swallowed by the large feature-extraction code, I will study ORB extraction separately later. For now, let us simply assume we use it as if calling the OpenCV interface.
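Under that assumption, here is a minimal sketch of what "using ORB through the OpenCV interface" looks like for two frames. This is not ORB-SLAM2 code (which uses its own ORBextractor and ORBmatcher classes); it is just an OpenCV stand-in for orientation:

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <vector>

// Detect ORB keypoints/descriptors in two grayscale images and match them
// with brute-force Hamming distance plus cross-check, which is roughly the
// role the Frame constructor and ORBmatcher play inside ORB-SLAM2.
void MatchORB(const cv::Mat &img1, const cv::Mat &img2,
              std::vector<cv::DMatch> &matches)
{
    cv::Ptr<cv::ORB> orb = cv::ORB::create(1000); // extract up to 1000 features
    std::vector<cv::KeyPoint> kps1, kps2;
    cv::Mat desc1, desc2;
    orb->detectAndCompute(img1, cv::noArray(), kps1, desc1);
    orb->detectAndCompute(img2, cv::noArray(), kps2, desc2);
    // ORB descriptors are binary, so Hamming distance is the right metric;
    // crossCheck keeps only mutually-best matches, a cheap outlier filter.
    cv::BFMatcher matcher(cv::NORM_HAMMING, /*crossCheck=*/true);
    matcher.match(desc1, desc2, matches);
}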
II. Camera Initialization
During the camera's first several frames, we must initialize the map scale and build an initial sparse point-cloud map.
void Tracking::MonocularInitialization()
{
if(!mpInitializer)
{
// Set Reference Frame
//Further initialization requires the current frame to have more than 100 keypoints; mvKeys was already extracted by the ORBextractor in the Frame constructor
if(mCurrentFrame.mvKeys.size()>100)
{
mInitialFrame = Frame(mCurrentFrame);
mLastFrame = Frame(mCurrentFrame);
mvbPrevMatched.resize(mCurrentFrame.mvKeysUn.size());
for(size_t i=0; i<mCurrentFrame.mvKeysUn.size(); i++)
mvbPrevMatched[i]=mCurrentFrame.mvKeysUn[i].pt;
if(mpInitializer)
delete mpInitializer;
//Create the Initializer with the current frame as the reference frame (sigma 1.0, 200 RANSAC iterations)
mpInitializer = new Initializer(mCurrentFrame,1.0,200);
fill(mvIniMatches.begin(),mvIniMatches.end(),-1);
return;
}
}
else
{
// Try to initialize
//If the current frame itself has too few keypoints, discard the Initializer and wait for a better reference frame
if((int)mCurrentFrame.mvKeys.size()<=100)
{
delete mpInitializer;
mpInitializer = static_cast<Initializer*>(NULL);
fill(mvIniMatches.begin(),mvIniMatches.end(),-1);
return;
}
// Find correspondences
//Match against the reference frame with the matcher and count the matched pairs
ORBmatcher matcher(0.9,true);
int nmatches = matcher.SearchForInitialization(mInitialFrame,mCurrentFrame,mvbPrevMatched,mvIniMatches,100);
// Check if there are enough correspondences
if(nmatches<100)
{
delete mpInitializer;
mpInitializer = static_cast<Initializer*>(NULL);
return;
}
//The matches are ready; hand them to Initialize to recover the relative camera pose
cv::Mat Rcw; // Current Camera Rotation
cv::Mat tcw; // Current Camera Translation
vector<bool> vbTriangulated; // Triangulated Correspondences (mvIniMatches)
if(mpInitializer->Initialize(mCurrentFrame, mvIniMatches, Rcw, tcw, mvIniP3D, vbTriangulated))
{
//Discard matches that could not be triangulated
for(size_t i=0, iend=mvIniMatches.size(); i<iend; i++)
{
if(mvIniMatches[i]>=0 && !vbTriangulated[i])
{
mvIniMatches[i]=-1;
nmatches--;
}
}
// Set Frame Poses
mInitialFrame.SetPose(cv::Mat::eye(4,4,CV_32F));
cv::Mat Tcw = cv::Mat::eye(4,4,CV_32F);
Rcw.copyTo(Tcw.rowRange(0,3).colRange(0,3));
tcw.copyTo(Tcw.rowRange(0,3).col(3));
mCurrentFrame.SetPose(Tcw);
CreateInitialMapMonocular();
}
}
}
We now go straight into the actual two-frame pose computation. Under pure rotation, or when the matched points lie on a plane, the fundamental matrix estimation degenerates (it loses degrees of freedom), so noise strongly corrupts the solution; the homography is insensitive to exactly these cases. ORB-SLAM2 therefore computes both a fundamental matrix and a homography (using the sigma of 1.0 and the 200 RANSAC iterations passed to the Initializer constructor above) and picks whichever scores better as the basis for decomposing R and t.
bool Initializer::Initialize(const Frame &CurrentFrame, const vector<int> &vMatches12, cv::Mat &R21, cv::Mat &t21,
vector<cv::Point3f> &vP3D, vector<bool> &vbTriangulated)
{
// Fill structures with current keypoints and matches with reference frame
// Reference Frame: 1, Current Frame: 2
mvKeys2 = CurrentFrame.mvKeysUn;
mvMatches12.clear();
mvMatches12.reserve(mvKeys2.size());
mvbMatched1.resize(mvKeys1.size());
for(size_t i=0, iend=vMatches12.size(); i<iend; i++)
{
if(vMatches12[i]>=0)
{
mvMatches12.push_back(make_pair(i,vMatches12[i]));
mvbMatched1[i]=true;
}
else
mvbMatched1[i]=false;
}
const int N = mvMatches12.size();
// Indices for minimum set selection
vector<size_t> vAllIndices;
vAllIndices.reserve(N);
vector<size_t> vAvailableIndices;
for(int i=0; i<N; i++)
{
vAllIndices.push_back(i);
}
// Generate sets of 8 points for each RANSAC iteration
mvSets = vector< vector<size_t> >(mMaxIterations,vector<size_t>(8,0));
DUtils::Random::SeedRandOnce(0);
for(int it=0; it<mMaxIterations; it++)
{
vAvailableIndices = vAllIndices;
// Select a minimum set of 8 matches without repetition
for(size_t j=0; j<8; j++)
{
int randi = DUtils::Random::RandomInt(0,vAvailableIndices.size()-1);
int idx = vAvailableIndices[randi];
mvSets[it][j] = idx;
vAvailableIndices[randi] = vAvailableIndices.back();
vAvailableIndices.pop_back();
}
}
// Launch threads to compute in parallel a fundamental matrix and a homography
vector<bool> vbMatchesInliersH, vbMatchesInliersF;
float SH, SF;
cv::Mat H, F;
//Since we do not know whether the camera is looking at a planar scene, the homography and the fundamental matrix are computed simultaneously
//Both are scored by transferring keypoints from one frame to the other with the estimated matrix and measuring how well they land on their matched counterparts
thread threadH(&Initializer::FindHomography,this,ref(vbMatchesInliersH), ref(SH), ref(H));
thread threadF(&Initializer::FindFundamental,this,ref(vbMatchesInliersF), ref(SF), ref(F));
// Wait until both threads have finished
threadH.join();
threadF.join();
// Compute ratio of scores
float RH = SH/(SH+SF);
// Try to reconstruct from homography or fundamental depending on the ratio (0.40-0.45)
if(RH>0.40)//choose the homography model
return ReconstructH(vbMatchesInliersH,H,mK,R21,t21,vP3D,vbTriangulated,1.0,50);
else //if(pF_HF>0.6)
return ReconstructF(vbMatchesInliersF,F,mK,R21,t21,vP3D,vbTriangulated,1.0,50);
//If neither model yields a good reconstruction, return false and retry on the next frame
return false;
}
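As a made-up numerical example: if the homography scores SH = 540 and the fundamental matrix scores SF = 360, then RH = 540/(540+360) = 0.6 > 0.40 and the homography is chosen, which is typical for a planar scene or a (nearly) pure rotation. With plenty of translation in a general 3D scene, SF dominates instead, RH drops below the threshold, and the fundamental matrix wins.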
Next we study how the relative pose of the two frames is recovered, first from the homography and then from the fundamental matrix.
1. Computing the homography iteratively:
void Initializer::FindHomography(vector<bool> &vbMatchesInliers, float &score, cv::Mat &H21)
{
// Number of putative matches
const int N = mvMatches12.size();
// Normalize coordinates
vector<cv::Point2f> vPn1, vPn2;
cv::Mat T1, T2;
//Normalization (Hartley style): shift the keypoints so their centroid is at the origin and rescale them, which conditions the linear system for the DLT solve below
Normalize(mvKeys1,vPn1, T1);
Normalize(mvKeys2,vPn2, T2);
cv::Mat T2inv = T2.inv();
// Best Results variables
score = 0.0;
vbMatchesInliers = vector<bool>(N,false);
// Iteration variables
vector<cv::Point2f> vPn1i(8);
vector<cv::Point2f> vPn2i(8);
cv::Mat H21i, H12i;
vector<bool> vbCurrentInliers(N,false);
float currentScore;
// Perform all RANSAC iterations and save the solution with highest score
//RANSAC-style: in each iteration take 8 matched pairs, compute a homography, and keep the best-scoring one
for(int it=0; it<mMaxIterations; it++)
{
// Select a minimum set
for(size_t j=0; j<8; j++)
{
int idx = mvSets[it][j];
vPn1i[j] = vPn1[mvMatches12[idx].first];
vPn2i[j] = vPn2[mvMatches12[idx].second];
}
cv::Mat Hn = ComputeH21(vPn1i,vPn2i);
//Undo the normalization: H21 = T2^-1 * Hn * T1
H21i = T2inv*Hn*T1;
H12i = H21i.inv();
currentScore = CheckHomography(H21i, H12i, vbCurrentInliers, mSigma);
if(currentScore>score)
{
H21 = H21i.clone();
vbMatchesInliers = vbCurrentInliers;
score = currentScore;
}
}
}
The computation follows the book, except that 8 point pairs are used rather than the minimal 4 needed for a homography, so the resulting 16x9 system is overdetermined and the SVD below returns its least-squares solution. (ORB-SLAM2 reuses the same 8-point RANSAC sets for both models, presumably so that the two scores stay comparable.)
cv::Mat Initializer::ComputeH21(const vector<cv::Point2f> &vP1, const vector<cv::Point2f> &vP2)
{
const int N = vP1.size();
cv::Mat A(2*N,9,CV_32F);
//The homography is a 3*3 matrix with 8 degrees of freedom, elements h1~h9 defined up to scale
//Following the derivation in the book: each matched pair of keypoints yields two linear equations in h1~h9; stacking the equations for all pairs gives A, and we solve A*h = 0
for(int i=0; i<N; i++)
{
const float u1 = vP1[i].x;
const float v1 = vP1[i].y;
const float u2 = vP2[i].x;
const float v2 = vP2[i].y;
A.at<float>(2*i,0) = 0.0;
A.at<float>(2*i,1) = 0.0;
A.at<float>(2*i,2) = 0.0;
A.at<float>(2*i,3) = -u1;
A.at<float>(2*i,4) = -v1;
A.at<float>(2*i,5) = -1;
A.at<float>(2*i,6) = v2*u1;
A.at<float>(2*i,7) = v2*v1;
A.at<float>(2*i,8) = v2;
A.at<float>(2*i+1,0) = u1;
A.at<float>(2*i+1,1) = v1;
A.at<float>(2*i+1,2) = 1;
A.at<float>(2*i+1,3) = 0.0;
A.at<float>(2*i+1,4) = 0.0;
A.at<float>(2*i+1,5) = 0.0;
A.at<float>(2*i+1,6) = -u2*u1;
A.at<float>(2*i+1,7) = -u2*v1;
A.at<float>(2*i+1,8) = -u2;
}
cv::Mat u,w,vt;
//SVD of A yields the three matrices above: u and vt are orthogonal, and w is the diagonal matrix of singular values in descending order
cv::SVDecomp(A,w,u,vt,cv::SVD::MODIFY_A | cv::SVD::FULL_UV);
//Take the row of vt corresponding to the smallest singular value and rebuild it as a matrix; reshape keeps the element count but reinterprets the 9-vector as the 3*3 homography we are after
return vt.row(8).reshape(0, 3);
}
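To tie the code to the math: with $x_1 = (u_1, v_1, 1)^T$, $x_2 = (u_2, v_2, 1)^T$ and the constraint $x_2 \times (H x_1) = 0$, each correspondence contributes exactly the two rows filled in above (writing $h = (h_1, \dots, h_9)^T$):

$$
\begin{pmatrix} 0 & 0 & 0 & -u_1 & -v_1 & -1 & v_2 u_1 & v_2 v_1 & v_2 \\ u_1 & v_1 & 1 & 0 & 0 & 0 & -u_2 u_1 & -u_2 v_1 & -u_2 \end{pmatrix} h = 0
$$

With 8 pairs this gives 16 equations in 9 unknowns, and the SVD returns the unit-norm $h$ minimizing $\lVert Ah \rVert$.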
Once a homography has been obtained, points are transferred through it in both directions to compute a matching score.
float Initializer::CheckHomography(const cv::Mat &H21, const cv::Mat &H12, vector<bool> &vbMatchesInliers, float sigma)
{
const int N = mvMatches12.size();
const float h11 = H21.at<float>(0,0);
const float h12 = H21.at<float>(0,1);
const float h13 = H21.at<float>(0,2);
const float h21 = H21.at<float>(1,0);
const float h22 = H21.at<float>(1,1);
const float h23 = H21.at<float>(1,2);
const float h31 = H21.at<float>(2,0);
const float h32 = H21.at<float>(2,1);
const float h33 = H21.at<float>(2,2);
const float h11inv = H12.at<float>(0,0);
const float h12inv = H12.at<float>(0,1);
const float h13inv = H12.at<float>(0,2);
const float h21inv = H12.at<float>(1,0);
const float h22inv = H12.at<float>(1,1);
const float h23inv = H12.at<float>(1,2);
const float h31inv = H12.at<float>(2,0);
const float h32inv = H12.at<float>(2,1);
const float h33inv = H12.at<float>(2,2);
vbMatchesInliers.resize(N);
float score = 0;
const float th = 5.991;
const float invSigmaSquare = 1.0/(sigma*sigma);
//Compute the symmetric transfer error: project from image 1 into 2 and from 2 into 1, and accumulate both into the score
for(int i=0; i<N; i++)
{
bool bIn = true;
const cv::KeyPoint &kp1 = mvKeys1[mvMatches12[i].first];
const cv::KeyPoint &kp2 = mvKeys2[mvMatches12[i].second];
const float u1 = kp1.pt.x;
const float v1 = kp1.pt.y;
const float u2 = kp2.pt.x;
const float v2 = kp2.pt.y;
// Reprojection error in first image
// x2in1 = H12*x2
const float w2in1inv = 1.0/(h31inv*u2+h32inv*v2+h33inv);
const float u2in1 = (h11inv*u2+h12inv*v2+h13inv)*w2in1inv;
const float v2in1 = (h21inv*u2+h22inv*v2+h23inv)*w2in1inv;
const float squareDist1 = (u1-u2in1)*(u1-u2in1)+(v1-v2in1)*(v1-v2in1);
const float chiSquare1 = squareDist1*invSigmaSquare;
if(chiSquare1>th)
bIn = false;
else
score += th - chiSquare1;
// Reprojection error in second image
// x1in2 = H21*x1
//Directly compute the transferred pixel coordinates
const float w1in2inv = 1.0/(h31*u1+h32*v1+h33);
const float u1in2 = (h11*u1+h12*v1+h13)*w1in2inv;
const float v1in2 = (h21*u1+h22*v1+h23)*w1in2inv;
const float squareDist2 = (u2-u1in2)*(u2-u1in2)+(v2-v1in2)*(v2-v1in2);
const float chiSquare2 = squareDist2*invSigmaSquare;
if(chiSquare2>th)
bIn = false;
else
score += th - chiSquare2;
if(bIn)
vbMatchesInliers[i]=true;
else
vbMatchesInliers[i]=false;
}
return score;
}
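A note on the constant th = 5.991: it is the 95% quantile of the chi-square distribution with 2 degrees of freedom, matching the 2D transfer error divided by sigma^2. A correspondence counts as an inlier only if it passes the test in both directions, and each passing direction adds (th - chiSquare) to the score, so accurate matches contribute more than borderline ones.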
The homography is then decomposed into R and t. A single homography decomposes into multiple candidate motions (eight hypotheses here), and it can happen that none of them passes the checks, in which case this initialization attempt is declared a failure.
bool Initializer::ReconstructH(vector<bool> &vbMatchesInliers, cv::Mat &H21, cv::Mat &K,
cv::Mat &R21, cv::Mat &t21, vector<cv::Point3f> &vP3D, vector<bool> &vbTriangulated, float minParallax, int minTriangulated)
{
int N=0;
for(size_t i=0, iend = vbMatchesInliers.size() ; i<iend; i++)
if(vbMatchesInliers[i])
N++;
// We recover 8 motion hypotheses using the method of Faugeras et al.
// Motion and structure from motion in a piecewise planar environment.
// International Journal of Pattern Recognition and Artificial Intelligence, 1988
cv::Mat invK = K.inv();
cv::Mat A = invK*H21*K;
cv::Mat U,w,Vt,V;
cv::SVD::compute(A,w,U,Vt,cv::SVD::FULL_UV);
V=Vt.t();
float s = cv::determinant(U)*cv::determinant(Vt);
float d1 = w.at<float>(0);
float d2 = w.at<float>(1);
float d3 = w.at<float>(2);
if(d1/d2<1.00001 || d2/d3<1.00001)
{
return false;
}
vector<cv::Mat> vR, vt, vn;
vR.reserve(8);
vt.reserve(8);
vn.reserve(8);
//n'=[x1 0 x3] 4 possibilities e1=e3=1, e1=1 e3=-1, e1=-1 e3=1, e1=e3=-1
float aux1 = sqrt((d1*d1-d2*d2)/(d1*d1-d3*d3));
float aux3 = sqrt((d2*d2-d3*d3)/(d1*d1-d3*d3));
float x1[] = {aux1,aux1,-aux1,-aux1};
float x3[] = {aux3,-aux3,aux3,-aux3};
//case d'=d2
float aux_stheta = sqrt((d1*d1-d2*d2)*(d2*d2-d3*d3))/((d1+d3)*d2);
float ctheta = (d2*d2+d1*d3)/((d1+d3)*d2);
float stheta[] = {aux_stheta, -aux_stheta, -aux_stheta, aux_stheta};
for(int i=0; i<4; i++)
{
cv::Mat Rp=cv::Mat::eye(3,3,CV_32F);
Rp.at<float>(0,0)=ctheta;
Rp.at<float>(0,2)=-stheta[i];
Rp.at<float>(2,0)=stheta[i];
Rp.at<float>(2,2)=ctheta;
cv::Mat R = s*U*Rp*Vt;
vR.push_back(R);
cv::Mat tp(3,1,CV_32F);
tp.at<float>(0)=x1[i];
tp.at<float>(1)=0;
tp.at<float>(2)=-x3[i];
tp*=d1-d3;
cv::Mat t = U*tp;
vt.push_back(t/cv::norm(t));
cv::Mat np(3,1,CV_32F);
np.at<float>(0)=x1[i];
np.at<float>(1)=0;
np.at<float>(2)=x3[i];
cv::Mat n = V*np;
if(n.at<float>(2)<0)
n=-n;
vn.push_back(n);
}
//case d'=-d2
float aux_sphi = sqrt((d1*d1-d2*d2)*(d2*d2-d3*d3))/((d1-d3)*d2);
float cphi = (d1*d3-d2*d2)/((d1-d3)*d2);
float sphi[] = {aux_sphi, -aux_sphi, -aux_sphi, aux_sphi};
for(int i=0; i<4; i++)
{
cv::Mat Rp=cv::Mat::eye(3,3,CV_32F);
Rp.at<float>(0,0)=cphi;
Rp.at<float>(0,2)=sphi[i];
Rp.at<float>(1,1)=-1;
Rp.at<float>(2,0)=sphi[i];
Rp.at<float>(2,2)=-cphi;
cv::Mat R = s*U*Rp*Vt;
vR.push_back(R);
cv::Mat tp(3,1,CV_32F);
tp.at<float>(0)=x1[i];
tp.at<float>(1)=0;
tp.at<float>(2)=x3[i];
tp*=d1+d3;
cv::Mat t = U*tp;
vt.push_back(t/cv::norm(t));
cv::Mat np(3,1,CV_32F);
np.at<float>(0)=x1[i];
np.at<float>(1)=0;
np.at<float>(2)=x3[i];
cv::Mat n = V*np;
if(n.at<float>(2)<0)
n=-n;
vn.push_back(n);
}
int bestGood = 0;
int secondBestGood = 0;
int bestSolutionIdx = -1;
float bestParallax = -1;
vector<cv::Point3f> bestP3D;
vector<bool> bestTriangulated;
// Instead of applying the visibility constraints proposed in the Faugeras' paper (which could fail for points seen with low parallax)
// We reconstruct all hypotheses and check in terms of triangulated points and parallax
for(size_t i=0; i<8; i++)
{
float parallaxi;
vector<cv::Point3f> vP3Di;
vector<bool> vbTriangulatedi;
int nGood = CheckRT(vR[i],vt[i],mvKeys1,mvKeys2,mvMatches12,vbMatchesInliers,K,vP3Di, 4.0*mSigma2, vbTriangulatedi, parallaxi);
if(nGood>bestGood)
{
secondBestGood = bestGood;
bestGood = nGood;
bestSolutionIdx = i;
bestParallax = parallaxi;
bestP3D = vP3Di;
bestTriangulated = vbTriangulatedi;
}
else if(nGood>secondBestGood)
{
secondBestGood = nGood;
}
}
if(secondBestGood<0.75*bestGood && bestParallax>=minParallax && bestGood>minTriangulated && bestGood>0.9*N)
{
vR[bestSolutionIdx].copyTo(R21);
vt[bestSolutionIdx].copyTo(t21);
vP3D = bestP3D;
vbTriangulated = bestTriangulated;
return true;
}
return false;
}
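The final if condition encodes four acceptance criteria for the winning hypothesis: it must beat the runner-up clearly (secondBestGood < 0.75*bestGood, i.e. the 8-fold ambiguity is actually resolved), reach the minimum parallax (minParallax = 1.0 degree as passed from Initialize), triangulate more than minTriangulated = 50 points, and explain more than 90% of the inliers. If any of these fails, false is returned and initialization is retried on a later frame.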
CheckRT verifies that the candidate R and t place the triangulated points in front of both cameras and that the parallax is sufficient: with too little parallax the two viewing rays are nearly parallel, triangulation becomes ill-conditioned, and the solution is dominated by noise.
int Initializer::CheckRT(const cv::Mat &R, const cv::Mat &t, const vector<cv::KeyPoint> &vKeys1, const vector<cv::KeyPoint> &vKeys2,
const vector<Match> &vMatches12, vector<bool> &vbMatchesInliers,
const cv::Mat &K, vector<cv::Point3f> &vP3D, float th2, vector<bool> &vbGood, float &parallax)
{
// Calibration parameters
const float fx = K.at<float>(0,0);
const float fy = K.at<float>(1,1);
const float cx = K.at<float>(0,2);
const float cy = K.at<float>(1,2);
vbGood = vector<bool>(vKeys1.size(),false);
vP3D.resize(vKeys1.size());
vector<float> vCosParallax;
vCosParallax.reserve(vKeys1.size());
// Camera 1 Projection Matrix K[I|0]
cv::Mat P1(3,4,CV_32F,cv::Scalar(0));
K.copyTo(P1.rowRange(0,3).colRange(0,3));//the pose of the first frame is fixed at the origin
cv::Mat O1 = cv::Mat::zeros(3,1,CV_32F);
// Camera 2 Projection Matrix K[R|t]
cv::Mat P2(3,4,CV_32F);
R.copyTo(P2.rowRange(0,3).colRange(0,3));
t.copyTo(P2.rowRange(0,3).col(3));//build the 3*4 projection matrix of the second frame from this candidate R and t, i.e. its pose relative to the first frame
P2 = K*P2;
cv::Mat O2 = -R.t()*t;
int nGood=0;
for(size_t i=0, iend=vMatches12.size();i<iend;i++)
{
if(!vbMatchesInliers[i])
continue;
const cv::KeyPoint &kp1 = vKeys1[vMatches12[i].first];
const cv::KeyPoint &kp2 = vKeys2[vMatches12[i].second];
cv::Mat p3dC1;
Triangulate(kp1,kp2,P1,P2,p3dC1);
//Discard points whose triangulation produced non-finite coordinates
if(!isfinite(p3dC1.at<float>(0)) || !isfinite(p3dC1.at<float>(1)) || !isfinite(p3dC1.at<float>(2)))
{
vbGood[vMatches12[i].first]=false;
continue;
}
// Check parallax
//Check the parallax: the angle at the 3D point between the rays from the two camera centers
//cv::norm of a vector is simply its Euclidean length
cv::Mat normal1 = p3dC1 - O1;
float dist1 = cv::norm(normal1);
cv::Mat normal2 = p3dC1 - O2;
float dist2 = cv::norm(normal2);
//Angle between the two ray vectors
float cosParallax = normal1.dot(normal2)/(dist1*dist2);
// Check depth in front of first camera (only if enough parallax, as "infinite" points can easily go to negative depth)
if(p3dC1.at<float>(2)<=0 && cosParallax<0.99998)//the point is behind the first camera, so reject it; the check only applies when the parallax is sufficient, since near-infinite low-parallax points can flip to negative depth through noise alone
continue;
// Check depth in front of second camera (only if enough parallax, as "infinite" points can easily go to negative depth)
cv::Mat p3dC2 = R*p3dC1+t;
if(p3dC2.at<float>(2)<=0 && cosParallax<0.99998)
continue;
// Check reprojection error in first image
float im1x, im1y;
float invZ1 = 1.0/p3dC1.at<float>(2);
im1x = fx*p3dC1.at<float>(0)*invZ1+cx;
im1y = fy*p3dC1.at<float>(1)*invZ1+cy;
float squareError1 = (im1x-kp1.pt.x)*(im1x-kp1.pt.x)+(im1y-kp1.pt.y)*(im1y-kp1.pt.y);
if(squareError1>th2)
continue;
// Check reprojection error in second image
//Reprojection error: project the 3D point in the second camera's frame into pixels and compare with the observation
float im2x, im2y;
float invZ2 = 1.0/p3dC2.at<float>(2);
im2x = fx*p3dC2.at<float>(0)*invZ2+cx;
im2y = fy*p3dC2.at<float>(1)*invZ2+cy;
float squareError2 = (im2x-kp2.pt.x)*(im2x-kp2.pt.x)+(im2y-kp2.pt.y)*(im2y-kp2.pt.y);
if(squareError2>th2)
continue;
vCosParallax.push_back(cosParallax);
vP3D[vMatches12[i].first] = cv::Point3f(p3dC1.at<float>(0),p3dC1.at<float>(1),p3dC1.at<float>(2));
nGood++;
if(cosParallax<0.99998)
vbGood[vMatches12[i].first]=true;
}
if(nGood>0)
{
sort(vCosParallax.begin(),vCosParallax.end());
size_t idx = min(50,int(vCosParallax.size()-1));
parallax = acos(vCosParallax[idx])*180/CV_PI;
}
else
parallax=0;
return nGood;
}
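The constant 0.99998 deserves a remark: acos(0.99998) is about 0.36 degrees, so a point only counts as "good" if its two viewing rays differ by at least roughly a third of a degree. Also, since the cosines are sorted in ascending order, index 50 picks the 51st-largest parallax angle (or the smallest available when fewer points exist), a deliberately conservative sample that keeps a single outlier ray pair from inflating the reported parallax.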
The purpose of Triangulate is to construct a 3D point from the matched keypoints kp1, kp2 and the hypothesized projection matrices P1, P2, and then use that 3D point to judge whether the pose hypothesis is correct. Triangulation builds a cross-product constraint, reminiscent of the epipolar constraint, between each observation ray and the 3D point, and solves for the minimum-error solution.
void Initializer::Triangulate(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2, const cv::Mat &P1, const cv::Mat &P2, cv::Mat &x3D)
{
cv::Mat A(4,4,CV_32F);
//The equations come from kp1 x (P1*x3D) = 0 and kp2 x (P2*x3D) = 0; the cross product is equivalent to multiplying by a skew-symmetric matrix, making the system linear in x3D
//P1 is K[I|0], i.e. the first frame defines the world frame, while P2 carries the candidate relative pose of the second frame
A.row(0) = kp1.pt.x*P1.row(2)-P1.row(0);
A.row(1) = kp1.pt.y*P1.row(2)-P1.row(1);
A.row(2) = kp2.pt.x*P2.row(2)-P2.row(0);
A.row(3) = kp2.pt.y*P2.row(2)-P2.row(1);
cv::Mat u,w,vt;
cv::SVD::compute(A,w,u,vt,cv::SVD::MODIFY_A| cv::SVD::FULL_UV);
x3D = vt.row(3).t();//the singular vector with the smallest singular value is the homogeneous 3D point we seek
x3D = x3D.rowRange(0,3)/x3D.at<float>(3);
}
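The algebra behind those four rows: for a projection $x = (u, v, 1)^T \simeq PX$, the cross product $x \times (PX) = 0$ yields two independent linear equations

$$(u\,P^{(3)} - P^{(1)})X = 0, \qquad (v\,P^{(3)} - P^{(2)})X = 0,$$

where $P^{(i)}$ denotes the $i$-th row of $P$. Stacking both views gives the 4x4 matrix A; the right singular vector of the smallest singular value minimizes $\lVert AX \rVert$ subject to $\lVert X \rVert = 1$, and dividing by the fourth homogeneous coordinate recovers the Euclidean point.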
In the end we obtain the best R and t under the homography model for this initialization attempt, together with the homography's score, which waits to be compared against the fundamental matrix's score.
2. In parallel with the homography, the epipolar-constraint route computes a matching score and a fundamental matrix, and decomposes a pose from it:
void Initializer::FindFundamental(vector<bool> &vbMatchesInliers, float &score, cv::Mat &F21)
{
// Number of putative matches
const int N = vbMatchesInliers.size();
// Normalize coordinates
vector<cv::Point2f> vPn1, vPn2;
cv::Mat T1, T2;
Normalize(mvKeys1,vPn1, T1);
Normalize(mvKeys2,vPn2, T2);
cv::Mat T2t = T2.t();
// Best Results variables
score = 0.0;
vbMatchesInliers = vector<bool>(N,false);
// Iteration variables
vector<cv::Point2f> vPn1i(8);
vector<cv::Point2f> vPn2i(8);
cv::Mat F21i;
vector<bool> vbCurrentInliers(N,false);
float currentScore;
// Perform all RANSAC iterations and save the solution with highest score
for(int it=0; it<mMaxIterations; it++)
{
// Select a minimum set
for(int j=0; j<8; j++)
{
int idx = mvSets[it][j];
vPn1i[j] = vPn1[mvMatches12[idx].first];
vPn2i[j] = vPn2[mvMatches12[idx].second];
}
cv::Mat Fn = ComputeF21(vPn1i,vPn2i);
F21i = T2t*Fn*T1;
currentScore = CheckFundamental(F21i, vbCurrentInliers, mSigma);
if(currentScore>score)
{
F21 = F21i.clone();
vbMatchesInliers = vbCurrentInliers;
score = currentScore;
}
}
}
For the fundamental matrix the score is computed analogously, but from the symmetric distance of each point to its epipolar line (CheckFundamental, not shown here). Correspondingly, camera motion is recovered by decomposing the matrix into R and t.
cv::Mat Initializer::ComputeF21(const vector<cv::Point2f> &vP1,const vector<cv::Point2f> &vP2)
{
const int N = vP1.size();
cv::Mat A(N,9,CV_32F);
for(int i=0; i<N; i++)
{
const float u1 = vP1[i].x;
const float v1 = vP1[i].y;
const float u2 = vP2[i].x;
const float v2 = vP2[i].y;
A.at<float>(i,0) = u2*u1;
A.at<float>(i,1) = u2*v1;
A.at<float>(i,2) = u2;
A.at<float>(i,3) = v2*u1;
A.at<float>(i,4) = v2*v1;
A.at<float>(i,5) = v2;
A.at<float>(i,6) = u1;
A.at<float>(i,7) = v1;
A.at<float>(i,8) = 1;
}
cv::Mat u,w,vt;
cv::SVDecomp(A,w,u,vt,cv::SVD::MODIFY_A | cv::SVD::FULL_UV);
cv::Mat Fpre = vt.row(8).reshape(0, 3);
cv::SVDecomp(Fpre,w,u,vt,cv::SVD::MODIFY_A | cv::SVD::FULL_UV);
w.at<float>(2)=0;
return u*cv::Mat::diag(w)*vt;
}
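The second SVD here enforces the rank-2 constraint: a genuine fundamental matrix satisfies det F = 0 (all epipolar lines meet at the epipole), but the least-squares solution from 8 noisy pairs is generally full rank. Zeroing the smallest singular value, $F = U\,\mathrm{diag}(\sigma_1, \sigma_2, 0)\,V^T$, yields the closest rank-2 matrix in the Frobenius norm.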
After the fundamental matrix has been computed, the rotation and translation are decomposed out and then verified against the matched points. Strictly speaking, ReconstructF first converts F into the essential matrix E = K^T * F * K, and it is E that gets decomposed, again via SVD, exploiting the structure of the underlying transformation.
void Initializer::DecomposeE(const cv::Mat &E, cv::Mat &R1, cv::Mat &R2, cv::Mat &t)
{
cv::Mat u,w,vt;
cv::SVD::compute(E,w,u,vt);//E=U*w*Vt
u.col(2).copyTo(t);//t is the rightmost column of U
t=t/cv::norm(t);
cv::Mat W(3,3,CV_32F,cv::Scalar(0));
W.at<float>(0,1)=-1;
W.at<float>(1,0)=1;
W.at<float>(2,2)=1;//W is the rotation matrix for a 90-degree rotation about the z axis
R1 = u*W*vt;//R=U*W*Vt; W can equally rotate 90 degrees the other way (W^T), giving two rotation candidates, and the sign is flipped below whenever the determinant is negative
if(cv::determinant(R1)<0)
R1=-R1;
R2 = u*W.t()*vt;
if(cv::determinant(R2)<0)
R2=-R2;
}
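For reference, DecomposeE operates on the essential matrix $E = K^T F K = [t]_\times R$, whose SVD is $E = U\,\mathrm{diag}(1,1,0)\,V^T$ up to scale. The two rotations $R_1 = UWV^T$ and $R_2 = UW^TV^T$ combined with $t = \pm u_3$ give four candidate (R, t) pairs; ReconstructF (not listed here) runs CheckRT on all four and keeps the one that places the most triangulated points in front of both cameras.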
This completes the pose computation for the first two frames of the initialization.
III. Building the Initial Map
Once the initialization result is accepted, the first frame of the initialization becomes the first keyframe, with its pose fixed at the origin, and the second frame becomes the second keyframe. This creates the very first sparse map; everything that follows is grown out of this initial map, and all later poses are estimated against it.
void Tracking::CreateInitialMapMonocular()
{
// Create KeyFrames
KeyFrame* pKFini = new KeyFrame(mInitialFrame,mpMap,mpKeyFrameDB);
KeyFrame* pKFcur = new KeyFrame(mCurrentFrame,mpMap,mpKeyFrameDB);
//Each keyframe holds its own bag-of-words representation so it can be matched directly
pKFini->ComputeBoW();
pKFcur->ComputeBoW();
// Insert KFs in the map
mpMap->AddKeyFrame(pKFini);
mpMap->AddKeyFrame(pKFcur);//both are added to the map holder; mpMap owns the information of all keyframes
// Create MapPoints and associate to keyframes
for(size_t i=0; i<mvIniMatches.size();i++)
{
if(mvIniMatches[i]<0)
continue;
//Create a MapPoint from the triangulated position
cv::Mat worldPos(mvIniP3D[i]);
MapPoint* pMP = new MapPoint(worldPos,pKFcur,mpMap);
pKFini->AddMapPoint(pMP,i);
pKFcur->AddMapPoint(pMP,mvIniMatches[i]);
//A map point can be observed by several frames; record which keyframes observe it and at which keypoint index
pMP->AddObservation(pKFini,i);
pMP->AddObservation(pKFcur,mvIniMatches[i]);
pMP->ComputeDistinctiveDescriptors();
pMP->UpdateNormalAndDepth();
//Fill Current Frame structure
mCurrentFrame.mvpMapPoints[mvIniMatches[i]] = pMP;
mCurrentFrame.mvbOutlier[mvIniMatches[i]] = false;
//Add to Map
mpMap->AddMapPoint(pMP);//the map takes ownership of this map point
}
// Update Connections
pKFini->UpdateConnections();//update the covisibility statistics: how many other keyframes observe the map points handled by this one
pKFcur->UpdateConnections();
// Bundle Adjustment
cout << "New Map created with " << mpMap->MapPointsInMap() << " points" << endl;
//Run BA over all frames; here there are only the two initial ones. We will study its implementation in detail in the next part
Optimizer::GlobalBundleAdjustemnt(mpMap,20);
// Set median depth to 1
//Compute the median depth of the map points at initialization and use it as the scale reference for all points in the map
float medianDepth = pKFini->ComputeSceneMedianDepth(2);
float invMedianDepth = 1.0f/medianDepth;
if(medianDepth<0 || pKFcur->TrackedMapPoints(1)<100)
{
cout << "Wrong initialization, reseting..." << endl;
Reset();
return;
}
// Scale initial baseline
//Rotation carries no scale but translation does; here the translation scale is "agreed upon" by normalizing with the median depth
cv::Mat Tc2w = pKFcur->GetPose();
Tc2w.col(3).rowRange(0,3) = Tc2w.col(3).rowRange(0,3)*invMedianDepth;
pKFcur->SetPose(Tc2w);
// Scale points
vector<MapPoint*> vpAllMapPoints = pKFini->GetMapPointMatches();
for(size_t iMP=0; iMP<vpAllMapPoints.size(); iMP++)
{
if(vpAllMapPoints[iMP])
{
MapPoint* pMP = vpAllMapPoints[iMP];
pMP->SetWorldPos(pMP->GetWorldPos()*invMedianDepth);
}
}
mpLocalMapper->InsertKeyFrame(pKFini);
mpLocalMapper->InsertKeyFrame(pKFcur);
mCurrentFrame.SetPose(pKFcur->GetPose());
mnLastKeyFrameId=mCurrentFrame.mnId;
mpLastKeyFrame = pKFcur;
mvpLocalKeyFrames.push_back(pKFcur);
mvpLocalKeyFrames.push_back(pKFini);
mvpLocalMapPoints=mpMap->GetAllMapPoints();
mpReferenceKF = pKFcur;
mCurrentFrame.mpReferenceKF = pKFcur;
mLastFrame = Frame(mCurrentFrame);
mpMap->SetReferenceMapPoints(mvpLocalMapPoints);
mpMapDrawer->SetCurrentCameraPose(pKFcur->GetPose());
mpMap->mvpKeyFrameOrigins.push_back(pKFini);
mState=OK;
}
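To make the scaling step concrete with made-up numbers: if ComputeSceneMedianDepth returns 2.5, then invMedianDepth = 0.4, and multiplying both the baseline translation and every map point position by 0.4 shrinks the whole reconstruction so that the median scene depth becomes exactly 1. Nothing geometric changes, since a monocular reconstruction is only ever defined up to scale; this merely fixes a convention.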
With that, the camera initialization at the start of the algorithm is complete: the map has been given an initial scale, and the camera an initial pose. How localization and mapping continue from here is the business of the rest of the Track function, which we will study in the next post.