However, the vertex and edge class definitions are usually placed before the initialization code.
g2o's built-in vertices and edges (a usage sketch follows the list):
Vertex 1: camera pose vertex: g2o::VertexSE3Expmap, from g2o/types/sba/types_six_dof_expmap.h
Vertex 2: feature point (3D landmark) vertex: g2o::VertexSBAPointXYZ, from g2o/types/sba/types_sba.h
Edge: reprojection error: g2o::EdgeProjectXYZ2UV, from g2o/types/sba/types_six_dof_expmap.h
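A minimal usage sketch (mine, not from the book) of how these built-in types fit together. It assumes an already-constructed g2o::SparseOptimizer named optimizer, placeholder intrinsics fx, cx, cy, and a placeholder pixel measurement (u, v); EdgeProjectXYZ2UV reads the intrinsics from a g2o::CameraParameters object registered with the optimizer:

g2o::VertexSE3Expmap *pose = new g2o::VertexSE3Expmap();
pose->setId(0);
pose->setEstimate(g2o::SE3Quat()); // identity as the initial guess
optimizer.addVertex(pose);
g2o::VertexSBAPointXYZ *point = new g2o::VertexSBAPointXYZ();
point->setId(1);
point->setEstimate(Eigen::Vector3d(0, 0, 1));
point->setMarginalized(true); // landmarks are marginalized in the sparse solver
optimizer.addVertex(point);
g2o::CameraParameters *camera = new g2o::CameraParameters(fx, Eigen::Vector2d(cx, cy), 0);
camera->setId(0);
optimizer.addParameter(camera);
g2o::EdgeProjectXYZ2UV *edge = new g2o::EdgeProjectXYZ2UV();
edge->setVertex(0, point); // slot 0: the 3D point
edge->setVertex(1, pose);  // slot 1: the camera pose
edge->setMeasurement(Eigen::Vector2d(u, v)); // observed pixel
edge->setParameterId(0, 0); // bind the edge to the camera parameters above
edge->setInformation(Eigen::Matrix2d::Identity());
optimizer.addEdge(edge);

If you define your own vertex instead, you derive from g2o::BaseVertex and override the following four functions: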
virtual bool read(std::istream& is);
virtual bool write(std::ostream& os) const;
virtual void oplusImpl(const number_t* update);
virtual void setToOriginImpl();
The read and write functions do not need a real implementation; an empty override is enough. setToOriginImpl sets the initial value of the optimization variable. oplusImpl is the important one: once the increment equation has been solved, the resulting increment is applied to the estimate through this function, so it must be written correctly, otherwise the optimizer will keep iterating without ever reaching a good result. The corresponding four functions to override for a custom edge are:
virtual bool read(std::istream& is);
virtual bool write(std::ostream& os) const;
virtual void computeError();
virtual void linearizeOplus();
read and write are the same as above. computeError computes the error between the measurement predicted from the current vertex estimates and the actual measurement. linearizeOplus computes, at the current vertex values, the partial derivatives of that error with respect to the optimization variables, i.e. the Jacobian.
Note: if the edge's linearizeOplus function is not defined, g2o falls back to numerical differentiation, which is considerably slower.
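For the reprojection edge implemented in the code below, this Jacobian has a closed form (the 2x6 matrix derived in the book; the leading minus sign appears because the error is defined as measurement minus projection). With P' = TP = (X', Y', Z') the point in the camera frame and δξ the left perturbation of T:

\[
\frac{\partial e}{\partial \delta\xi}
= -\begin{bmatrix}
\frac{f_x}{Z'} & 0 & -\frac{f_x X'}{Z'^2} & -\frac{f_x X'Y'}{Z'^2} & f_x + \frac{f_x X'^2}{Z'^2} & -\frac{f_x Y'}{Z'} \\
0 & \frac{f_y}{Z'} & -\frac{f_y Y'}{Z'^2} & -f_y - \frac{f_y Y'^2}{Z'^2} & \frac{f_y X'Y'}{Z'^2} & \frac{f_y X'}{Z'}
\end{bmatrix}
\]

This is exactly what linearizeOplus fills into _jacobianOplusXi below, with the minus sign already distributed into the entries.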
The following is a partly self-written version of pose_estimation_3d2d from Chapter 7 of the book:
//
// Created by y on 20-4-14.
//
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp> //feature detection and matching
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <Eigen/Core>
#include <g2o/core/base_vertex.h> //vertex
#include <g2o/core/base_unary_edge.h> //unary edge
#include <g2o/core/sparse_optimizer.h> //sparse optimizer
#include <g2o/core/block_solver.h> //block solver
#include <g2o/core/solver.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h> //Gauss-Newton descent strategy
#include <g2o/solvers/dense/linear_solver_dense.h>
#include <sophus/se3.hpp>
using namespace std;
using namespace cv;
typedef vector<Eigen::Vector2d, Eigen::aligned_allocator<Eigen::Vector2d>> VecVector2d;
typedef vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d>> VecVector3d;
void Feature_find_matches(const Mat &img_1,const Mat &img_2,
vector<KeyPoint> &keypoints_1,
vector<KeyPoint> &keypoints_2,
vector<DMatch> &matches);
//obtain the 3D coordinates from the depth map, and the pixel coordinates of the matched feature points in the second frame
void Attain_3D(const Mat &img_depth,const Mat &K,vector<DMatch> matches,
vector<KeyPoint> kp1,vector<KeyPoint> kp2,
vector<Point3f> &pts_3d,vector<Point2f> &pts_2d
);
void bundleAdjustmentG2O(
const VecVector3d &points_3d,
const VecVector2d &points_2d,
const Mat &K,
Sophus::SE3d &pose) ;
int main(int argc, char **argv){
Mat img1=imread("/home/y/slambook2/ch7/1.png",CV_LOAD_IMAGE_COLOR);
Mat img2=imread("/home/y/slambook2/ch7/2.png",CV_LOAD_IMAGE_COLOR);
Mat img1_depth=imread("/home/y/slambook2/ch7/1_depth.png",CV_LOAD_IMAGE_UNCHANGED);
if(!img1.data||!img2.data||!img1_depth.data){
cout<<"image faile"<<endl;
return -1;
}
//initialization
vector<KeyPoint> keypoint1,keypoint2;
vector<DMatch> matches;
Feature_find_matches(img1,img2,keypoint1,keypoint2,matches);
cout<<"-----Total keypoint1 number: "<<keypoint1.size()<<endl;
cout<<"-----Total keypoint2 number: "<<keypoint1.size()<<endl;
cout<<"-----Total matches number: "<<matches.size()<<endl;
//camera intrinsics
Mat K=(Mat_<double>(3,3)<<520.9,0,325.1,0,521.0,249.7,0,0,1);
vector<Point3f> pts_3d;
vector<Point2f> pts_2d;
//obtain the 3D coordinates from the depth map, and the pixel coordinates of the matched feature points in the second frame
Attain_3D(img1_depth,K, matches,keypoint1,keypoint2,pts_3d,pts_2d);
cout<<"3d-2d pairs: "<<pts_3d.size()<<endl;
VecVector3d pts_3d_eigen;
VecVector2d pts_2d_eigen;
for (size_t i = 0; i < pts_3d.size(); ++i) {
pts_3d_eigen.push_back(Eigen::Vector3d(pts_3d[i].x, pts_3d[i].y, pts_3d[i].z));
pts_2d_eigen.push_back(Eigen::Vector2d(pts_2d[i].x, pts_2d[i].y));
}
Sophus::SE3d pose;
bundleAdjustmentG2O(pts_3d_eigen,pts_2d_eigen,K,pose);
return 0;
}
void Feature_find_matches(const Mat &img_1,const Mat &img_2,
vector<KeyPoint> &keypoints_1,
vector<KeyPoint> &keypoints_2,
vector<DMatch> &matches) {
Mat des1, des2;
Ptr<FeatureDetector> detector = ORB::create();
Ptr<DescriptorExtractor> descriptor = ORB::create();
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
detector->detect(img_1, keypoints_1);
detector->detect(img_2, keypoints_2);
descriptor->compute(img_1, keypoints_1, des1);
descriptor->compute(img_2, keypoints_2, des2);
vector<DMatch> match;
matcher->match(des1, des2, match);
//filter the matches
double min_dist = 10000, max_dist = 0;
for (int i = 0; i < des1.rows; ++i) {
double dist = match[i].distance;
if (dist > max_dist) max_dist = dist;
if (dist < min_dist) min_dist = dist;
}
cout << "----Max distence: " << max_dist << endl;
cout << "----Min distence: " << min_dist << endl;
for (int i = 0; i < des1.rows; ++i) {
if (match[i].distance <= max(min_dist * 2, 30.0)) {
matches.push_back(match[i]);
}
}
}
//compared with the book this is a bit more involved: the book does not wrap this part into a separate function
void Attain_3D(const Mat &img_depth,const Mat &K,vector<DMatch> matches,
vector<KeyPoint>kp1,vector<KeyPoint>kp2,
vector<Point3f>&pts_3d, vector<Point2f> &pts_2d)
{
for(DMatch m:matches)
{
//fetch the depth value at the feature point's location in the depth map
//m.queryIdx is the index of this match's feature point in the first image; ".pt" is its pixel coordinates
ushort d=img_depth.ptr<unsigned short>(int(kp1[m.queryIdx].pt.y))[int(kp1[m.queryIdx].pt.x)];
if(d==0) continue;
//5000 is the depth scale factor of the camera
double dd=d/5000.0; //divide by 5000.0, not 5000, to avoid integer division
//X=(u-cx)*Z/fx; Y=(v-cy)*Z/fy
pts_3d.push_back(Point3f(((kp1[m.queryIdx].pt.x-K.at<double>(0,2))*dd)/K.at<double>(0,0),
((kp1[m.queryIdx].pt.y-K.at<double>(1,2))*dd)/K.at<double>(1,1),dd));
//m.trainIdx is the index of this match's feature point in the second image
pts_2d.push_back(kp2[m.trainIdx].pt);
}
}
//define the vertex: 6 is the minimal dimension of the optimization variable on the manifold; Sophus::SE3d is the data type storing the estimate
class VertexPose:public g2o::BaseVertex<6,Sophus::SE3d>{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW //this macro makes new return Eigen-aligned memory
//set the initial value
virtual void setToOriginImpl() override {
_estimate=Sophus::SE3d();
}
//once the increment equation has been solved, the resulting increment is applied to the estimate through this function
virtual void oplusImpl(const double *update) override {
Eigen::Matrix<double,6,1> update_eigen;
update_eigen<<update[0], update[1], update[2], update[3], update[4], update[5];
_estimate=Sophus::SE3d::exp(update_eigen)*_estimate; //left-multiplicative update
}
//disk read/write: not needed here, so they simply return true
virtual bool read(istream &in) override { return true; }
virtual bool write(ostream &out) const override { return true; }
};
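//note: since oplusImpl above applies updates by left multiplication,
//the Jacobian in the edge below must use the matching left-perturbation model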
//define the edge (reprojection error): 2 is the error dimension, Eigen::Vector2d the measurement type, VertexPose the connected vertex type
class EdgeProjection: public g2o::BaseUnaryEdge<2,Eigen::Vector2d,VertexPose>{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
EdgeProjection(const Eigen::Vector3d &pos,const Eigen::Matrix3d &K): _pos3d(pos),_K(K){}
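//error = measurement - projection of the 3D point under the current pose estimate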
virtual void computeError() override {
const VertexPose *v= static_cast<VertexPose *>(_vertices[0]);
Sophus::SE3d T=v->estimate();
Eigen::Vector3d pos_pixel=_K*(T*_pos3d);
pos_pixel/=pos_pixel[2];
_error=_measurement-pos_pixel.head<2>();
}
virtual void linearizeOplus() override {
const VertexPose *v = static_cast<VertexPose *> (_vertices[0]);
Sophus::SE3d T = v->estimate();
Eigen::Vector3d pos_cam=T*_pos3d;
double fx=_K(0,0);
double fy=_K(1,1);
double cx=_K(0,2);
double cy=_K(1,2);
double X=pos_cam[0];
double Y=pos_cam[1];
double Z=pos_cam[2];
double Z2=Z*Z;
//2x6 Jacobian of the error w.r.t. the left perturbation of T (see the formula above)
_jacobianOplusXi<< -fx / Z, 0, fx * X / Z2, fx * X * Y / Z2, -fx - fx * X * X / Z2, fx * Y / Z,
0, -fy / Z, fy * Y / Z2, fy + fy * Y * Y / Z2, -fy * X * Y / Z2, -fy * X / Z;
}
virtual bool read(istream &in) override { return true; }
virtual bool write(ostream &out) const override { return true; } //the const must not be omitted
private:
Eigen::Vector3d _pos3d;
Eigen::Matrix3d _K;
};
void bundleAdjustmentG2O(
const VecVector3d &points_3d,
const VecVector2d &points_2d,
const Mat &K,
Sophus::SE3d &pose)
{
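//BlockSolverTraits<6,3>: pose blocks are 6-dimensional, landmark blocks 3-dimensional;
//only the pose vertex is optimized here, so a dense linear solver over the small pose system suffices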
typedef g2o::BlockSolver<g2o::BlockSolverTraits<6,3>> BlockSolverType;
typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType;
auto solver=new g2o::OptimizationAlgorithmGaussNewton(
g2o::make_unique<BlockSolverType>(g2o::make_unique<LinearSolverType>()));
//set up the optimizer
g2o::SparseOptimizer optimizer;
optimizer.setAlgorithm(solver);
// enable verbose output
optimizer.setVerbose(true);
//add the vertex
VertexPose *v=new VertexPose();
v->setId(0); //unique ID of the vertex; edges are associated with it through this ID
v->setEstimate(Sophus::SE3d());
optimizer.addVertex(v);
// K
Eigen::Matrix3d K_eigen;
K_eigen <<
K.at<double>(0, 0), K.at<double>(0, 1), K.at<double>(0, 2),
K.at<double>(1, 0), K.at<double>(1, 1), K.at<double>(1, 2),
K.at<double>(2, 0), K.at<double>(2, 1), K.at<double>(2, 2);
int index = 1;
for (size_t i = 0; i < points_2d.size(); ++i) {
auto p2d = points_2d[i];
auto p3d = points_3d[i];
EdgeProjection *edge = new EdgeProjection(p3d, K_eigen);
edge->setId(index);
//the first argument is the edge's vertex slot (0 for a unary edge); it links the edge to the vertex added above
edge->setVertex(0, v); //set the connected vertex
edge->setMeasurement(p2d); //the observed value
edge->setInformation(Eigen::Matrix2d::Identity()); //information matrix: inverse of the covariance
optimizer.addEdge(edge); //add the edge
index++;
}
//initialize before running the optimization
optimizer.initializeOptimization();
optimizer.optimize(10); //run the optimization, with at most 10 iterations
cout << "pose estimated by g2o =\n" << v->estimate().matrix() << endl;
pose=v->estimate();
}