Purely demonstrative code to illustrate the bundle adjustment (BA) process; no sparse-matrix optimizations are applied. Three camera poses observe a set of random 3D points, noise is added to the points, and Gauss-Newton jointly refines the points and all poses except the first, which is held fixed.
#include <iostream>
#include <vector>
#include <cmath>
#include <cstdlib>
#include <ctime>
#include <Eigen/Core>
#include <Eigen/Dense>
#include <opencv2/core/core.hpp>
#include "sophus/se3.h"
using namespace std;
using namespace Eigen;
using namespace cv;
typedef vector<Vector3d, Eigen::aligned_allocator<Vector3d>> VecVector3d;
typedef vector<Vector2d, Eigen::aligned_allocator<Vector2d>> VecVector2d;
typedef Matrix<double, 6, 1> Vector6d;
double fx = 520.9, fy = 521.0, cx = 325.1, cy = 249.7;
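// Pinhole intrinsics. Throughout this demo the standard pinhole model is assumed:
// a camera-frame point P = (X, Y, Z) projects to u = fx * X / Z + cx, v = fy * Y / Z + cy.
// Both getUV() and the residual inside the Gauss-Newton loop use exactly this model.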
// Build a rotation matrix from roll-pitch-yaw angles in the ZYX convention,
// i.e. R = Rz(yaw) * Ry(pitch) * Rx(roll). (Only referenced by commented-out code below.)
template<typename T>
Eigen::Matrix<T, 3, 3> RPY2mat(T roll, T pitch, T yaw)
{
Eigen::Matrix<T, 3, 3> m;
T cr = cos(roll);
T sr = sin(roll);
T cp = cos(pitch);
T sp = sin(pitch);
T cy = cos(yaw);
T sy = sin(yaw);
m(0,0) = cy * cp;
m(0,1) = cy * sp * sr - sy * cr;
m(0,2) = cy * sp * cr + sy * sr;
m(1,0) = sy * cp;
m(1,1) = sy * sp * sr + cy * cr;
m(1,2) = sy * sp * cr - cy * sr;
m(2,0) = - sp;
m(2,1) = cp * sr;
m(2,2) = cp * cr;
return m;
}
void getPt3d(VecVector3d& p3d, int size)
{
srand(time(NULL));
for(int i = 0; i < size; i++)
{
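// Random points with x, y in [-100, -1] and z in [500, 599] (large positive depth).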
double x = rand() % 100 - 100;
double y = rand() % 100 - 100;
double z = rand() % 100 + 500;
p3d.push_back ( Vector3d( x, y, z) );
}
}
void outputPt3d(const VecVector3d & p3ds)
{
for(int i = 0; i < p3ds.size(); i++)
{
std::cout << p3ds[i](0) << " " << p3ds[i](1) << " " << p3ds[i](2) << std::endl;
}
}
void getUV(VecVector2d & p2ds,
VecVector3d& p3d,
Sophus::SE3 & SE3_RT);
int main()
{
// 1. Generate the camera poses (Eigen::AngleAxisd expects a unit rotation axis, hence .normalized())
Eigen::Matrix3d R = Eigen::AngleAxisd(M_PI/4, Eigen::Vector3d(0, 0, 0.81).normalized()).toRotationMatrix();
//Eigen::Matrix3d R = RPY2mat(0.1, 0.1, 0.0);
Eigen::Vector3d t(1, 0, 0);
// Construct SE3 from R and t
Sophus::SE3 SE3_RT1(R, t);
Eigen::Matrix3d R2 = Eigen::AngleAxisd(M_PI/4, Eigen::Vector3d(0.81, 0, 1).normalized()).toRotationMatrix();
Eigen::Vector3d t2(20, 10, 100);
Sophus::SE3 SE3_RT2(R2, t2);
Eigen::Matrix3d R3 = Eigen::AngleAxisd(M_PI/4, Eigen::Vector3d(0.52, 0, 1).normalized()).toRotationMatrix();
Eigen::Vector3d t3(50, 30, 100);
Sophus::SE3 SE3_RT3(R3, t3);
vector<Sophus::SE3> poses = { SE3_RT1, SE3_RT2, SE3_RT3 };
//vector<Sophus::SE3> poses = { SE3_RT1 };
// 2. Generate the 3D points
const int POINT_SIZE = 20;
VecVector3d p3d;
getPt3d(p3d, POINT_SIZE);
// 3. Generate the pixel observations for each pose
vector<VecVector2d> pt2ds(poses.size());
for(int i = 0; i < poses.size(); i++)
{
getUV(pt2ds[i], p3d, poses[i]);
}
//outputPt3d(p3d);
// Add noise to the 3D points (the commented-out block below would also perturb the first pose)
for(int i = 0; i < POINT_SIZE; i++)
{
p3d[i](0) += 4;//((i % 2) ==0 ? -4 : 4);
}
//Eigen::Matrix3d R4 = Eigen::AngleAxisd(M_PI/4, Eigen::Vector3d(0.01, 0.0, 0.81)).toRotationMatrix();
//Eigen::Matrix3d R4 = RPY2mat(0.15, 0.10, 0.0);
//Eigen::Vector3d t4(1, 0, 0);
//Sophus::SE3 SE3_RT4(R4, t4);
//poses[0] = SE3_RT4;
//outputPt3d(p3d);
// Gauss-Newton iterations
int iterations = 10;
// Jij has dimension 2 x cols
// H has dimension cols x cols
// b has dimension cols x 1
int cols = poses.size() * 6 + p3d.size() * 3;
std::cout << "cols is " << cols << std::endl;
double lastCost = 0;
double cost = 0;
const int POSE_SIZE = poses.size();
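// With the values above (3 poses, 20 points): cols = 3 * 6 + 20 * 3 = 78, so H is 78 x 78
// and b is 78 x 1. A real BA solver would exploit the sparsity of H (e.g. marginalizing the
// points via the Schur complement); here H is deliberately kept dense.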
for (int iter = 0; iter < iterations; iter++)
{
MatrixXd H = Eigen::MatrixXd::Zero(cols, cols);
MatrixXd b = Eigen::MatrixXd::Zero(cols, 1);
cost = 0;
for(int i = 0; i < poses.size(); i++)
{
for(int j = 0; j < p3d.size(); j++)
{
// Loop over every observation; each observation fills two blocks of Jij: the pose block and the point block
Vector2d p2 = pt2ds[i][j];
Vector3d p3 = p3d[j];
Vector3d P = poses[i] * p3;
double x = P[0];
double y = P[1];
double z = P[2];
Vector2d p2_ = { fx * ( x/z ) + cx, fy * ( y/z ) + cy };
Vector2d e = p2 - p2_; // error = observation - prediction
cost += (e[0]*e[0]+e[1]*e[1]);
MatrixXd J = Eigen::MatrixXd::Zero(2, cols);
// Block of Jij related to the pose
// The first pose is held fixed (its block stays zero) to remove the gauge freedom
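// With the left-perturbation model and e = observation - prediction, the 2x6 pose block of Jij
// is the standard projection Jacobian (P = (x, y, z) is the point in the camera frame),
// with dxi ordered as (translation, rotation) to match Sophus::SE3::exp:
//   de/d(dxi) = - [ fx/z   0     -fx*x/z^2   -fx*x*y/z^2     fx+fx*x^2/z^2   -fx*y/z ]
//                 [ 0      fy/z  -fy*y/z^2   -fy-fy*y^2/z^2  fy*x*y/z^2       fy*x/z ]
// The assignments below are these entries with the leading minus sign already applied.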
if(i != 0)
{
J(0,6*i + 0) = -(fx/z);
J(0,6*i + 1) = 0;
J(0,6*i + 2) = (fx*x/(z*z));
J(0,6*i + 3) = (fx*x*y/(z*z));
J(0,6*i + 4) = -(fx*x*x/(z*z)+fx);
J(0,6*i + 5) = (fx*y/z);
J(1,6*i + 0) = 0;
J(1,6*i + 1) = -(fy/z);
J(1,6*i + 2) = (fy*y/(z*z));
J(1,6*i + 3) = (fy*y*y/(z*z)+fy);
J(1,6*i + 4) = -(fy*x*y/(z*z));
J(1,6*i + 5) = -(fy*x/z);
}
// Block of Jij related to the 3D point coordinates
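// For the point block, de/dP_world = -(dp/dP_cam) * R, where R is the rotation of the current
// pose and K below is -(dp/dP_cam), the negated 2x3 Jacobian of the projection w.r.t. the
// point expressed in the camera frame.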
Eigen::Matrix<double, 2, 3> K;
K << -(fx / z), 0, (fx *x / (z*z)), 0, -(fy / z), fy * y / (z*z);
Matrix3d R = poses[i].rotation_matrix();
J.block(0, 6 * POSE_SIZE + 3 * j, 2,3) = K * R;
H += J.transpose() * J;
b += -J.transpose() * e;
}
}
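// Gauss-Newton normal equations: H = sum_ij Jij^T * Jij, b = -sum_ij Jij^T * e_ij;
// the update is obtained by solving H * dx = b.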
// Solve the normal equations H * dx = b
VectorXd dx(cols);
dx = H.ldlt().solve(b);
if (std::isnan(dx[0])) {
cout << "result is nan!" << endl;
break;
}
// Stop once the cost no longer decreases
if (iter > 0 && cost >= lastCost) {
cout << "cost: " << cost << " >= last cost: " << lastCost << ", stopping." << endl;
break;
}
// Apply the update dx
// Update the poses first
//std::cout << " dx " << dx << std::endl;
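// Left-multiplicative update T <- exp(dxi) * T, matching the left-perturbation Jacobian
// assembled above; pose 0 is skipped because it is held fixed.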
for(int i = 1; i < poses.size(); i++)
{
poses[i] = Sophus::SE3::exp(dx.block(i*6, 0, 6, 1)) * poses[i];
}
int startIdx = poses.size() * 6;
// Then update the 3D points
for(int j = 0; j < p3d.size(); j++)
{
p3d[j] = dx.block(startIdx + j*3, 0, 3, 1) + p3d[j];
}
lastCost = cost;
cout << "iteration " << iter << " cost=" << cost << endl;
}
// TODO: output the optimized poses and points
return 0;
}
// p2ds holds the observed pixel coordinates; the correspondence to the 3D points must be preserved,
// i.e. p2ds[j] is the observation of p3d[j] under this pose, so
// p3d.size() == p2ds.size()
void getUV(VecVector2d & p2ds,
VecVector3d& p3d,
Sophus::SE3 & SE3_RT)
{
// For simplicity, assume every point is visible to every camera
for(auto & p3 : p3d)
{
Vector3d P = SE3_RT * p3;
double x = P[0];
double y = P[1];
double z = P[2];
Vector2d p2_ = { fx * ( x/z ) + cx, fy * ( y/z ) + cy };
p2ds.push_back(p2_);
}
return ;
}