VINS-Fusion is an optimization-based multi-sensor state estimator that achieves accurate self-localization for autonomous applications (drones, cars, AR/VR). It extends VINS-Mono to support multiple visual-inertial sensor types (mono camera + IMU, stereo cameras + IMU, or even stereo cameras alone). The authors also provide a toy example of fusing VINS with GPS. Its main features:
multiple sensor support (stereo cameras / mono camera + IMU / stereo cameras + IMU)
online spatial calibration (transformation between camera and IMU)
online temporal calibration (time offset between camera and IMU)
visual loop closure
Related publications:
A General Optimization-based Framework for Local Odometry Estimation with Multiple Sensors, Tong Qin, Jie Pan, Shaozu Cao, Shaojie Shen, arXiv
A General Optimization-based Framework for Global Pose Estimation with Multiple Sensors, Tong Qin, Shaozu Cao, Jie Pan, Shaojie Shen, arXiv
Online Temporal Calibration for Monocular Visual-Inertial Systems, Tong Qin, Shaojie Shen, IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS 2018)
VINS-Mono: A Robust and Versatile Monocular Visual-Inertial State Estimator, Tong Qin, Peiliang Li, Shaojie Shen, IEEE Transactions on Robotics
The figure illustrates the fully parallelized design of the monocular VINS system, including the initialization procedure, the relocalization module, and the pose graph optimization module.
The system starts with measurement preprocessing (see Section IV), in which features are extracted and tracked, and IMU measurements between two consecutive frames are preintegrated. The initialization procedure (see Section V) provides all necessary values, including pose, velocity, gravity vector, gyroscope bias, and three-dimensional (3-D) feature locations, for bootstrapping the subsequent nonlinear optimization-based VIO. The VIO (see Section VI) and relocalization (see Section VII) modules tightly fuse preintegrated IMU measurements and feature observations. Finally, the pose graph optimization module (see Section VIII) takes in geometrically verified relocalization results and performs global optimization to eliminate drift; it also supports pose graph reuse. The VIO and pose graph optimization modules run concurrently in separate threads.
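As a concrete illustration of the preintegration step, below is a minimal sketch of integrating the IMU samples that arrive between two image frames. The type and function names (ImuSample, preintegrate) are placeholders and simple forward-Euler integration is used; the actual IntegrationBase class in VINS uses midpoint integration and additionally propagates the covariance and the Jacobians with respect to the accelerometer and gyroscope biases.

#include <Eigen/Dense>
#include <vector>

struct ImuSample
{
    double dt;               // time since the previous sample
    Eigen::Vector3d acc;     // accelerometer reading
    Eigen::Vector3d gyr;     // gyroscope reading
};

// Integrate all IMU samples between frame i and frame j into relative
// rotation / velocity / position increments expressed in frame i.
void preintegrate(const std::vector<ImuSample>& samples,
                  const Eigen::Vector3d& ba,        // accelerometer bias
                  const Eigen::Vector3d& bg,        // gyroscope bias
                  Eigen::Quaterniond& delta_q,
                  Eigen::Vector3d& delta_v,
                  Eigen::Vector3d& delta_p)
{
    delta_q.setIdentity();
    delta_v.setZero();
    delta_p.setZero();
    for (const ImuSample& s : samples)
    {
        Eigen::Vector3d w = s.gyr - bg;                   // bias-corrected rate
        Eigen::Quaterniond dq(1.0, 0.5 * w.x() * s.dt,    // small-angle rotation
                              0.5 * w.y() * s.dt,
                              0.5 * w.z() * s.dt);
        Eigen::Vector3d a = delta_q * (s.acc - ba);       // rotate into frame i
        delta_p += delta_v * s.dt + 0.5 * a * s.dt * s.dt;
        delta_v += a * s.dt;
        delta_q = (delta_q * dq).normalized();
    }
}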
Computing the reprojection error:
double
Camera::reprojectionError(const std::vector< std::vector<cv::Point3f> >& objectPoints,
                          const std::vector< std::vector<cv::Point2f> >& imagePoints,
                          const std::vector<cv::Mat>& rvecs,
                          const std::vector<cv::Mat>& tvecs,
                          cv::OutputArray _perViewErrors) const
{
int imageCount = objectPoints.size();
size_t pointsSoFar = 0;
double totalErr = 0.0;
bool computePerViewErrors = _perViewErrors.needed();
cv::Mat perViewErrors;
if (computePerViewErrors)
{
_perViewErrors.create(imageCount, 1, CV_64F);
perViewErrors = _perViewErrors.getMat();
}
for (int i = 0; i < imageCount; ++i)
{
size_t pointCount = imagePoints.at(i).size();
pointsSoFar += pointCount;
std::vector<cv::Point2f> estImagePoints;
projectPoints(objectPoints.at(i), rvecs.at(i), tvecs.at(i),
estImagePoints);
double err = 0.0;
for (size_t j = 0; j < imagePoints.at(i).size(); ++j)
{
err += cv::norm(imagePoints.at(i).at(j) - estImagePoints.at(j));
}
if (computePerViewErrors)
{
perViewErrors.at<double>(i) = err / pointCount;
}
totalErr += err;
}
return totalErr / pointsSoFar;
}
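In other words, the function returns the mean Euclidean reprojection error over all calibration views (a mean of norms, not an RMS value):

$$\bar{e} = \frac{1}{\sum_i N_i}\sum_i\sum_{j=1}^{N_i}\bigl\lVert \mathbf{p}_{ij} - \hat{\mathbf{p}}_{ij}\bigr\rVert_2,$$

where $\mathbf{p}_{ij}$ is the $j$-th measured image point of view $i$, $\hat{\mathbf{p}}_{ij}$ its projection under the estimated pose $(\mathbf{r}_i, \mathbf{t}_i)$, and $N_i$ the number of points in view $i$. The optional _perViewErrors output holds each view's own mean error $\tfrac{1}{N_i}\sum_j \lVert\cdot\rVert_2$.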
Computing the undistortion and rectification maps:
cv::Mat
PinholeCamera::initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2,
float fx, float fy,
cv::Size imageSize,
float cx, float cy,
cv::Mat rmat) const
{
if (imageSize == cv::Size(0, 0))
{
imageSize = cv::Size(mParameters.imageWidth(), mParameters.imageHeight());
}
cv::Mat mapX = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);
cv::Mat mapY = cv::Mat::zeros(imageSize.height, imageSize.width, CV_32F);
Eigen::Matrix3f R, R_inv;
cv::cv2eigen(rmat, R);
R_inv = R.inverse();
// assume no skew
Eigen::Matrix3f K_rect;
if (cx == -1.0f || cy == -1.0f)
{
K_rect << fx, 0, imageSize.width / 2,
0, fy, imageSize.height / 2,
0, 0, 1;
}
else
{
K_rect << fx, 0, cx,
0, fy, cy,
0, 0, 1;
}
if (fx == -1.0f || fy == -1.0f)
{
K_rect(0,0) = mParameters.fx();
K_rect(1,1) = mParameters.fy();
}
Eigen::Matrix3f K_rect_inv = K_rect.inverse();
for (int v = 0; v < imageSize.height; ++v)
{
for (int u = 0; u < imageSize.width; ++u)
{
Eigen::Vector3f xo;
xo << u, v, 1;
Eigen::Vector3f uo = R_inv * K_rect_inv * xo;
Eigen::Vector2d p;
spaceToPlane(uo.cast<double>(), p);
mapX.at<float>(v,u) = p(0);
mapY.at<float>(v,u) = p(1);
}
}
cv::convertMaps(mapX, mapY, map1, map2, CV_32FC1, false);
cv::Mat K_rect_cv;
cv::eigen2cv(K_rect, K_rect_cv);
return K_rect_cv;
}
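A typical way to use the maps computed above (a sketch: camera, distorted and rectified are placeholder names; the -1 sentinels fall back to the calibrated intrinsics, as handled inside the function):

cv::Mat distorted = cv::imread("frame.png", cv::IMREAD_GRAYSCALE); // placeholder input
cv::Mat map1, map2;
// identity rmat means pure undistortion, with no rotation of the image plane
camera->initUndistortRectifyMap(map1, map2, -1.0f, -1.0f, cv::Size(0, 0),
                                -1.0f, -1.0f, cv::Mat::eye(3, 3, CV_32F));
// the maps depend only on the calibration, so build them once and
// rectify every incoming frame with a cheap remap
cv::Mat rectified;
cv::remap(distorted, rectified, map1, map2, cv::INTER_LINEAR);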
Computing the Jacobian of the quaternion parameterization:
bool
EigenQuaternionParameterization::ComputeJacobian(const double* x,
double* jacobian) const
{
jacobian[0] = x[3]; jacobian[1] = x[2]; jacobian[2] = -x[1]; // NOLINT
jacobian[3] = -x[2]; jacobian[4] = x[3]; jacobian[5] = x[0]; // NOLINT
jacobian[6] = x[1]; jacobian[7] = -x[0]; jacobian[8] = x[3]; // NOLINT
jacobian[9] = -x[0]; jacobian[10] = -x[1]; jacobian[11] = -x[2]; // NOLINT
return true;
}
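This is the standard "lift" Jacobian of a quaternion parameterization with Eigen's $(x, y, z, w)$ storage order, matching Ceres' EigenQuaternionParameterization convention. Writing the update as $q' = \delta q(\boldsymbol{\delta}) \otimes q$ with $\delta q = \bigl(\cos\lVert\boldsymbol{\delta}\rVert,\ \tfrac{\sin\lVert\boldsymbol{\delta}\rVert}{\lVert\boldsymbol{\delta}\rVert}\,\boldsymbol{\delta}\bigr)$ and differentiating at $\boldsymbol{\delta} = \mathbf{0}$ gives the $4 \times 3$ matrix

$$\frac{\partial q'}{\partial \boldsymbol{\delta}}\bigg|_{\boldsymbol{\delta}=\mathbf{0}} = \begin{pmatrix} w & z & -y \\ -z & w & x \\ y & -x & w \\ -x & -y & -z \end{pmatrix},$$

which is exactly what the code fills in row-major order (rows correspond to $x', y', z', w'$; columns to $\delta_x, \delta_y, \delta_z$).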
enum ModelType
{
KANNALA_BRANDT,
MEI,
PINHOLE,
PINHOLE_FULL,
SCARAMUZZA
};
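The enum lists the camera models supported by the camera_model package (equidistant Kannala-Brandt, the MEI omnidirectional model, plain and full pinhole, and Scaramuzza's polynomial model). A minimal sketch of how the model_type field of a yaml config could be mapped onto this enum; readModelType is a hypothetical helper for illustration, and the package's actual loader (CameraFactory) does considerably more:

#include <opencv2/core/core.hpp>
#include <string>

// Hypothetical helper: read the "model_type" field of a camera yaml file
// and map it onto the ModelType enum above.
ModelType readModelType(const std::string& yamlFile)
{
    cv::FileStorage fs(yamlFile, cv::FileStorage::READ);
    std::string name;
    fs["model_type"] >> name;

    if (name == "KANNALA_BRANDT") return KANNALA_BRANDT;
    if (name == "MEI")            return MEI;
    if (name == "PINHOLE_FULL")   return PINHOLE_FULL;
    if (name == "SCARAMUZZA")     return SCARAMUZZA;
    return PINHOLE;               // default for "PINHOLE" and anything else
}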
Monocular distortion:
/**
 * \brief Apply distortion to an input point on the normalised plane
 *        and calculate the Jacobian
 *
 * \param p_u undistorted coordinates of the point on the normalised plane
 * \param d_u distortion offset; the distorted point is p_d = p_u + d_u
 * \param J Jacobian of the distorted point with respect to p_u
 */
void
PinholeCamera::distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u,
Eigen::Matrix2d& J) const
{
double k1 = mParameters.k1();
double k2 = mParameters.k2();
double p1 = mParameters.p1();
double p2 = mParameters.p2();
double mx2_u, my2_u, mxy_u, rho2_u, rad_dist_u;
mx2_u = p_u(0) * p_u(0);
my2_u = p_u(1) * p_u(1);
mxy_u = p_u(0) * p_u(1);
rho2_u = mx2_u + my2_u;
rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u;
d_u << p_u(0) * rad_dist_u + 2.0 * p1 * mxy_u + p2 * (rho2_u + 2.0 * mx2_u),
p_u(1) * rad_dist_u + 2.0 * p2 * mxy_u + p1 * (rho2_u + 2.0 * my2_u);
double dxdmx = 1.0 + rad_dist_u + k1 * 2.0 * mx2_u + k2 * rho2_u * 4.0 * mx2_u + 2.0 * p1 * p_u(1) + 6.0 * p2 * p_u(0);
double dydmx = k1 * 2.0 * p_u(0) * p_u(1) + k2 * 4.0 * rho2_u * p_u(0) * p_u(1) + p1 * 2.0 * p_u(0) + 2.0 * p2 * p_u(1);
double dxdmy = dydmx;
double dydmy = 1.0 + rad_dist_u + k1 * 2.0 * my2_u + k2 * rho2_u * 4.0 * my2_u + 6.0 * p1 * p_u(1) + 2.0 * p2 * p_u(0);
J << dxdmx, dxdmy,
dydmx, dydmy;
}
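The function implements the standard radial-tangential (plumb bob) model. With $r^2 = x_u^2 + y_u^2$, the offset added to the undistorted point $(x_u, y_u)$ is

$$\begin{aligned} d_x &= x_u\,(k_1 r^2 + k_2 r^4) + 2 p_1 x_u y_u + p_2\,(r^2 + 2 x_u^2),\\ d_y &= y_u\,(k_1 r^2 + k_2 r^4) + p_1\,(r^2 + 2 y_u^2) + 2 p_2 x_u y_u, \end{aligned}$$

and J holds the Jacobian $\partial(p_u + d_u)/\partial p_u$, which iterative undistortion routines (e.g. Gauss-Newton) can use.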
The config directory mainly contains the configuration information for the various supported datasets, saved in yaml format:
A3_ptgrey/
euroc/
extrinsic_parameter_example.pdf
fisheye_mask_752x480.jpg
fisheye_mask.jpg
kitti_odom/
kitti_raw/
mynteye/
simulation/
vi_car/
vins_rviz_config.rviz
Note that the MYNT EYE stereo camera (mynteye/) is supported out of the box. Moving on to the global fusion node: GlobalOptimization::optimize() fuses the local VIO poses with GPS measurements in a Ceres-based pose graph:
void GlobalOptimization::optimize()
{
while(true)
{
if(newGPS)
{
newGPS = false;
printf("global optimization\n");
TicToc globalOptimizationTime;
ceres::Problem problem;
ceres::Solver::Options options;
options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
//options.minimizer_progress_to_stdout = true;
//options.max_solver_time_in_seconds = SOLVER_TIME * 3;
options.max_num_iterations = 5;
ceres::Solver::Summary summary;
ceres::LossFunction *loss_function;
loss_function = new ceres::HuberLoss(1.0);
ceres::LocalParameterization* local_parameterization = new ceres::QuaternionParameterization();
//add param
mPoseMap.lock();
int length = localPoseMap.size();
// w^t_i w^q_i
double t_array[length][3];
double q_array[length][4];
map<double, vector<double>>::iterator iter;
iter = globalPoseMap.begin();
for (int i = 0; i < length; i++, iter++)
{
t_array[i][0] = iter->second[0];
t_array[i][1] = iter->second[1];
t_array[i][2] = iter->second[2];
q_array[i][0] = iter->second[3];
q_array[i][1] = iter->second[4];
q_array[i][2] = iter->second[5];
q_array[i][3] = iter->second[6];
problem.AddParameterBlock(q_array[i], 4, local_parameterization);
problem.AddParameterBlock(t_array[i], 3);
}
map<double, vector<double>>::iterator iterVIO, iterVIONext, iterGPS;
int i = 0;
for (iterVIO = localPoseMap.begin(); iterVIO != localPoseMap.end(); iterVIO++, i++)
{
//vio factor
iterVIONext = iterVIO;
iterVIONext++;
if(iterVIONext != localPoseMap.end())
{
Eigen::Matrix4d wTi = Eigen::Matrix4d::Identity();
Eigen::Matrix4d wTj = Eigen::Matrix4d::Identity();
wTi.block<3, 3>(0, 0) = Eigen::Quaterniond(iterVIO->second[3], iterVIO->second[4],
iterVIO->second[5], iterVIO->second[6]).toRotationMatrix();
wTi.block<3, 1>(0, 3) = Eigen::Vector3d(iterVIO->second[0], iterVIO->second[1], iterVIO->second[2]);
wTj.block<3, 3>(0, 0) = Eigen::Quaterniond(iterVIONext->second[3], iterVIONext->second[4],
iterVIONext->second[5], iterVIONext->second[6]).toRotationMatrix();
wTj.block<3, 1>(0, 3) = Eigen::Vector3d(iterVIONext->second[0], iterVIONext->second[1], iterVIONext->second[2]);
Eigen::Matrix4d iTj = wTi.inverse() * wTj;
Eigen::Quaterniond iQj;
iQj = iTj.block<3, 3>(0, 0);
Eigen::Vector3d iPj = iTj.block<3, 1>(0, 3);
ceres::CostFunction* vio_function = RelativeRTError::Create(iPj.x(), iPj.y(), iPj.z(),
iQj.w(), iQj.x(), iQj.y(), iQj.z(),
0.1, 0.01);
problem.AddResidualBlock(vio_function, NULL, q_array[i], t_array[i], q_array[i+1], t_array[i+1]);
}
//gps factor
double t = iterVIO->first;
iterGPS = GPSPositionMap.find(t);
if (iterGPS != GPSPositionMap.end())
{
ceres::CostFunction* gps_function = TError::Create(iterGPS->second[0], iterGPS->second[1],
iterGPS->second[2], iterGPS->second[3]);
//printf("inverse weight %f \n", iterGPS->second[3]);
problem.AddResidualBlock(gps_function, loss_function, t_array[i]);
}
}
//mPoseMap.unlock();
ceres::Solve(options, &problem, &summary);
//std::cout << summary.BriefReport() << "\n";
// update global pose
//mPoseMap.lock();
iter = globalPoseMap.begin();
for (int i = 0; i < length; i++, iter++)
{
vector<double> globalPose{t_array[i][0], t_array[i][1], t_array[i][2],
                          q_array[i][0], q_array[i][1], q_array[i][2], q_array[i][3]};
iter->second = globalPose;
if(i == length - 1)
{
Eigen::Matrix4d WVIO_T_body = Eigen::Matrix4d::Identity();
Eigen::Matrix4d WGPS_T_body = Eigen::Matrix4d::Identity();
double t = iter->first;
WVIO_T_body.block<3, 3>(0, 0) = Eigen::Quaterniond(localPoseMap[t][3], localPoseMap[t][4],
localPoseMap[t][5], localPoseMap[t][6]).toRotationMatrix();
WVIO_T_body.block<3, 1>(0, 3) = Eigen::Vector3d(localPoseMap[t][0], localPoseMap[t][1], localPoseMap[t][2]);
WGPS_T_body.block<3, 3>(0, 0) = Eigen::Quaterniond(globalPose[3], globalPose[4],
globalPose[5], globalPose[6]).toRotationMatrix();
WGPS_T_body.block<3, 1>(0, 3) = Eigen::Vector3d(globalPose[0], globalPose[1], globalPose[2]);
WGPS_T_WVIO = WGPS_T_body * WVIO_T_body.inverse();
}
}
updateGlobalPath();
//printf("global time %f \n", globalOptimizationTime.toc());
mPoseMap.unlock();
}
std::chrono::milliseconds dura(2000);
std::this_thread::sleep_for(dura);
}
return;
}
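For reference, the GPS factor created by TError::Create above is a plain 3-DoF position residual whitened by the reported GPS accuracy. Below is a sketch consistent with how it is called (3 residuals, one 3-dimensional translation block); in the repository this functor lives next to the optimizer in Factors.h:

#include <ceres/ceres.h>

struct TError
{
    TError(double t_x, double t_y, double t_z, double var)
        : t_x(t_x), t_y(t_y), t_z(t_z), var(var) {}

    template <typename T>
    bool operator()(const T* tj, T* residuals) const
    {
        // position error normalised by the reported GPS accuracy
        residuals[0] = (tj[0] - T(t_x)) / T(var);
        residuals[1] = (tj[1] - T(t_y)) / T(var);
        residuals[2] = (tj[2] - T(t_z)) / T(var);
        return true;
    }

    static ceres::CostFunction* Create(const double t_x, const double t_y,
                                       const double t_z, const double var)
    {
        // 3 residuals, one 3-dimensional parameter block (t_array[i])
        return new ceres::AutoDiffCostFunction<TError, 3, 3>(
            new TError(t_x, t_y, t_z, var));
    }

    double t_x, t_y, t_z, var;
};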
void PoseGraph::addKeyFrame(KeyFrame* cur_kf, bool flag_detect_loop)
{
//shift to base frame
Vector3d vio_P_cur;
Matrix3d vio_R_cur;
if (sequence_cnt != cur_kf->sequence)
{
sequence_cnt++;
sequence_loop.push_back(0);
w_t_vio = Eigen::Vector3d(0, 0, 0);
w_r_vio = Eigen::Matrix3d::Identity();
m_drift.lock();
t_drift = Eigen::Vector3d(0, 0, 0);
r_drift = Eigen::Matrix3d::Identity();
m_drift.unlock();
}
cur_kf->getVioPose(vio_P_cur, vio_R_cur);
vio_P_cur = w_r_vio * vio_P_cur + w_t_vio;
vio_R_cur = w_r_vio * vio_R_cur;
cur_kf->updateVioPose(vio_P_cur, vio_R_cur);
cur_kf->index = global_index;
global_index++;
int loop_index = -1;
if (flag_detect_loop)
{
TicToc tmp_t;
loop_index = detectLoop(cur_kf, cur_kf->index);
}
else
{
addKeyFrameIntoVoc(cur_kf);
}
if (loop_index != -1)
{
//printf(" %d detect loop with %d \n", cur_kf->index, loop_index);
KeyFrame* old_kf = getKeyFrame(loop_index);
if (cur_kf->findConnection(old_kf))
{
if (earliest_loop_index > loop_index || earliest_loop_index == -1)
earliest_loop_index = loop_index;
Vector3d w_P_old, w_P_cur, vio_P_cur;
Matrix3d w_R_old, w_R_cur, vio_R_cur;
old_kf->getVioPose(w_P_old, w_R_old);
cur_kf->getVioPose(vio_P_cur, vio_R_cur);
Vector3d relative_t;
Quaterniond relative_q;
relative_t = cur_kf->getLoopRelativeT();
relative_q = (cur_kf->getLoopRelativeQ()).toRotationMatrix();
w_P_cur = w_R_old * relative_t + w_P_old;
w_R_cur = w_R_old * relative_q;
double shift_yaw;
Matrix3d shift_r;
Vector3d shift_t;
shift_yaw = Utility::R2ypr(w_R_cur).x() - Utility::R2ypr(vio_R_cur).x();
shift_r = Utility::ypr2R(Vector3d(shift_yaw, 0, 0));
shift_t = w_P_cur - w_R_cur * vio_R_cur.transpose() * vio_P_cur;
// shift vio pose of whole sequence to the world frame
if (old_kf->sequence != cur_kf->sequence && sequence_loop[cur_kf->sequence] == 0)
{
w_r_vio = shift_r;
w_t_vio = shift_t;
vio_P_cur = w_r_vio * vio_P_cur + w_t_vio;
vio_R_cur = w_r_vio * vio_R_cur;
cur_kf->updateVioPose(vio_P_cur, vio_R_cur);
list<KeyFrame*>::iterator it = keyframelist.begin();
for (; it != keyframelist.end(); it++)
{
if((*it)->sequence == cur_kf->sequence)
{
Vector3d vio_P_cur;
Matrix3d vio_R_cur;
(*it)->getVioPose(vio_P_cur, vio_R_cur);
vio_P_cur = w_r_vio * vio_P_cur + w_t_vio;
vio_R_cur = w_r_vio * vio_R_cur;
(*it)->updateVioPose(vio_P_cur, vio_R_cur);
}
}
sequence_loop[cur_kf->sequence] = 1;
}
m_optimize_buf.lock();
optimize_buf.push(cur_kf->index);
m_optimize_buf.unlock();
}
}
m_keyframelist.lock();
Vector3d P;
Matrix3d R;
cur_kf->getVioPose(P, R);
P = r_drift * P + t_drift;
R = r_drift * R;
cur_kf->updatePose(P, R);
Quaterniond Q{R};
geometry_msgs::PoseStamped pose_stamped;
pose_stamped.header.stamp = ros::Time(cur_kf->time_stamp);
pose_stamped.header.frame_id = "world";
pose_stamped.pose.position.x = P.x() + VISUALIZATION_SHIFT_X;
pose_stamped.pose.position.y = P.y() + VISUALIZATION_SHIFT_Y;
pose_stamped.pose.position.z = P.z();
pose_stamped.pose.orientation.x = Q.x();
pose_stamped.pose.orientation.y = Q.y();
pose_stamped.pose.orientation.z = Q.z();
pose_stamped.pose.orientation.w = Q.w();
path[sequence_cnt].poses.push_back(pose_stamped);
path[sequence_cnt].header = pose_stamped.header;
if (SAVE_LOOP_PATH)
{
ofstream loop_path_file(VINS_RESULT_PATH, ios::app);
loop_path_file.setf(ios::fixed, ios::floatfield);
loop_path_file.precision(0);
loop_path_file << cur_kf->time_stamp * 1e9 << ",";
loop_path_file.precision(5);
loop_path_file << P.x() << ","
<< P.y() << ","
<< P.z() << ","
<< Q.w() << ","
<< Q.x() << ","
<< Q.y() << ","
<< Q.z() << ","
<< endl;
loop_path_file.close();
}
//draw local connection
if (SHOW_S_EDGE)
{
list<KeyFrame*>::reverse_iterator rit = keyframelist.rbegin();
for (int i = 0; i < 4; i++)
{
if (rit == keyframelist.rend())
break;
Vector3d connected_P;
Matrix3d connected_R;
if((*rit)->sequence == cur_kf->sequence)
{
(*rit)->getPose(connected_P, connected_R);
posegraph_visualization->add_edge(P, connected_P);
}
rit++;
}
}
if (SHOW_L_EDGE)
{
if (cur_kf->has_loop)
{
//printf("has loop \n");
KeyFrame* connected_KF = getKeyFrame(cur_kf->loop_index);
Vector3d connected_P,P0;
Matrix3d connected_R,R0;
connected_KF->getPose(connected_P, connected_R);
//cur_kf->getVioPose(P0, R0);
cur_kf->getPose(P0, R0);
if(cur_kf->sequence > 0)
{
//printf("add loop into visual \n");
posegraph_visualization->add_loopedge(P0, connected_P + Vector3d(VISUALIZATION_SHIFT_X, VISUALIZATION_SHIFT_Y, 0));
}
}
}
//posegraph_visualization->add_pose(P + Vector3d(VISUALIZATION_SHIFT_X, VISUALIZATION_SHIFT_Y, 0), Q);
keyframelist.push_back(cur_kf);
publish();
m_keyframelist.unlock();
}
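The sequence alignment above relies on VINS-Mono's 4-DoF drift model: with an IMU, roll and pitch are observable from gravity, so accumulated drift can only occur in yaw and translation. When a loop between sequences is found, the shift applied to the whole current sequence is

$$\Delta\psi = \psi(\mathbf{R}^w_{cur}) - \psi(\mathbf{R}^{vio}_{cur}), \qquad \mathbf{R}_{shift} = \mathbf{R}_z(\Delta\psi), \qquad \mathbf{t}_{shift} = \mathbf{p}^w_{cur} - \mathbf{R}^w_{cur}\,(\mathbf{R}^{vio}_{cur})^{\top}\,\mathbf{p}^{vio}_{cur},$$

where $\psi(\cdot)$ extracts the yaw angle; every keyframe pose of the sequence is then re-mapped by $\mathbf{p} \leftarrow \mathbf{R}_{shift}\,\mathbf{p} + \mathbf{t}_{shift}$.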
This is one of the more difficult parts; it is located under the vins_estimator folder:
SfM (structure-from-motion) is an offline algorithm that performs 3D reconstruction from an unordered collection of images. Before running the core structure-from-motion step, some preparation is needed to select suitable frames. Below, the rotation and translation between cameras are recovered from matched feature points.
bool GlobalSFM::solveFrameByPnP(Matrix3d &R_initial, Vector3d &P_initial, int i,
                                vector<SFMFeature> &sfm_f)
{
vector<cv::Point2f> pts_2_vector;
vector<cv::Point3f> pts_3_vector;
for (int j = 0; j < feature_num; j++)
{
if (sfm_f[j].state != true)
continue;
Vector2d point2d;
for (int k = 0; k < (int)sfm_f[j].observation.size(); k++)
{
if (sfm_f[j].observation[k].first == i)
{
Vector2d img_pts = sfm_f[j].observation[k].second;
cv::Point2f pts_2(img_pts(0), img_pts(1));
pts_2_vector.push_back(pts_2);
cv::Point3f pts_3(sfm_f[j].position[0], sfm_f[j].position[1], sfm_f[j].position[2]);
pts_3_vector.push_back(pts_3);
break;
}
}
}
if (int(pts_2_vector.size()) < 15)
{
printf("unstable features tracking, please slowly move you device!\n");
if (int(pts_2_vector.size()) < 10)
return false;
}
cv::Mat r, rvec, t, D, tmp_r;
cv::eigen2cv(R_initial, tmp_r);
cv::Rodrigues(tmp_r, rvec);
cv::eigen2cv(P_initial, t);
// the features are on the normalised plane, so K is the identity and the
// distortion coefficients D stay empty
cv::Mat K = (cv::Mat_<double>(3, 3) << 1, 0, 0, 0, 1, 0, 0, 0, 1);
bool pnp_succ;
// the last argument (useExtrinsicGuess = 1) seeds the solver with rvec and t
pnp_succ = cv::solvePnP(pts_3_vector, pts_2_vector, K, D, rvec, t, 1);
if(!pnp_succ)
{
return false;
}
cv::Rodrigues(rvec, r);
//cout << "r " << endl << r << endl;
MatrixXd R_pnp;
cv::cv2eigen(r, R_pnp);
MatrixXd T_pnp;
cv::cv2eigen(t, T_pnp);
R_initial = R_pnp;
P_initial = T_pnp;
return true;
}
Building the sparse 3D point cloud:
bool GlobalSFM::construct(int frame_num, Quaterniond* q, Vector3d* T, int l,
                          const Matrix3d relative_R, const Vector3d relative_T,
                          vector<SFMFeature> &sfm_f, map<int, Vector3d> &sfm_tracked_points)
{
feature_num = sfm_f.size();
//cout << "set 0 and " << l << " as known " << endl;
// have relative_r relative_t
// intial two view
q[l].w() = 1;
q[l].x() = 0;
q[l].y() = 0;
q[l].z() = 0;
T[l].setZero();
q[frame_num - 1] = q[l] * Quaterniond(relative_R);
T[frame_num - 1] = relative_T;
//cout << "init q_l " << q[l].w() << " " << q[l].vec().transpose() << endl;
//cout << "init t_l " << T[l].transpose() << endl;
//rotate to cam frame
Matrix3d c_Rotation[frame_num];
Vector3d c_Translation[frame_num];
Quaterniond c_Quat[frame_num];
double c_rotation[frame_num][4];
double c_translation[frame_num][3];
Eigen::Matrix<double, 3, 4> Pose[frame_num];
c_Quat[l] = q[l].inverse();
c_Rotation[l] = c_Quat[l].toRotationMatrix();
c_Translation[l] = -1 * (c_Rotation[l] * T[l]);
Pose[l].block<3, 3>(0, 0) = c_Rotation[l];
Pose[l].block<3, 1>(0, 3) = c_Translation[l];
c_Quat[frame_num - 1] = q[frame_num - 1].inverse();
c_Rotation[frame_num - 1] = c_Quat[frame_num - 1].toRotationMatrix();
c_Translation[frame_num - 1] = -1 * (c_Rotation[frame_num - 1] * T[frame_num - 1]);
Pose[frame_num - 1].block<3, 3>(0, 0) = c_Rotation[frame_num - 1];
Pose[frame_num - 1].block<3, 1>(0, 3) = c_Translation[frame_num - 1];
//1: trangulate between l ----- frame_num - 1
//2: solve pnp l + 1; trangulate l + 1 ------- frame_num - 1;
for (int i = l; i < frame_num - 1 ; i++)
{
// solve pnp
if (i > l)
{
Matrix3d R_initial = c_Rotation[i - 1];
Vector3d P_initial = c_Translation[i - 1];
if(!solveFrameByPnP(R_initial, P_initial, i, sfm_f))
return false;
c_Rotation[i] = R_initial;
c_Translation[i] = P_initial;
c_Quat[i] = c_Rotation[i];
Pose[i].block<3, 3>(0, 0) = c_Rotation[i];
Pose[i].block<3, 1>(0, 3) = c_Translation[i];
}
// triangulate point based on the solve pnp result
triangulateTwoFrames(i, Pose[i], frame_num - 1, Pose[frame_num - 1], sfm_f);
}
//3: triangulate l-----l+1 l+2 ... frame_num -2
for (int i = l + 1; i < frame_num - 1; i++)
triangulateTwoFrames(l, Pose[l], i, Pose[i], sfm_f);
//4: solve pnp l-1; triangulate l-1 ----- l
// l-2 l-2 ----- l
for (int i = l - 1; i >= 0; i--)
{
//solve pnp
Matrix3d R_initial = c_Rotation[i + 1];
Vector3d P_initial = c_Translation[i + 1];
if(!solveFrameByPnP(R_initial, P_initial, i, sfm_f))
return false;
c_Rotation[i] = R_initial;
c_Translation[i] = P_initial;
c_Quat[i] = c_Rotation[i];
Pose[i].block<3, 3>(0, 0) = c_Rotation[i];
Pose[i].block<3, 1>(0, 3) = c_Translation[i];
//triangulate
triangulateTwoFrames(i, Pose[i], l, Pose[l], sfm_f);
}
//5: triangulate all other points
for (int j = 0; j < feature_num; j++)
{
if (sfm_f[j].state == true)
continue;
if ((int)sfm_f[j].observation.size() >= 2)
{
Vector2d point0, point1;
int frame_0 = sfm_f[j].observation[0].first;
point0 = sfm_f[j].observation[0].second;
int frame_1 = sfm_f[j].observation.back().first;
point1 = sfm_f[j].observation.back().second;
Vector3d point_3d;
triangulatePoint(Pose[frame_0], Pose[frame_1], point0, point1, point_3d);
sfm_f[j].state = true;
sfm_f[j].position[0] = point_3d(0);
sfm_f[j].position[1] = point_3d(1);
sfm_f[j].position[2] = point_3d(2);
//cout << "trangulated : " << frame_0 << " " << frame_1 << " 3d point : " << j << " " << point_3d.transpose() << endl;
}
}
/*
for (int i = 0; i < frame_num; i++)
{
q[i] = c_Rotation[i].transpose();
cout << "solvePnP q" << " i " << i << " " << q[i].w() << " " << q[i].vec().transpose() << endl;
}
*/
// The listing is truncated here: the remainder of construct() refines all
// camera poses and triangulated points with a full ceres bundle adjustment
// and writes the optimized landmarks into sfm_tracked_points.
Relative pose estimation:
bool MotionEstimator::solveRelativeRT(const vector<pair<Vector3d, Vector3d>> &corres, Matrix3d &Rotation, Vector3d &Translation)
{
if (corres.size() >= 15)
{
vector<cv::Point2f> ll, rr;
for (int i = 0; i < int(corres.size()); i++)
{
ll.push_back(cv::Point2f(corres[i].first(0), corres[i].first(1)));
rr.push_back(cv::Point2f(corres[i].second(0), corres[i].second(1)));
}
cv::Mat mask;
// the correspondences lie on the normalised plane, so the matrix estimated
// here is in fact the essential matrix E (the camera matrix below is identity)
cv::Mat E = cv::findFundamentalMat(ll, rr, cv::FM_RANSAC, 0.3 / 460, 0.99, mask);
cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << 1, 0, 0, 0, 1, 0, 0, 0, 1);
cv::Mat rot, trans;
int inlier_cnt = cv::recoverPose(E, ll, rr, cameraMatrix, rot, trans, mask);
//cout << "inlier_cnt " << inlier_cnt << endl;
Eigen::Matrix3d R;
Eigen::Vector3d T;
for (int i = 0; i < 3; i++)
{
T(i) = trans.at<double>(i, 0);
for (int j = 0; j < 3; j++)
R(i, j) = rot.at<double>(i, j);
}
Rotation = R.transpose();
Translation = -R.transpose() * T;
if(inlier_cnt > 12)
return true;
else
return false;
}
return false;
}
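Because ll and rr are normalised coordinates (the camera matrix is the identity), the recovered matrix satisfies $E = [\mathbf{t}]_{\times}\mathbf{R}$, where $(\mathbf{R}, \mathbf{t})$ takes points from the first frame into the second. The function returns the inverse transform, i.e. the pose of the second camera expressed in the first frame:

$$\mathbf{R}_{12} = \mathbf{R}^{\top}, \qquad \mathbf{t}_{12} = -\mathbf{R}^{\top}\mathbf{t},$$

and accepts the result only if recoverPose reports more than 12 inliers.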
...
Derivations of the underlying theory will be added gradually in future updates.