VINS-Mono源码分析6— pose_graph1(回环检测与重定位)

VINS-Mono源码分析6— pose_graph1

// Initialize the ROS node "pose_graph" with a private ("~") node handle.
ros::init(argc, argv, "pose_graph");
ros::NodeHandle n("~");

pose_graph包的入口,ROS程序的初始处理。

posegraph.registerPub(n);
void PoseGraph::registerPub(ros::NodeHandle &n)
{
    // Advertise the path/marker topics drawn in RViz (the green trajectory
    // lines and the pose-graph marker array).
    pub_pg_path = n.advertise<nav_msgs::Path>("pose_graph_path", 1000);
    pub_base_path = n.advertise<nav_msgs::Path>("base_path", 1000);
    pub_pose_graph = n.advertise<visualization_msgs::MarkerArray>("pose_graph", 1000);
    // One path publisher per sequence, indices 1..9 (slot 0 is unused).
    for (int seq = 1; seq <= 9; ++seq)
        pub_path[seq] = n.advertise<nav_msgs::Path>("path_" + to_string(seq), 1000);
}

一些待发布话题的定义

// Read node parameters, set in euroc.launch under ~/vins_estimator/launch.
n.getParam("visualization_shift_x", VISUALIZATION_SHIFT_X);// RViz x offset, default 0
n.getParam("visualization_shift_y", VISUALIZATION_SHIFT_Y);// RViz y offset, default 0
n.getParam("skip_cnt", SKIP_CNT);// process every (SKIP_CNT+1)-th frame, default 0
n.getParam("skip_dis", SKIP_DIS);// min translation between keyframes, default 0
std::string config_file;
n.getParam("config_file", config_file);// path to the yaml settings file

~/vins_estimator/launch文件夹下的euroc.launch文件中读取参数。

// Open the yaml settings file (default: ~/config/euroc/euroc_config.yaml).
// NOTE(review): on failure only an error is printed; execution continues.
cv::FileStorage fsSettings(config_file, cv::FileStorage::READ);
if(!fsSettings.isOpened())
{
    std::cerr << "ERROR: Wrong path to settings" << std::endl;
}

读取设置文件,默认是~/config/euroc文件夹下的euroc_config.yaml

double camera_visual_size = fsSettings["visualize_camera_size"];// e.g. 0.4
cameraposevisual.setScale(camera_visual_size);// size of the camera frustum marker in RViz
cameraposevisual.setLineWidth(camera_visual_size / 10.0);// marker line width

从设置文件读取参数,用于配置RVIZ下相机位姿状态的显示。

LOOP_CLOSURE = fsSettings["loop_closure"];// 1 enables loop closing (default)
std::string IMAGE_TOPIC;
// read from the settings file later; 0 by default
int LOAD_PREVIOUS_POSE_GRAPH;

从设置文件读取参数,用于配置是否开启回环检测,默认开启。

if (LOOP_CLOSURE)
{
    ROW = fsSettings["image_height"];// image rows, e.g. 480
    COL = fsSettings["image_width"];// image cols, e.g. 752
    // resolve the on-disk location of the pose_graph ROS package
    std::string pkg_path = ros::package::getPath("pose_graph");
    string vocabulary_file = pkg_path + "/../support_files/brief_k10L6.bin";
    cout << "vocabulary_file" << vocabulary_file << endl;
    posegraph.loadVocabulary(vocabulary_file);
}

从设置文件读取各种参数,注意loadVocabulary()函数

// Load the pre-trained BRIEF vocabulary from disk and attach it to the DBoW2
// image database (no direct index, level 0) used for loop detection.
void PoseGraph::loadVocabulary(std::string voc_path)
{
    voc = new BriefVocabulary(voc_path);
    db.setVocabulary(*voc, false, 0);
}

它的作用是加载词典,图像数据库的一些初始化操作,参考博客《VINS-Mono中的DBoW2关键代码注释》。

// Subscriptions: IMU-rate propagated pose, VIO odometry, raw images, keyframe
// poses, camera-IMU extrinsics, keyframe feature points, and the estimator's
// relocalization result.
ros::Subscriber sub_imu_forward = n.subscribe("/vins_estimator/imu_propagate", 2000, imu_forward_callback);
ros::Subscriber sub_vio = n.subscribe("/vins_estimator/odometry", 2000, vio_callback);
ros::Subscriber sub_image = n.subscribe(IMAGE_TOPIC, 2000, image_callback);
ros::Subscriber sub_pose = n.subscribe("/vins_estimator/keyframe_pose", 2000, pose_callback);
ros::Subscriber sub_extrinsic = n.subscribe("/vins_estimator/extrinsic", 2000, extrinsic_callback);
ros::Subscriber sub_point = n.subscribe("/vins_estimator/keyframe_point", 2000, point_callback);
ros::Subscriber sub_relo_relative_pose = n.subscribe("/vins_estimator/relo_relative_pose", 2000, relo_relative_pose_callback);

订阅的话题及回调函数。

分析一下各种回调函数吧!

// Forwarded IMU-rate pose from the estimator; only acted upon when
// VISUALIZE_IMU_FORWARD is enabled in the settings file (body elided here).
void imu_forward_callback(const nav_msgs::Odometry::ConstPtr &forward_msg)
{
    if (VISUALIZE_IMU_FORWARD)
    {
       ......
    }
}

目前没用到,修改参数文件的相关参数,可调用此函数

// Current VIO odometry. Applies the sequence alignment (w_r_vio/w_t_vio) and
// the loop-closure drift correction (r_drift/t_drift), publishes the camera
// pose marker and a short trail of recent positions, and, when loop closing
// is disabled, the raw VIO path.
void vio_callback(const nav_msgs::Odometry::ConstPtr &pose_msg)
{
    // vio_t / vio_q: IMU pose in the VIO world frame
    Vector3d vio_t(pose_msg->pose.pose.position.x, pose_msg->pose.pose.position.y, pose_msg->pose.pose.position.z);
    Quaterniond vio_q;
    vio_q.w() = pose_msg->pose.pose.orientation.w;
    vio_q.x() = pose_msg->pose.pose.orientation.x;
    vio_q.y() = pose_msg->pose.pose.orientation.y;
    vio_q.z() = pose_msg->pose.pose.orientation.z;
    // w_r_vio defaults to identity, w_t_vio to zero (set after a
    // cross-sequence loop closure aligns the sequences)
    vio_t = posegraph.w_r_vio * vio_t + posegraph.w_t_vio;
    vio_q = posegraph.w_r_vio *  vio_q;
    // r_drift defaults to identity, t_drift to zero; updated after loop closure
    vio_t = posegraph.r_drift * vio_t + posegraph.t_drift;
    vio_q = posegraph.r_drift * vio_q;

    Vector3d vio_t_cam;
    Quaterniond vio_q_cam;
    // camera pose in the world frame (IMU pose composed with extrinsics tic/qic)
    vio_t_cam = vio_t + vio_q * tic;
    vio_q_cam = vio_q * qic;        

    if (!VISUALIZE_IMU_FORWARD)
    {
        // publish the camera frustum marker shown in RViz while running
        cameraposevisual.reset();
        cameraposevisual.add_pose(vio_t_cam, vio_q_cam);
        cameraposevisual.publish_by(pub_camera_pose_visual, pose_msg->header);
    }
    
    // keep only the 10 most recent camera positions
    odometry_buf.push(vio_t_cam);
    if (odometry_buf.size() > 10)
    {
        odometry_buf.pop();
    }
    // configure the sphere-list marker holding the recent positions
    visualization_msgs::Marker key_odometrys;
    key_odometrys.header = pose_msg->header;
    key_odometrys.header.frame_id = "world";
    key_odometrys.ns = "key_odometrys";
    key_odometrys.type = visualization_msgs::Marker::SPHERE_LIST;
    key_odometrys.action = visualization_msgs::Marker::ADD;
    key_odometrys.pose.orientation.w = 1.0;
    key_odometrys.lifetime = ros::Duration();

    //static int key_odometrys_id = 0;
    key_odometrys.id = 0; //key_odometrys_id++;
    key_odometrys.scale.x = 0.1;
    key_odometrys.scale.y = 0.1;
    key_odometrys.scale.z = 0.1;
    key_odometrys.color.r = 1.0;
    //key_odometrys.color.g = 1.0;
    //key_odometrys.color.b = 1.0;
    key_odometrys.color.a = 1.0;
    // traverse the queue by popping each element and pushing it back
    for (unsigned int i = 0; i < odometry_buf.size(); i++)
    {
        geometry_msgs::Point pose_marker;
        Vector3d vio_t;
        vio_t = odometry_buf.front();
        odometry_buf.pop();
        pose_marker.x = vio_t.x();
        pose_marker.y = vio_t.y();
        pose_marker.z = vio_t.z();
        key_odometrys.points.push_back(pose_marker);
        odometry_buf.push(vio_t);
    }
    // publish the short trail behind the camera marker in RViz
    pub_key_odometrys.publish(key_odometrys);
    // only when loop closing is disabled: publish the raw VIO path
    if (!LOOP_CLOSURE)
    {
        geometry_msgs::PoseStamped pose_stamped;
        pose_stamped.header = pose_msg->header;
        pose_stamped.header.frame_id = "world";
        pose_stamped.pose.position.x = vio_t.x();
        pose_stamped.pose.position.y = vio_t.y();
        pose_stamped.pose.position.z = vio_t.z();
        no_loop_path.header = pose_msg->header;
        no_loop_path.header.frame_id = "world";
        no_loop_path.poses.push_back(pose_stamped);
        pub_vio_path.publish(no_loop_path);
    }
}
// Raw camera image callback: buffers the image for the process() thread and
// starts a new sequence when the image stream is discontinuous.
void image_callback(const sensor_msgs::ImageConstPtr &image_msg)
{
    if(!LOOP_CLOSURE)
        return;
    m_buf.lock();
    // store the image in image_buf
    image_buf.push(image_msg);
    m_buf.unlock();
   
    // detect an unstable camera stream: a gap of more than one second,
    // or timestamps going backwards
    if (last_image_time == -1)
        last_image_time = image_msg->header.stamp.toSec();
    else if (image_msg->header.stamp.toSec() - last_image_time > 1.0 || image_msg->header.stamp.toSec() < last_image_time)
    {
        ROS_WARN("image discontinue! detect a new sequence!");
        new_sequence();
    }
    last_image_time = image_msg->header.stamp.toSec();
}
// Keyframe pose callback: buffers the pose for the process() thread.
void pose_callback(const nav_msgs::Odometry::ConstPtr &pose_msg)
{
    if(!LOOP_CLOSURE)
        return;
    m_buf.lock();
    // store the keyframe pose message in pose_buf
    pose_buf.push(pose_msg);
    m_buf.unlock();
}
// Camera-IMU extrinsic callback: keeps tic (translation) and qic (rotation)
// up to date with the estimator's online calibration.
void extrinsic_callback(const nav_msgs::Odometry::ConstPtr &pose_msg)
{
    m_process.lock();
    // translation part of the camera-IMU extrinsic
    tic = Vector3d(pose_msg->pose.pose.position.x,
                   pose_msg->pose.pose.position.y,
                   pose_msg->pose.pose.position.z);
    // rotation part, stored as a rotation matrix
    qic = Quaterniond(pose_msg->pose.pose.orientation.w,
                      pose_msg->pose.pose.orientation.x,
                      pose_msg->pose.pose.orientation.y,
                      pose_msg->pose.pose.orientation.z).toRotationMatrix();
    m_process.unlock();
}
// Keyframe feature-point callback: buffers the point cloud for process().
void point_callback(const sensor_msgs::PointCloudConstPtr &point_msg)
{
    if(!LOOP_CLOSURE)
        return;
    m_buf.lock();
    // store the keyframe's feature points in point_buf
    point_buf.push(point_msg);
    m_buf.unlock();
}
// Relocalization result from the estimator: a relative pose (pose field),
// the relative yaw (twist.linear.x) and the keyframe index (twist.linear.y).
void relo_relative_pose_callback(const nav_msgs::Odometry::ConstPtr &pose_msg)
{
    Vector3d relative_t = Vector3d(pose_msg->pose.pose.position.x,
                                   pose_msg->pose.pose.position.y,
                                   pose_msg->pose.pose.position.z);
    Quaterniond relative_q;
    relative_q.w() = pose_msg->pose.pose.orientation.w;
    relative_q.x() = pose_msg->pose.pose.orientation.x;
    relative_q.y() = pose_msg->pose.pose.orientation.y;
    relative_q.z() = pose_msg->pose.pose.orientation.z;
    double relative_yaw = pose_msg->twist.twist.linear.x;
    int index = pose_msg->twist.twist.linear.y;
    //printf("receive index %d \n", index );
    // pack [t(3), q(wxyz), yaw] into the 8x1 loop_info vector
    Eigen::Matrix<double, 8, 1 > loop_info;
    loop_info << relative_t.x(), relative_t.y(), relative_t.z(),
                 relative_q.w(), relative_q.x(), relative_q.y(), relative_q.z(),
                 relative_yaw;
    // refresh the loop information stored on keyframe `index`
    posegraph.updateKeyFrameLoop(index, loop_info);

}
// Update keyframe `index` with the refined relative pose from relocalization.
// _loop_info layout: [t(0..2), q wxyz(3..6), yaw in degrees(7)].
void PoseGraph::updateKeyFrameLoop(int index, Eigen::Matrix<double, 8, 1 > &_loop_info)
{
    KeyFrame* kf = getKeyFrame(index);
    kf->updateLoop(_loop_info); // store the refined loop_info on the keyframe
    // accept only a plausible loop: |yaw| < 30 deg and translation < 20 m
    if (abs(_loop_info(7)) < 30.0 && Vector3d(_loop_info(0), _loop_info(1), _loop_info(2)).norm() < 20.0)
    {
        if (FAST_RELOCALIZATION)// not executed with the default configuration
        {
           ......
        }
    }
}

所有回调函数已经分析过了,然后是下面的代码,

// matched image pair shown in RViz when a loop is found
pub_match_img = n.advertise<sensor_msgs::Image>("match_image", 1000);
// camera frustum marker shown in RViz while the system runs
pub_camera_pose_visual = n.advertise<visualization_msgs::MarkerArray>("camera_pose_visual", 1000);
pub_key_odometrys = n.advertise<visualization_msgs::Marker>("key_odometrys", 1000);
pub_vio_path = n.advertise<nav_msgs::Path>("no_loop_path", 1000);
pub_match_points = n.advertise<sensor_msgs::PointCloud>("match_points", 100);

发布一些话题。

// Worker threads: process() consumes the message buffers and builds the pose
// graph; command() handles keyboard input.
std::thread measurement_process;
std::thread keyboard_command_process;

measurement_process = std::thread(process);
keyboard_command_process = std::thread(command);

command()线程是一些键盘操作,就不说了。主要是process()线程,

// Pop one time-synchronized (pose, image, point-cloud) triple from the buffers.
m_buf.lock();
if(!image_buf.empty() && !point_buf.empty() && !pose_buf.empty())
{
    // the oldest pose predates the oldest image: it can never be matched
    if (image_buf.front()->header.stamp.toSec() > pose_buf.front()->header.stamp.toSec())
    {
        pose_buf.pop();
        printf("throw pose at beginning\n");
    }
    // likewise for a point cloud older than the oldest image
    else if (image_buf.front()->header.stamp.toSec() > point_buf.front()->header.stamp.toSec())
    {
        point_buf.pop();
        printf("throw point at beginning\n");
    }
    // proceed only when image and point data reach the pose timestamp
    else if (image_buf.back()->header.stamp.toSec() >= pose_buf.front()->header.stamp.toSec() 
        && point_buf.back()->header.stamp.toSec() >= pose_buf.front()->header.stamp.toSec())
    {
        // take the oldest pose and drop any newer queued poses
        pose_msg = pose_buf.front();
        pose_buf.pop();
        while (!pose_buf.empty())
            pose_buf.pop();
        // discard images older than the chosen pose, then take the match
        while (image_buf.front()->header.stamp.toSec() < pose_msg->header.stamp.toSec())
            image_buf.pop();
        image_msg = image_buf.front();
        image_buf.pop();

        // same for the point cloud
        while (point_buf.front()->header.stamp.toSec() < pose_msg->header.stamp.toSec())
            point_buf.pop();
        point_msg = point_buf.front();
        point_buf.pop();
    }
}
m_buf.unlock();

得到相同时间下的image_msg、point_msg、pose_msg。

// skip the first SKIP_FIRST_CNT synchronized frames
if (skip_first_cnt < SKIP_FIRST_CNT)
{
    skip_first_cnt++;
    continue;
}
// after that, process only every (SKIP_CNT+1)-th frame
if (skip_cnt < SKIP_CNT)
{
    skip_cnt++;
    continue;
}
else
{
    skip_cnt = 0;
}
cv_bridge::CvImageConstPtr ptr;
// re-label "8UC1" as "mono8" so cv_bridge accepts the conversion
if (image_msg->encoding == "8UC1")
{
    sensor_msgs::Image img;
    img.header = image_msg->header;
    img.height = image_msg->height;
    img.width = image_msg->width;
    img.is_bigendian = image_msg->is_bigendian;
    img.step = image_msg->step;
    img.data = image_msg->data;
    img.encoding = "mono8";
    ptr = cv_bridge::toCvCopy(img, sensor_msgs::image_encodings::MONO8);
}
else
    ptr = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::MONO8);
    
// OpenCV image matrix behind the ROS message
cv::Mat image = ptr->image;

通过桥梁指针ptr将ROS图像消息转为OpenCV库可以识别的图像矩阵image

// keyframe pose from the estimator: translation T and rotation R
Vector3d T = Vector3d(pose_msg->pose.pose.position.x,
                      pose_msg->pose.pose.position.y,
                      pose_msg->pose.pose.position.z);
Matrix3d R = Quaterniond(pose_msg->pose.pose.orientation.w,
                         pose_msg->pose.pose.orientation.x,
                         pose_msg->pose.pose.orientation.y,
                         pose_msg->pose.pose.orientation.z).toRotationMatrix();
// only create a keyframe after moving at least SKIP_DIS since the last one
if((T - last_t).norm() > SKIP_DIS)
{
    vector<cv::Point3f> point_3d;// landmark positions in the world frame
    vector<cv::Point2f> point_2d_uv; // pixel coordinates
    vector<cv::Point2f> point_2d_normal;// normalized camera-plane coordinates
    vector<double> point_id;// feature ids

    for (unsigned int i = 0; i < point_msg->points.size(); i++)
    {
        cv::Point3f p_3d;
        p_3d.x = point_msg->points[i].x;
        p_3d.y = point_msg->points[i].y;
        p_3d.z = point_msg->points[i].z;
        point_3d.push_back(p_3d);

        // channels[i]: [0..1] normalized coords, [2..3] pixel coords, [4] id
        cv::Point2f p_2d_uv, p_2d_normal;
        double p_id;
        p_2d_normal.x = point_msg->channels[i].values[0];
        p_2d_normal.y = point_msg->channels[i].values[1];
        p_2d_uv.x = point_msg->channels[i].values[2];
        p_2d_uv.y = point_msg->channels[i].values[3];
        p_id = point_msg->channels[i].values[4];
        point_2d_normal.push_back(p_2d_normal);
        point_2d_uv.push_back(p_2d_uv);
        point_id.push_back(p_id);

        //printf("u %f, v %f \n", p_2d_uv.x, p_2d_uv.y);
    }
    // build the keyframe and hand it to the pose graph (loop detection on)
    KeyFrame* keyframe = new KeyFrame(pose_msg->header.stamp.toSec(), frame_index, T, R, image,
                       point_3d, point_2d_uv, point_2d_normal, point_id, sequence);   
    m_process.lock();
    start_flag = 1;
    posegraph.addKeyFrame(keyframe, 1);
    m_process.unlock();
    frame_index++;// running keyframe index
    last_t = T;
}

构造关键帧函数为

// Build a keyframe from the synchronized pose/image/point data, then compute
// BRIEF descriptors for both the tracked window points and freshly detected
// FAST corners. The full-size image is released unless DEBUG_IMAGE is set.
KeyFrame::KeyFrame(double _time_stamp, int _index, Vector3d &_vio_T_w_i, Matrix3d &_vio_R_w_i, cv::Mat &_image,
		           vector<cv::Point3f> &_point_3d, vector<cv::Point2f> &_point_2d_uv, vector<cv::Point2f> &_point_2d_norm,
		           vector<double> &_point_id, int _sequence)
{
	time_stamp = _time_stamp;// pose_msg->header.stamp.toSec()
	index = _index;// frame_index at construction (reassigned in addKeyFrame)
	vio_T_w_i = _vio_T_w_i;// VIO pose; may later be re-aligned
	vio_R_w_i = _vio_R_w_i;
	T_w_i = vio_T_w_i;// corrected pose, initialized from VIO
	R_w_i = vio_R_w_i;
	origin_vio_T = vio_T_w_i;// original VIO pose, kept unchanged
	origin_vio_R = vio_R_w_i;
	image = _image.clone();
	cv::resize(image, thumbnail, cv::Size(80, 60));
	point_3d = _point_3d;// world-frame landmark positions
	point_2d_uv = _point_2d_uv;// pixel coordinates
	point_2d_norm = _point_2d_norm;// normalized camera coordinates
	point_id = _point_id;// feature ids
	has_loop = false;
	loop_index = -1;
	has_fast_point = false;
	loop_info << 0, 0, 0, 0, 0, 0, 0, 0;
	sequence = _sequence;
	computeWindowBRIEFPoint();// descriptors for the tracked window points
	computeBRIEFPoint();// new FAST corners + descriptors for loop matching
	if(!DEBUG_IMAGE)
		image.release();
}
void KeyFrame::computeWindowBRIEFPoint()
{
	BriefExtractor extractor(BRIEF_PATTERN_FILE.c_str());
	for(int i = 0; i < (int)point_2d_uv.size(); i++)
	{
	    cv::KeyPoint key;
	    key.pt = point_2d_uv[i];
	    window_keypoints.push_back(key);
	}
	extractor(image, window_keypoints, window_brief_descriptors);
}

计算关键点window_keypoints的描述子window_brief_descriptors。详细代码参考DVision文件夹下的代码

// Detect new FAST corners over the whole image and compute their BRIEF
// descriptors; these are the candidates matched against other keyframes.
void KeyFrame::computeBRIEFPoint()
{
	BriefExtractor extractor(BRIEF_PATTERN_FILE.c_str());
	const int fast_th = 20; // corner detector response threshold
	if(1)
		cv::FAST(image, keypoints, fast_th, true);
	else
	{
		......
	}
	extractor(image, keypoints, brief_descriptors);
	// lift every corner to normalized camera-plane coordinates
	for (int i = 0; i < (int)keypoints.size(); i++)
	{
		Eigen::Vector3d tmp_p;
		m_camera->liftProjective(Eigen::Vector2d(keypoints[i].pt.x, keypoints[i].pt.y), tmp_p);
		cv::KeyPoint tmp_norm;
		tmp_norm.pt = cv::Point2f(tmp_p.x()/tmp_p.z(), tmp_p.y()/tmp_p.z());
		keypoints_norm.push_back(tmp_norm);
	}
}

提取FAST角点keypoints,并计算描述子brief_descriptors,得到归一化相机坐标keypoints_norm

接下来再往下阅读pose_graph_node.cpp的代码,

// Entry point: add the new keyframe to the pose graph; the second argument
// (flag_detect_loop) is 1 when called from process(). Body elided below.
posegraph.addKeyFrame(keyframe, 1);
void PoseGraph::addKeyFrame(KeyFrame* cur_kf, bool flag_detect_loop)
{
    代码有点多,一点一点分析
    ......
}

addKeyFrame()函数的具体代码实现如下,

Vector3d vio_P_cur;
Matrix3d vio_R_cur;
// the keyframe starts a new sequence: reset alignment and drift terms
if (sequence_cnt != cur_kf->sequence)
{
    sequence_cnt++;
    sequence_loop.push_back(0);
    w_t_vio = Eigen::Vector3d(0, 0, 0);
    w_r_vio = Eigen::Matrix3d::Identity();
    m_drift.lock();
    t_drift = Eigen::Vector3d(0, 0, 0);
    r_drift = Eigen::Matrix3d::Identity();
    m_drift.unlock();
}
// IMU pose in the world frame as estimated by VIO
cur_kf->getVioPose(vio_P_cur, vio_R_cur);
// apply the sequence alignment (identity/zero until a cross-sequence loop)
vio_P_cur = w_r_vio * vio_P_cur + w_t_vio;
vio_R_cur = w_r_vio *  vio_R_cur;
// write the aligned pose back into the keyframe
cur_kf->updateVioPose(vio_P_cur, vio_R_cur);
cur_kf->index = global_index;// global keyframe index
global_index++;
int loop_index = -1;
// flag_detect_loop == 1 when called from process()
if (flag_detect_loop)
{
    TicToc tmp_t;
    loop_index = detectLoop(cur_kf, cur_kf->index);
}
else
{
    addKeyFrameIntoVoc(cur_kf);
}
// Query the DBoW2 database for a loop candidate; returns its index or -1.
int PoseGraph::detectLoop(KeyFrame* keyframe, int frame_index)
{
         detectLoop()函数代码量比较大,分段分析,
}

detectLoop()函数的具体代码实现如下,

cv::Mat compressed_image;
// with DEBUG_IMAGE enabled, keep a half-size copy of every keyframe image
if (DEBUG_IMAGE)
{
    int feature_num = keyframe->keypoints.size();
    // shrink the keyframe image (roughly half of 752x480)
    cv::resize(keyframe->image, compressed_image, cv::Size(376, 240));
    // overlay the feature count
    putText(compressed_image, "feature_num:" + to_string(feature_num), cv::Point2f(10, 10), CV_FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255));
    // remember it for later visualization of loop candidates
    image_pool[frame_index] = compressed_image;
}
QueryResults ret;
// query the database for the 4 best matches, excluding the 50 newest frames
db.query(keyframe->brief_descriptors, ret, 4, frame_index - 50);
// then add the current keyframe's descriptors to the database
db.add(keyframe->brief_descriptors);
bool find_loop = false;
cv::Mat loop_result;
if (DEBUG_IMAGE)
{
    loop_result = compressed_image.clone();
    if (ret.size() > 0)
        // overlay the best (neighbour) similarity score
        putText(loop_result, "neighbour score:" + to_string(ret[0].Score), cv::Point2f(10, 50), CV_FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(255));
}
if (DEBUG_IMAGE)
{
    for (unsigned int i = 0; i < ret.size(); i++)
    {
        int tmp_index = ret[i].Id;
        // fetch the candidate's cached image from the pool
        auto it = image_pool.find(tmp_index);
        cv::Mat tmp_image = (it->second).clone();
        // overlay its index and score
        putText(tmp_image, "index:  " + to_string(tmp_index) + "loop score:" + to_string(ret[i].Score), cv::Point2f(10, 50), CV_FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(255));
        // concatenate the candidates horizontally
        cv::hconcat(loop_result, tmp_image, loop_result);
    }
}
// a loop is plausible when the best score is high enough and some other
// candidate also clears an absolute threshold
if (ret.size() >= 1 &&ret[0].Score > 0.05)
	for (unsigned int i = 1; i < ret.size(); i++)
	{
	    //if (ret[i].Score > ret[0].Score * 0.3)
	    if (ret[i].Score > 0.015)
	    {          
	        find_loop = true;
	        int tmp_index = ret[i].Id;
	        // disabled debug visualization
	        if (DEBUG_IMAGE && 0)
	        {
	            ......
	        }
	    }
	
	}
// never report loops within the first 50 keyframes
if (find_loop && frame_index > 50)
{
    int min_index = -1;
    for (unsigned int i = 0; i < ret.size(); i++)
    {
        if (min_index == -1 || (ret[i].Id < min_index && ret[i].Score > 0.015))
            min_index = ret[i].Id;
    }
    // return the oldest (smallest-index) qualifying candidate
    return min_index;
}
else
    return -1;

detectLoop()函数实现的功能就是在db库中检测回环帧,如果有就返回回环帧的索引。

返回addKeyFrame()函数接着往下读,

// fetch the keyframe at the detected loop index and try to verify it
KeyFrame* old_kf = getKeyFrame(loop_index);
if (cur_kf->findConnection(old_kf))
{
    ......
}

findConnection()函数的具体代码实现如下,

// Working sets for loop verification: the current keyframe's tracked points
// and their prospective matches in the loop-candidate (old) keyframe.
vector<cv::Point2f> matched_2d_cur, matched_2d_old;
vector<cv::Point2f> matched_2d_cur_norm, matched_2d_old_norm;
vector<cv::Point3f> matched_3d;
vector<double> matched_id;
vector<uchar> status;

matched_3d = point_3d;// world-frame landmark positions
matched_2d_cur = point_2d_uv;// pixel coordinates
matched_2d_cur_norm = point_2d_norm;// normalized camera coordinates
matched_id = point_id;// feature ids
// match current-window descriptors against the old keyframe's FAST corners
searchByBRIEFDes(matched_2d_old, matched_2d_old_norm, status, old_kf->brief_descriptors, old_kf->keypoints, old_kf->keypoints_norm);
// drop every entry whose descriptor match failed (status == 0)
reduceVector(matched_2d_cur, status);
reduceVector(matched_2d_old, status);
reduceVector(matched_2d_cur_norm, status);
reduceVector(matched_2d_old_norm, status);
reduceVector(matched_3d, status);
reduceVector(matched_id, status);

把当前关键帧本来就有的特征点与回环帧的新提取的特征点进行匹配,得到matched_2d_old、matched_2d_old_norm和status。并且根据status的值剔除没有匹配成功的特征点的相关变量。匹配代码具体实现如下,

// For each descriptor of the current keyframe's tracked (window) points, find
// its best match among the old keyframe's corners; status[i] records success.
void KeyFrame::searchByBRIEFDes(std::vector<cv::Point2f> &matched_2d_old,
								std::vector<cv::Point2f> &matched_2d_old_norm,
                                std::vector<uchar> &status,
                                const std::vector<BRIEF::bitset> &descriptors_old,
                                const std::vector<cv::KeyPoint> &keypoints_old,
                                const std::vector<cv::KeyPoint> &keypoints_old_norm)
{
    // iterate over the descriptors of the points tracked in this keyframe
    for(int i = 0; i < (int)window_brief_descriptors.size(); i++)
    {
        cv::Point2f pt(0.f, 0.f);
        cv::Point2f pt_norm(0.f, 0.f);
        // look up the matching corner in the old keyframe (pixel + normalized)
        if (searchInAera(window_brief_descriptors[i], descriptors_old, keypoints_old, keypoints_old_norm, pt, pt_norm))
          status.push_back(1);
        else
          status.push_back(0);
        // a (0,0) placeholder stays when no match was found; reduceVector
        // removes those entries afterwards using status
        matched_2d_old.push_back(pt);
        matched_2d_old_norm.push_back(pt_norm);
    }

}
Eigen::Vector3d PnP_T_old;
Eigen::Matrix3d PnP_R_old;
Eigen::Vector3d relative_t;
Quaterniond relative_q;
if ((int)matched_2d_cur.size() > MIN_LOOP_NUM)//MIN_LOOP_NUM=25
{
	status.clear();
	// PnP + RANSAC: estimate the old frame's pose (PnP_T_old, PnP_R_old)
	// from the 3D-2D matches and mark outliers in status
    PnPRANSAC(matched_2d_old_norm, matched_3d, status, PnP_T_old, PnP_R_old);
    reduceVector(matched_2d_cur, status);
    reduceVector(matched_2d_old, status);
    reduceVector(matched_2d_cur_norm, status);
    reduceVector(matched_2d_old_norm, status);
    reduceVector(matched_3d, status);
    reduceVector(matched_id, status);
    #if 1
    	if (DEBUG_IMAGE)
        {
        	int gap = 10;
        	cv::Mat gap_image(ROW, gap, CV_8UC1, cv::Scalar(255, 255, 255));
            cv::Mat gray_img, loop_match_img;
            cv::Mat old_img = old_kf->image;
            // stitch current and old images side by side with a gap, colorize
            cv::hconcat(image, gap_image, gap_image);
            cv::hconcat(gap_image, old_img, gray_img);
            cvtColor(gray_img, loop_match_img, CV_GRAY2RGB);
            for(int i = 0; i< (int)matched_2d_cur.size(); i++)
            {
                cv::Point2f cur_pt = matched_2d_cur[i];
                // mark the matched points of the current keyframe
                cv::circle(loop_match_img, cur_pt, 5, cv::Scalar(0, 255, 0));
            }
            for(int i = 0; i< (int)matched_2d_old.size(); i++)
            {
                cv::Point2f old_pt = matched_2d_old[i];
                old_pt.x += (COL + gap);
                // mark the matched points of the loop (old) keyframe
                cv::circle(loop_match_img, old_pt, 5, cv::Scalar(0, 255, 0));
            }
            for (int i = 0; i< (int)matched_2d_cur.size(); i++)
            {
                cv::Point2f old_pt = matched_2d_old[i];
                old_pt.x += (COL + gap) ;
                // draw a line between each matched pair
                cv::line(loop_match_img, matched_2d_cur[i], old_pt, cv::Scalar(0, 255, 0), 2, 8, 0);
            }
            // caption above the current keyframe image
            cv::Mat notation(50, COL + gap + COL, CV_8UC3, cv::Scalar(255, 255, 255));
            putText(notation, "current frame: " + to_string(index) + "  sequence: " + to_string(sequence), cv::Point2f(20, 30), CV_FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(255), 3);
            // caption above the loop keyframe image
            putText(notation, "previous frame: " + to_string(old_kf->index) + "  sequence: " + to_string(old_kf->sequence), cv::Point2f(20 + COL + gap, 30), CV_FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(255), 3);
            // stack the caption bar on top of the match image
            cv::vconcat(notation, loop_match_img, loop_match_img);
            // publish only if enough inliers survived RANSAC
            if ((int)matched_2d_cur.size() > MIN_LOOP_NUM)
            {
                // shrink the match image to half size
            	cv::Mat thumbimage;
            	cv::resize(loop_match_img, thumbimage, cv::Size(loop_match_img.cols / 2, loop_match_img.rows / 2));
            	// wrap it in a ROS image message
    	    	sensor_msgs::ImagePtr msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", thumbimage).toImageMsg();
                msg->header.stamp = ros::Time(time_stamp);
                // publish for display in RViz
    	    	pub_match_img.publish(msg);
            }
        }
    #endif
}
if ((int)matched_2d_cur.size() > MIN_LOOP_NUM)
{
    // relative pose between the loop frame and the current keyframe,
    // plus the yaw difference in degrees
    relative_t = PnP_R_old.transpose() * (origin_vio_T - PnP_T_old);
    relative_q = PnP_R_old.transpose() * origin_vio_R;
    relative_yaw = Utility::normalizeAngle(Utility::R2ypr(origin_vio_R).x() - Utility::R2ypr(PnP_R_old).x());
    // accept the loop only if |yaw| < 30 deg and translation < 20 m
    if (abs(relative_yaw) < 30.0 && relative_t.norm() < 20.0)
    {
        // record the loop: candidate index plus relative pose and yaw
    	has_loop = true;
    	loop_index = old_kf->index;
    	loop_info << relative_t.x(), relative_t.y(), relative_t.z(),
    	             relative_q.w(), relative_q.x(), relative_q.y(), relative_q.z(),
    	             relative_yaw;
    	// not executed with the default configuration
    	if(FAST_RELOCALIZATION)
    	{
		    sensor_msgs::PointCloud msg_match_points;
		    msg_match_points.header.stamp = ros::Time(time_stamp);
		    for (int i = 0; i < (int)matched_2d_old_norm.size(); i++)
		    {
	            geometry_msgs::Point32 p;
	            p.x = matched_2d_old_norm[i].x;
	            p.y = matched_2d_old_norm[i].y;
	            p.z = matched_id[i];
	            msg_match_points.points.push_back(p);
		    }
		    Eigen::Vector3d T = old_kf->T_w_i; 
		    Eigen::Matrix3d R = old_kf->R_w_i;
		    Quaterniond Q(R);
		    // channel layout: [t(3), q wxyz(4), current keyframe index]
		    sensor_msgs::ChannelFloat32 t_q_index;
		    t_q_index.values.push_back(T.x());
		    t_q_index.values.push_back(T.y());
		    t_q_index.values.push_back(T.z());
		    t_q_index.values.push_back(Q.w());
		    t_q_index.values.push_back(Q.x());
		    t_q_index.values.push_back(Q.y());
		    t_q_index.values.push_back(Q.z());
		    t_q_index.values.push_back(index);
		    msg_match_points.channels.push_back(t_q_index);
		    pub_match_points.publish(msg_match_points);
    	}
        return true;
    }
}

findConnection()函数至此完结。

// remember the earliest loop index seen so far
if (earliest_loop_index > loop_index || earliest_loop_index == -1)
    earliest_loop_index = loop_index;
// poses of the loop keyframe and of the current keyframe
Vector3d w_P_old, w_P_cur, vio_P_cur;
Matrix3d w_R_old, w_R_cur, vio_R_cur;
old_kf->getVioPose(w_P_old, w_R_old);
cur_kf->getVioPose(vio_P_cur, vio_R_cur);
// re-localize the current keyframe from the loop frame's pose and the
// relative pose stored by findConnection()
Vector3d relative_t;
Quaterniond relative_q;
relative_t = cur_kf->getLoopRelativeT();
relative_q = (cur_kf->getLoopRelativeQ()).toRotationMatrix();
w_P_cur = w_R_old * relative_t + w_P_old;
w_R_cur = w_R_old * relative_q;
// yaw-only rotational drift and translational drift of the current keyframe
double shift_yaw;
Matrix3d shift_r;
Vector3d shift_t; 
shift_yaw = Utility::R2ypr(w_R_cur).x() - Utility::R2ypr(vio_R_cur).x();
shift_r = Utility::ypr2R(Vector3d(shift_yaw, 0, 0));
shift_t = w_P_cur - w_R_cur * vio_R_cur.transpose() * vio_P_cur;
// first loop between two different sequences: store the shift as the
// sequence alignment (w_r_vio/w_t_vio) and apply it to every keyframe of
// the current sequence, so the new sequence lines up with the old one
if (old_kf->sequence != cur_kf->sequence && sequence_loop[cur_kf->sequence] == 0)
{  
    w_r_vio = shift_r;
    w_t_vio = shift_t;
    vio_P_cur = w_r_vio * vio_P_cur + w_t_vio;
    vio_R_cur = w_r_vio *  vio_R_cur;
    cur_kf->updateVioPose(vio_P_cur, vio_R_cur);
    list<KeyFrame*>::iterator it = keyframelist.begin();
    for (; it != keyframelist.end(); it++)   
    {
        if((*it)->sequence == cur_kf->sequence)
        {
            Vector3d vio_P_cur;
            Matrix3d vio_R_cur;
            (*it)->getVioPose(vio_P_cur, vio_R_cur);
            vio_P_cur = w_r_vio * vio_P_cur + w_t_vio;
            vio_R_cur = w_r_vio *  vio_R_cur;
            (*it)->updateVioPose(vio_P_cur, vio_R_cur);
        }
    }
    sequence_loop[cur_kf->sequence] = 1;
}
// queue this keyframe's index in optimize_buf (consumed by the pose-graph
// optimization routine, not shown here)
m_optimize_buf.lock();
optimize_buf.push(cur_kf->index);
m_optimize_buf.unlock();
Vector3d P;
Matrix3d R;
cur_kf->getVioPose(P, R);
// apply the current drift correction
P = r_drift * P + t_drift;
R = r_drift * R;
// store the corrected pose on the keyframe
cur_kf->updatePose(P, R);
Quaterniond Q{R};
// build a ROS pose message and append it to this sequence's path for RViz
geometry_msgs::PoseStamped pose_stamped;
pose_stamped.header.stamp = ros::Time(cur_kf->time_stamp);
pose_stamped.header.frame_id = "world";
pose_stamped.pose.position.x = P.x() + VISUALIZATION_SHIFT_X;
pose_stamped.pose.position.y = P.y() + VISUALIZATION_SHIFT_Y;
pose_stamped.pose.position.z = P.z();
pose_stamped.pose.orientation.x = Q.x();
pose_stamped.pose.orientation.y = Q.y();
pose_stamped.pose.orientation.z = Q.z();
pose_stamped.pose.orientation.w = Q.w();
path[sequence_cnt].poses.push_back(pose_stamped);
path[sequence_cnt].header = pose_stamped.header;
// optionally append the corrected pose to the result file (csv)
if (SAVE_LOOP_PATH)
{
    ofstream loop_path_file(VINS_RESULT_PATH, ios::app);
    loop_path_file.setf(ios::fixed, ios::floatfield);
    loop_path_file.precision(0);
    loop_path_file << cur_kf->time_stamp * 1e9 << ",";
    loop_path_file.precision(5);
    loop_path_file  << P.x() << ","
          << P.y() << ","
          << P.z() << ","
          << Q.w() << ","
          << Q.x() << ","
          << Q.y() << ","
          << Q.z() << ","
          << endl;
    loop_path_file.close();
}
// draw sequential edges to the previous few keyframes (off by default)
if (SHOW_S_EDGE)
{
    // reverse iterator: walk back from the newest keyframe
    list<KeyFrame*>::reverse_iterator rit = keyframelist.rbegin();
    for (int i = 0; i < 4; i++)
    {
        if (rit == keyframelist.rend())
            break;
        Vector3d conncected_P;
        Matrix3d connected_R;
        if((*rit)->sequence == cur_kf->sequence)
        {
            (*rit)->getPose(conncected_P, connected_R);
            posegraph_visualization->add_edge(P, conncected_P);
        }
        rit++;
    }
}
// draw a loop edge between the current keyframe and its loop keyframe
if (SHOW_L_EDGE)
{
    if (cur_kf->has_loop)
    {
        //printf("has loop \n");
        KeyFrame* connected_KF = getKeyFrame(cur_kf->loop_index);
        Vector3d connected_P,P0;
        Matrix3d connected_R,R0;
        connected_KF->getPose(connected_P, connected_R);
        //cur_kf->getVioPose(P0, R0);
        cur_kf->getPose(P0, R0);
        if(cur_kf->sequence > 0)
        {
            //printf("add loop into visual \n");
            posegraph_visualization->add_loopedge(P0, connected_P + Vector3d(VISUALIZATION_SHIFT_X, VISUALIZATION_SHIFT_Y, 0));
        }
        
    }
}
// finally register the keyframe and publish the visualization topics
keyframelist.push_back(cur_kf);
publish();

你可能感兴趣的:(笔记)