In the previous chapter we walked through the main function and the LIO pipeline. Since that code has already been covered in detail for FAST-LIO2, we will not repeat it for R3LIVE; instead we turn to the focus of this series, the VIO part.
As analyzed before, the bulk of R3LIVE's formula derivations lies in the VIO, so let us work through this module carefully. As mentioned in the earlier post 《经典文献阅读之–R3LIVE》, R3LIVE splits the VIO into two steps: first, map points are tracked through frame-to-frame optical flow, and the system state is estimated by minimizing the PnP reprojection error of the tracked points; second, the state is further refined by minimizing the frame-to-map photometric error of the frames in which those map points appear. This is the main content we need to understand.
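Written out schematically (a simplified summary consistent with the paper's formulation, not a verbatim copy of its equations), the two residuals these steps minimize are

$$
\mathbf{r}_{\text{reproj}} = \pi\!\left( {}^{C}\mathbf{R}_{W}\, {}^{W}\mathbf{p} + {}^{C}\mathbf{t}_{W} \right) - \tilde{\mathbf{u}},
\qquad
\mathbf{r}_{\text{photo}} = \mathbf{c}\!\left( \pi\!\left( {}^{C}\mathbf{R}_{W}\, {}^{W}\mathbf{p} + {}^{C}\mathbf{t}_{W} \right) \right) - \bar{\mathbf{c}},
$$

where ${}^{W}\mathbf{p}$ is a tracked map point, $\tilde{\mathbf{u}}$ is its pixel location found by optical flow, $\pi(\cdot)$ is the pinhole projection, $\mathbf{c}(\cdot)$ is the observed color at the projected pixel, and $\bar{\mathbf{c}}$ is the color stored in the map point. Both residuals drive an ESIKF update of the state.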
First, in r3live_vio.cpp, the images are received via callbacks and handed to a worker thread:
void R3LIVE::image_comp_callback( const sensor_msgs::CompressedImageConstPtr &msg )
{
    std::unique_lock< std::mutex > lock2( mutex_image_callback );
    if ( sub_image_typed == 1 )
    {
        return; // Avoid subscribe the same image twice.
    }
    sub_image_typed = 2;
    g_received_compressed_img_msg.push_back( msg );
    if ( g_flag_if_first_rec_img )
    {
        g_flag_if_first_rec_img = 0;
        m_thread_pool_ptr->commit_task( &R3LIVE::service_process_img_buffer, this );
    }
    return;
}

// ANCHOR - image_callback
void R3LIVE::image_callback( const sensor_msgs::ImageConstPtr &msg )
{
    std::unique_lock< std::mutex > lock( mutex_image_callback );
    if ( sub_image_typed == 2 )
    {
        return; // Avoid subscribe the same image twice.
    }
    sub_image_typed = 1;
    // On the first received image, start a thread that consumes the compressed images
    // received in image_comp_callback; internally it loops calling process_image().
    if ( g_flag_if_first_rec_img )
    {
        g_flag_if_first_rec_img = 0;
        m_thread_pool_ptr->commit_task( &R3LIVE::service_process_img_buffer, this );
    }
    // Convert to OpenCV format.
    cv::Mat temp_img = cv_bridge::toCvCopy( msg, sensor_msgs::image_encodings::BGR8 )->image.clone();
    // Preprocess the image, then push it into the m_queue_image_with_pose queue.
    process_image( temp_img, msg->header.stamp.toSec() );
}
Here the pushing and processing of the image buffer are handled through the thread pool, via m_thread_pool_ptr->commit_task. At the end of each loop iteration of service_process_img_buffer, process_image is called:
void R3LIVE::service_process_img_buffer()
{
    while ( 1 )
    {
        // To avoid uncompress so much image buffer, reducing the use of memory.
        // If m_queue_image_with_pose holds more than 4 images, they have not been consumed
        // yet; suspend this preprocessing thread for a while (effectively dropping data).
        if ( m_queue_image_with_pose.size() > 4 )
        {
            while ( m_queue_image_with_pose.size() > 4 )
            {
                ros::spinOnce();
                std::this_thread::sleep_for( std::chrono::milliseconds( 2 ) );
                std::this_thread::yield();
            }
        }
        cv::Mat image_get;
        double  img_rec_time;
        if ( sub_image_typed == 2 )
        {
            while ( g_received_compressed_img_msg.size() == 0 )
            {
                ros::spinOnce();
                std::this_thread::sleep_for( std::chrono::milliseconds( 1 ) );
                std::this_thread::yield();
            }
            sensor_msgs::CompressedImageConstPtr msg = g_received_compressed_img_msg.front();
            try
            {
                cv_bridge::CvImagePtr cv_ptr_compressed = cv_bridge::toCvCopy( msg, sensor_msgs::image_encodings::BGR8 );
                img_rec_time = msg->header.stamp.toSec();
                image_get = cv_ptr_compressed->image;
                cv_ptr_compressed->image.release();
            }
            catch ( cv_bridge::Exception &e )
            {
                printf( "Could not convert from '%s' to 'bgr8' !!! ", msg->format.c_str() );
            }
            mutex_image_callback.lock();
            g_received_compressed_img_msg.pop_front();
            mutex_image_callback.unlock();
        }
        else
        {
            while ( g_received_img_msg.size() == 0 )
            {
                ros::spinOnce();
                std::this_thread::sleep_for( std::chrono::milliseconds( 1 ) );
                std::this_thread::yield();
            }
            sensor_msgs::ImageConstPtr msg = g_received_img_msg.front();
            image_get = cv_bridge::toCvCopy( msg, sensor_msgs::image_encodings::BGR8 )->image.clone();
            img_rec_time = msg->header.stamp.toSec();
            mutex_image_callback.lock();
            g_received_img_msg.pop_front();
            mutex_image_callback.unlock();
        }
        process_image( image_get, img_rec_time );
    }
}
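As an aside, the commit_task pattern is easy to picture with a minimal single-worker task queue. The sketch below is only an illustration of the pattern, not R3LIVE's actual pool (which binds member functions and can run several workers); a long-lived task such as service_process_img_buffer simply never returns and occupies its worker:

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>

// Minimal single-worker sketch of a commit_task-style thread pool (illustration only).
class MiniThreadPool
{
  public:
    MiniThreadPool() : m_worker( [ this ] { run(); } ) {}
    ~MiniThreadPool()
    {
        {
            std::lock_guard< std::mutex > lk( m_mtx );
            m_stop = true;
        }
        m_cv.notify_all();
        m_worker.join();
    }
    void commit_task( std::function< void() > task )
    {
        {
            std::lock_guard< std::mutex > lk( m_mtx );
            m_tasks.push_back( std::move( task ) );
        }
        m_cv.notify_one();
    }

  private:
    void run()
    {
        for ( ;; )
        {
            std::function< void() > task;
            {
                std::unique_lock< std::mutex > lk( m_mtx );
                m_cv.wait( lk, [ this ] { return m_stop || !m_tasks.empty(); } );
                if ( m_stop && m_tasks.empty() )
                    return;
                task = std::move( m_tasks.front() );
                m_tasks.pop_front();
            }
            task(); // A long-lived task (e.g. an image-processing loop) never returns.
        }
    }
    std::mutex                            m_mtx;
    std::condition_variable               m_cv;
    std::deque< std::function< void() > > m_tasks;
    bool                                  m_stop = false;
    std::thread                           m_worker; // Declared last so it starts after the other members.
};

Usage mirrors the calls above, e.g. pool.commit_task( [ & ] { node->service_process_img_buffer(); } ) as a hypothetical lambda binding; the real pool accepts a member-function pointer plus this directly.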
The process_image function does three things: it checks the timestamp and throttles the frame rate; on the first frame it initializes the camera parameters and launches the service_pub_rgb_maps and service_VIO_update threads; and it undistorts and preprocesses each image.
void R3LIVE::process_image( cv::Mat &temp_img, double msg_time )
{
    cv::Mat img_get;
    if ( temp_img.rows == 0 )
    {
        cout << "Process image error, image rows =0 " << endl;
        return;
    }
    // Check whether the timestamp jumps backwards.
    if ( msg_time < last_accept_time )
    {
        cout << "Error, image time revert!!" << endl;
        return;
    }
    // Frequency control.
    if ( ( msg_time - last_accept_time ) < ( 1.0 / m_control_image_freq ) * 0.9 )
    {
        return;
    }
    last_accept_time = msg_time;
    // First run only.
    if ( m_camera_start_ros_tim < 0 )
    {
        m_camera_start_ros_tim = msg_time;
        m_vio_scale_factor = m_vio_image_width * m_image_downsample_ratio / temp_img.cols; // 320 * 24
        // load_vio_parameters();
        // Set the initial state and parameters (the LIO result is taken as the initial
        // state; the remaining initial values come from the configuration file).
        set_initial_camera_parameter( g_lio_state, m_camera_intrinsic.data(), m_camera_dist_coeffs.data(), m_camera_ext_R.data(),
                                      m_camera_ext_t.data(), m_vio_scale_factor );
        cv::eigen2cv( g_cam_K, intrinsic );
        cv::eigen2cv( g_cam_dist, dist_coeffs );
        // Precompute the undistortion maps.
        initUndistortRectifyMap( intrinsic, dist_coeffs, cv::Mat(), intrinsic,
                                 cv::Size( m_vio_image_width / m_vio_scale_factor, m_vio_image_heigh / m_vio_scale_factor ),
                                 CV_16SC2, m_ud_map1, m_ud_map2 );
        // Launch two threads.
        m_thread_pool_ptr->commit_task( &R3LIVE::service_pub_rgb_maps, this );
        m_thread_pool_ptr->commit_task( &R3LIVE::service_VIO_update, this ); // The VIO thread.
        // Initialize the data recorder.
        m_mvs_recorder.init( g_cam_K, m_vio_image_width / m_vio_scale_factor, &m_map_rgb_pts );
        m_mvs_recorder.set_working_dir( m_map_output_dir );
    }
    if ( m_image_downsample_ratio != 1.0 )
    {
        cv::resize( temp_img, img_get, cv::Size( m_vio_image_width / m_vio_scale_factor, m_vio_image_heigh / m_vio_scale_factor ) );
    }
    else
    {
        img_get = temp_img; // clone ?
    }
    std::shared_ptr< Image_frame > img_pose = std::make_shared< Image_frame >( g_cam_K );
    // Optionally publish the raw image.
    if ( m_if_pub_raw_img )
    {
        img_pose->m_raw_img = img_get;
    }
    // Undistort img_get into img_pose->m_img.
    cv::remap( img_get, img_pose->m_img, m_ud_map1, m_ud_map2, cv::INTER_LINEAR );
    // cv::imshow("sub Img", img_pose->m_img);
    img_pose->m_timestamp = msg_time;
    img_pose->init_cubic_interpolation(); // Convert to grayscale.
    img_pose->image_equalize();           // Histogram equalization.
    m_camera_data_mutex.lock();
    m_queue_image_with_pose.push_back( img_pose ); // Push into the queue.
    m_camera_data_mutex.unlock();
    total_frame_count++;
    if ( m_queue_image_with_pose.size() > buffer_max_frame )
    {
        buffer_max_frame = m_queue_image_with_pose.size();
    }
    // cout << "Image queue size = " << m_queue_image_with_pose.size() << endl;
}
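init_cubic_interpolation() and image_equalize() belong to the Image_frame class and are not reproduced here, but conceptually they reduce to a grayscale conversion followed by contrast-limited histogram equalization. A minimal OpenCV sketch of that idea (the clip limit and tile size are illustrative assumptions, not R3LIVE's exact values):

#include <opencv2/opencv.hpp>

// Sketch only: grayscale conversion + CLAHE, approximating what
// init_cubic_interpolation() / image_equalize() prepare for photometric tracking.
cv::Mat preprocess_sketch( const cv::Mat &bgr )
{
    cv::Mat gray, equalized;
    cv::cvtColor( bgr, gray, cv::COLOR_BGR2GRAY );
    cv::Ptr< cv::CLAHE > clahe = cv::createCLAHE( 3.0, cv::Size( 8, 8 ) ); // illustrative parameters
    clahe->apply( gray, equalized ); // contrast-limited adaptive histogram equalization
    return equalized;
}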
This chapter now turns to the main VIO thread. It first checks whether lidar data has been received: the visual VIO only runs once the first lidar scan has arrived. For the first image frame, it waits until the number of points in the point-cloud map exceeds a threshold, then selects active map points and projects them onto the image as the initial tracking points, initializing the tracker op_track.
while ( g_camera_lidar_queue.m_if_have_lidar_data == 0 ) // Wait in a loop until the first lidar scan arrives.
{
    ros::spinOnce();
    std::this_thread::sleep_for( std::chrono::milliseconds( THREAD_SLEEP_TIM ) );
    std::this_thread::yield();
    continue;
}
if ( m_queue_image_with_pose.size() == 0 ) // Check whether a preprocessed image is available.
{
    ros::spinOnce();
    std::this_thread::sleep_for( std::chrono::milliseconds( THREAD_SLEEP_TIM ) );
    std::this_thread::yield(); // Yield the CPU when other threads need it.
    continue;
}
m_camera_data_mutex.lock();
// If more frames are buffered in m_queue_image_with_pose than allowed, run tracking on
// the oldest frames and pop them.
while ( m_queue_image_with_pose.size() > m_maximum_image_buffer )
{
    cout << ANSI_COLOR_BLUE_BOLD << "=== Pop image! current queue size = " << m_queue_image_with_pose.size() << " ===" << ANSI_COLOR_RESET
         << endl;
    op_track.track_img( m_queue_image_with_pose.front(), -20 ); // Optical-flow tracking; a projected 3D map point is rejected when its pixel
                                                                // distance exceeds the threshold (-20: a negative value disables rejection).
    m_queue_image_with_pose.pop_front();
}
// Take the Image_frame structure corresponding to the image.
std::shared_ptr< Image_frame > img_pose = m_queue_image_with_pose.front();
double message_time = img_pose->m_timestamp;
m_queue_image_with_pose.pop_front();
m_camera_data_mutex.unlock();
g_camera_lidar_queue.m_last_visual_time = img_pose->m_timestamp + g_lio_state.td_ext_i2c; // Image time plus the time offset.
// Record the frame index of this image frame.
img_pose->set_frame_idx( g_camera_frame_idx );
tim.tic( "Frame" );
// First frame only.
if ( g_camera_frame_idx == 0 )
{
    std::vector< cv::Point2f >                pts_2d_vec;  // Pixel coordinates of the selected map points projected into the image.
    std::vector< std::shared_ptr< RGB_pts > > rgb_pts_vec; // The selected map points.
    // while ( ( m_map_rgb_pts.is_busy() ) || ( ( m_map_rgb_pts.m_rgb_pts_vec.size() <= 100 ) ) )
    // Check that the map holds enough points; the LIO module keeps calling
    // Global_map::append_points_to_global_map to grow the global map.
    while ( ( ( m_map_rgb_pts.m_rgb_pts_vec.size() <= 100 ) ) )
    {
        // Too few map points; wait for data to accumulate.
        ros::spinOnce();
        std::this_thread::sleep_for( std::chrono::milliseconds( 1 ) );
    }
    // Take the current LIO state and set the camera state from it using the extrinsics.
    // For the first frame, the motion is assumed static.
    set_image_pose( img_pose, g_lio_state ); // For first frame pose, we suppose that the motion is static.
    // Ask the global map module to select points according to the camera state.
    m_map_rgb_pts.selection_points_for_projection( img_pose, &rgb_pts_vec, &pts_2d_vec, m_track_windows_size / m_vio_scale_factor );
    // Initialize the tracking module.
    op_track.init( img_pose, rgb_pts_vec, pts_2d_vec ); // Initialize the optical flow.
    g_camera_frame_idx++;                               // Advance the frame index.
    continue;
}
This step is similar to the LIO handling described in the previous chapter: the timestamps at the heads of the camera and lidar queues are compared, and if the lidar timestamp is earlier, the VIO thread waits for the LIO thread to finish processing the older scans.
g_camera_frame_idx++;
tim.tic( "Wait" );
// if_camera_can_process(): returns true when lidar data exists and the oldest scan in the
// lidar buffer is newer than the timestamp of the image currently being processed.
while ( g_camera_lidar_queue.if_camera_can_process() == false )
{
    // Otherwise, loop here and let the lidar data be processed first.
    ros::spinOnce();
    std::this_thread::sleep_for( std::chrono::milliseconds( THREAD_SLEEP_TIM ) );
    std::this_thread::yield();
    cv_keyboard_callback();
}
g_cost_time_logger.record( tim, "Wait" );
m_mutex_lio_process.lock();
tim.tic( "Frame" );
tim.tic( "Track_img" );
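The comment above fully determines the ordering logic, so a minimal sketch of the check (an illustration of the described behavior, not the actual member function) is simply:

// Sketch of the ordering check described above: the image may only be processed once
// every lidar scan older than it has been consumed by the LIO thread.
bool if_camera_can_process_sketch( bool if_have_lidar_data, double oldest_lidar_time, double image_time )
{
    return if_have_lidar_data && ( oldest_lidar_time > image_time );
}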
Next comes the preintegration part, which uses the IMU measurements to propagate the pose up to the time of the current image.
// Preintegrate the body state from the last LIO update to the current frame time.
if ( vio_preintegration( g_lio_state, state_out, img_pose->m_timestamp + g_lio_state.td_ext_i2c ) == false )
{
    // If the image timestamp is older than the current g_lio_state time, something went
    // wrong; skip this image frame.
    m_mutex_lio_process.unlock();
    continue;
}
set_image_pose( img_pose, state_out ); // Set the state preintegrated from the last LIO update as this frame's pose.
Below is the full code of the vio_preintegration function. It gathers from imu_buffer_vio all IMU measurements up to the current frame time, feeds them into imu_preintegration to propagate the state forward, and writes the result into state_out.
bool R3LIVE::vio_preintegration( StatesGroup &state_in, StatesGroup &state_out, double current_frame_time )
{
    state_out = state_in;
    if ( current_frame_time <= state_in.last_update_time )
    {
        // cout << ANSI_COLOR_RED_BOLD << "Error current_frame_time <= state_in.last_update_time | " <<
        // current_frame_time - state_in.last_update_time << ANSI_COLOR_RESET << endl;
        return false;
    }
    mtx_buffer.lock();
    std::deque< sensor_msgs::Imu::ConstPtr > vio_imu_queue;
    for ( auto it = imu_buffer_vio.begin(); it != imu_buffer_vio.end(); it++ )
    {
        vio_imu_queue.push_back( *it );
        if ( ( *it )->header.stamp.toSec() > current_frame_time )
        {
            break;
        }
    }
    while ( !imu_buffer_vio.empty() )
    {
        double imu_time = imu_buffer_vio.front()->header.stamp.toSec();
        if ( imu_time < current_frame_time - 0.2 )
        {
            imu_buffer_vio.pop_front();
        }
        else
        {
            break;
        }
    }
    // cout << "Current VIO_imu buffer size = " << imu_buffer_vio.size() << endl;
    state_out = m_imu_process->imu_preintegration( state_out, vio_imu_queue, current_frame_time - vio_imu_queue.back()->header.stamp.toSec() );
    eigen_q q_diff( state_out.rot_end.transpose() * state_in.rot_end );
    // cout << "Pos diff = " << (state_out.pos_end - state_in.pos_end).transpose() << endl;
    // cout << "Euler diff = " << q_diff.angularDistance(eigen_q::Identity()) * 57.3 << endl;
    mtx_buffer.unlock();
    state_out.last_update_time = current_frame_time;
    return true;
}
This part looks quite similar to the FAST-LIO2 code, but it is not identical, so let us pull the function out and examine it separately. It first initializes a series of variables: the IMU acceleration acc_imu, the averaged angular velocity angvel_avr, the velocity vel_imu, the position pos_imu, the IMU attitude R_imu, as well as the state-transition Jacobian F_x and the process-noise covariance cov_w. This is essentially the prediction (propagation) step of the ESIKF; the measurement update happens back in the main function.
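In equation form, one propagation step of the ESIKF prediction implemented below is

$$
\delta\mathbf{x}_{k+1} = \mathbf{F}_x\, \delta\mathbf{x}_k + \mathbf{w}_k,
\qquad
\boldsymbol{\Sigma}_{k+1} = \mathbf{F}_x\, \boldsymbol{\Sigma}_k\, \mathbf{F}_x^{T} + \boldsymbol{\Sigma}_{\mathbf{w}},
$$

where $\delta\mathbf{x}$ is the error state, $\mathbf{F}_x$ is the state-transition Jacobian assembled below, and $\boldsymbol{\Sigma}_{\mathbf{w}}$ is the discretized process noise cov_w; this is exactly the line state_inout.cov = F_x * state_inout.cov * F_x.transpose() + cov_w further down.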
std::unique_lock< std::mutex > lock( g_imu_premutex );
StatesGroup state_inout = state_in;
// Check whether any velocity component is abnormally large.
if ( check_state( state_inout ) )
{
    state_inout.display( state_inout, "state_inout" );
    state_in.display( state_in, "state_in" );
}
Eigen::Vector3d acc_imu( 0, 0, 0 ), angvel_avr( 0, 0, 0 ), acc_avr( 0, 0, 0 ), vel_imu( 0, 0, 0 ), pos_imu( 0, 0, 0 );
vel_imu = state_inout.vel_end;
pos_imu = state_inout.pos_end;
Eigen::Matrix3d R_imu( state_inout.rot_end );
Eigen::MatrixXd F_x( Eigen::Matrix< double, DIM_OF_STATES, DIM_OF_STATES >::Identity() ); // Jacobian w.r.t. the state.
Eigen::MatrixXd cov_w( Eigen::Matrix< double, DIM_OF_STATES, DIM_OF_STATES >::Zero() );
double dt = 0;
int    if_first_imu = 1;
This part is similar to the UndistortPcl function in FAST-LIO2: it performs a midpoint integration over consecutive IMU samples and computes the time interval between the two IMU instants.
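Concretely, for consecutive IMU samples at $t_i$ and $t_{i+1}$ the midpoint rule used below is

$$
\hat{\boldsymbol{\omega}} = \tfrac{1}{2}\left( \boldsymbol{\omega}_i + \boldsymbol{\omega}_{i+1} \right) - \mathbf{b}_g,
\qquad
\hat{\mathbf{a}} = \tfrac{1}{2}\left( \mathbf{a}_i + \mathbf{a}_{i+1} \right) - \mathbf{b}_a,
\qquad
\Delta t = t_{i+1} - t_i,
$$

and the nominal state is propagated as

$$
\mathbf{R} \leftarrow \mathbf{R}\, \mathrm{Exp}\!\left( \hat{\boldsymbol{\omega}}\, \Delta t \right),
\qquad
\mathbf{a}^{G} = \mathbf{R}\, \hat{\mathbf{a}} - \mathbf{g},
\qquad
\mathbf{p} \leftarrow \mathbf{p} + \mathbf{v}\, \Delta t + \tfrac{1}{2}\, \mathbf{a}^{G} \Delta t^{2},
\qquad
\mathbf{v} \leftarrow \mathbf{v} + \mathbf{a}^{G} \Delta t,
$$

which matches the updates of R_imu, acc_imu, pos_imu and vel_imu at the end of the loop body.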
// Iterate over the IMU samples (excluding the last one), integrating and updating the
// covariance over each consecutive pair.
for ( std::deque< sensor_msgs::Imu::ConstPtr >::iterator it_imu = v_imu.begin(); it_imu != ( v_imu.end() - 1 ); it_imu++ )
{
    // if(g_lidar_star_tim == 0 || state_inout.last_update_time == 0)
    // {
    //     return state_inout;
    // }
    // Take two consecutive IMU samples.
    sensor_msgs::Imu::ConstPtr head = *( it_imu );
    sensor_msgs::Imu::ConstPtr tail = *( it_imu + 1 );
    // Average the two samples, then subtract the biases.
    angvel_avr << 0.5 * ( head->angular_velocity.x + tail->angular_velocity.x ), 0.5 * ( head->angular_velocity.y + tail->angular_velocity.y ),
        0.5 * ( head->angular_velocity.z + tail->angular_velocity.z );
    acc_avr << 0.5 * ( head->linear_acceleration.x + tail->linear_acceleration.x ),
        0.5 * ( head->linear_acceleration.y + tail->linear_acceleration.y ), 0.5 * ( head->linear_acceleration.z + tail->linear_acceleration.z );
    angvel_avr -= state_inout.bias_g;
    acc_avr = acc_avr - state_inout.bias_a;
    // Check the time: an IMU pair ending before the last state-update time is invalid.
    if ( tail->header.stamp.toSec() < state_inout.last_update_time )
    {
        continue;
    }
    // Compute the forward-integration interval dt.
    if ( if_first_imu )
    {
        if_first_imu = 0;
        dt = tail->header.stamp.toSec() - state_inout.last_update_time;
    }
    else
    {
        dt = tail->header.stamp.toSec() - head->header.stamp.toSec();
    }
    if ( dt > 0.05 )
    {
        dt = 0.05;
    }
The next part assembles the ESIKF propagation matrices, similar to the use-ikfom.hpp file in FAST-LIO2, except that in R3LIVE the state has 29 dimensions. Equation (1) of the paper spells out what these dimensions are: the global-frame IMU attitude, the global-frame IMU position, the velocity, the gyroscope bias $\mathbf{b}_g$, the accelerometer bias $\mathbf{b}_a$, the gravity vector, the IMU-to-camera rotation, the IMU-to-camera translation, the time offset ${}^{I}t_{C}$ between the IMU and camera (the lidar is assumed already synchronized with the IMU), and the intrinsics $\boldsymbol{\phi} = [f_x, f_y, c_x, c_y]^{T}$.
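Reconstructed from equation (1) of the paper, the full state vector is

$$
\mathbf{x} \triangleq \left[ {}^{G}\mathbf{R}_{I}^{T} \quad {}^{G}\mathbf{p}_{I}^{T} \quad {}^{G}\mathbf{v}_{I}^{T} \quad \mathbf{b}_{g}^{T} \quad \mathbf{b}_{a}^{T} \quad {}^{G}\mathbf{g}^{T} \quad {}^{I}\mathbf{R}_{C}^{T} \quad {}^{I}\mathbf{p}_{C}^{T} \quad {}^{I}t_{C} \quad \boldsymbol{\phi}^{T} \right]^{T} \in \mathbb{R}^{29},
$$

i.e. eight 3-dimensional blocks plus the 1-dimensional time offset and the 4 intrinsics: $8 \times 3 + 1 + 4 = 29$. In the F_x indexing below, attitude occupies rows 0-2, position 3-5, velocity 6-8, gyro bias 9-11, accelerometer bias 12-14, and gravity 15-17.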
    /* covariance propagation */
    Eigen::Matrix3d acc_avr_skew;
    Eigen::Matrix3d Exp_f = Exp( angvel_avr, dt );
    acc_avr_skew << SKEW_SYM_MATRIX( acc_avr );
    // Eigen::Matrix3d Jr_omega_dt = right_jacobian_of_rotion_matrix(angvel_avr*dt);
    Eigen::Matrix3d Jr_omega_dt = Eigen::Matrix3d::Identity();
    F_x.block< 3, 3 >( 0, 0 ) = Exp_f.transpose();
    // F_x.block<3, 3>(0, 9) = -Eye3d * dt;
    F_x.block< 3, 3 >( 0, 9 ) = -Jr_omega_dt * dt;
    // F_x.block<3,3>(3,0) = -R_imu * off_vel_skew * dt;
    F_x.block< 3, 3 >( 3, 3 ) = Eye3d; // Already the identity.
    F_x.block< 3, 3 >( 3, 6 ) = Eye3d * dt;
    F_x.block< 3, 3 >( 6, 0 ) = -R_imu * acc_avr_skew * dt;
    F_x.block< 3, 3 >( 6, 12 ) = -R_imu * dt;
    F_x.block< 3, 3 >( 6, 15 ) = Eye3d * dt;
    Eigen::Matrix3d cov_acc_diag, cov_gyr_diag, cov_omega_diag;
    cov_omega_diag = Eigen::Vector3d( COV_OMEGA_NOISE_DIAG, COV_OMEGA_NOISE_DIAG, COV_OMEGA_NOISE_DIAG ).asDiagonal();
    cov_acc_diag = Eigen::Vector3d( COV_ACC_NOISE_DIAG, COV_ACC_NOISE_DIAG, COV_ACC_NOISE_DIAG ).asDiagonal();
    cov_gyr_diag = Eigen::Vector3d( COV_GYRO_NOISE_DIAG, COV_GYRO_NOISE_DIAG, COV_GYRO_NOISE_DIAG ).asDiagonal();
    // cov_w.block<3, 3>(0, 0) = cov_omega_diag * dt * dt;
    cov_w.block< 3, 3 >( 0, 0 ) = Jr_omega_dt * cov_omega_diag * Jr_omega_dt * dt * dt;
    cov_w.block< 3, 3 >( 3, 3 ) = R_imu * cov_gyr_diag * R_imu.transpose() * dt * dt;
    cov_w.block< 3, 3 >( 6, 6 ) = cov_acc_diag * dt * dt;
    cov_w.block< 3, 3 >( 9, 9 ).diagonal() =
        Eigen::Vector3d( COV_BIAS_GYRO_NOISE_DIAG, COV_BIAS_GYRO_NOISE_DIAG, COV_BIAS_GYRO_NOISE_DIAG ) * dt * dt; // bias gyro covariance
    cov_w.block< 3, 3 >( 12, 12 ).diagonal() =
        Eigen::Vector3d( COV_BIAS_ACC_NOISE_DIAG, COV_BIAS_ACC_NOISE_DIAG, COV_BIAS_ACC_NOISE_DIAG ) * dt * dt; // bias acc covariance
    state_inout.cov = F_x * state_inout.cov * F_x.transpose() + cov_w;
    // State propagation.
    R_imu = R_imu * Exp_f;                                      // Attitude.
    acc_imu = R_imu * acc_avr - state_inout.gravity;            // Acceleration in the global frame.
    pos_imu = pos_imu + vel_imu * dt + 0.5 * acc_imu * dt * dt; // Position.
    vel_imu = vel_imu + acc_imu * dt;                           // Velocity.
    angvel_last = angvel_avr;
    acc_s_last = acc_imu;
Finally, after the loop over the IMU samples ends, the propagated state is written to the output for the subsequent measurement update; whatever time remains beyond the last IMU sample is covered by one more propagation step:
// Keep propagating up to the end of the frame (end_pose_dt past the last IMU sample).
dt = end_pose_dt;
state_inout.last_update_time = v_imu.back()->header.stamp.toSec() + dt;
// cout << "Last update time = " << state_inout.last_update_time - g_lidar_star_tim << endl;
if ( dt > 0.1 )
{
    scope_color( ANSI_COLOR_RED_BOLD );
    for ( int i = 0; i < 1; i++ )
    {
        cout << __FILE__ << ", " << __LINE__ << "dt = " << dt << endl;
    }
    dt = 0.1;
}
// Store the result into state_inout; the part beyond the IMU data is propagated directly
// with the last IMU sample over the remaining time difference.
state_inout.vel_end = vel_imu + acc_imu * dt;
state_inout.rot_end = R_imu * Exp( angvel_avr, dt );
state_inout.pos_end = pos_imu + vel_imu * dt + 0.5 * acc_imu * dt * dt;
// cout <<__FILE__ << ", " << __LINE__ <<" ,diagnose lio_state = " << std::setprecision(2) <<(state_inout - StatesGroup()).transpose() << endl;
// cout << "Preintegration State diff = " << std::setprecision(2) << (state_inout - state_in).head<15>().transpose()
// << endl;
// std::cout << __FILE__ << " " << __LINE__ << std::endl;
// check_state(state_inout);
if ( 0 )
{
    if ( check_state( state_inout ) )
    {
        // printf_line;
        std::cout << __FILE__ << " " << __LINE__ << std::endl;
        state_inout.display( state_inout, "state_inout" );
        state_in.display( state_in, "state_in" );
    }
    // Check whether the position moved more than 1 m during preintegration; if so, the
    // propagation is considered invalid and the state is reset to its pre-propagation value.
    check_in_out_state( state_in, state_inout );
}
// cout << (state_inout - state_in).transpose() << endl;
return state_inout;
This part works much like VINS: feature points are tracked by optical flow. The concrete flow starts with op_track.track_img( img_pose, -20 ), which tracks the feature points (again, the negative threshold -20 disables rejection by projection distance).
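Internally the tracker follows the same pyramidal Lucas-Kanade scheme as VINS. Below is a minimal sketch of that core step built on cv::calcOpticalFlowPyrLK; the real op_track additionally rejects outliers (e.g. by RANSAC and by the projection-distance threshold passed above), and the window size and pyramid levels here are illustrative:

#include <opencv2/opencv.hpp>
#include <vector>

// Sketch of pyramidal LK tracking between two grayscale frames; keeps only
// the points whose status flag indicates a successful track.
void track_sketch( const cv::Mat &prev_gray, const cv::Mat &curr_gray,
                   std::vector< cv::Point2f > &prev_pts, std::vector< cv::Point2f > &curr_pts )
{
    std::vector< uchar > status;
    std::vector< float > err;
    cv::calcOpticalFlowPyrLK( prev_gray, curr_gray, prev_pts, curr_pts, status, err,
                              cv::Size( 21, 21 ), 3 ); // 21x21 window, 3 pyramid levels (illustrative)
    size_t j = 0;
    for ( size_t i = 0; i < prev_pts.size(); i++ )
    {
        if ( status[ i ] )
        {
            prev_pts[ j ] = prev_pts[ i ];
            curr_pts[ j ] = curr_pts[ i ];
            j++;
        }
    }
    prev_pts.resize( j );
    curr_pts.resize( j );
}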