Once the code runs end to end, it is time to dig into the source. There is a lot of code overall, so it is easiest to start from the configuration files.
See the launch file and the yaml parameter configuration file:
Launch file: euroc.launch
Parameter file: euroc_config.yaml, containing the general parameters, camera intrinsics, IMU noise parameters, the camera-IMU extrinsic Tbc, the required topics, the front-end feature tracker parameters, the back-end loop-closure parameters, the optimization parameters, and the camera-IMU timestamp compensation parameter.
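A heavily abbreviated sketch of the kind of entries in euroc_config.yaml (field names and values here are quoted from memory purely as an illustration; check the actual file in the repository):

%YAML:1.0
imu_topic: "/imu0"
image_topic: "/cam0/image_raw"
# camera intrinsics: pinhole model with radial-tangential distortion
model_type: PINHOLE
image_width: 752
image_height: 480
distortion_parameters:
   k1: -2.917e-01
   k2: 8.228e-02
   p1: 5.333e-05
   p2: -1.578e-05
projection_parameters:
   fx: 4.616e+02
   fy: 4.603e+02
   cx: 3.630e+02
   cy: 2.481e+02
# IMU noise densities and bias random walks
acc_n: 0.08
gyr_n: 0.004
acc_w: 0.00004
gyr_w: 2.0e-6
# Tbc extrinsic: 0 = fixed, 1 = refine online, 2 = calibrate from scratch
estimate_extrinsic: 0
# front-end feature tracker
max_cnt: 150   # max number of tracked features
min_dist: 30   # min pixel distance between two features
freq: 10       # publish frequency of the tracker
# loop closure and camera-IMU time offset
loop_closure: 1
estimate_td: 0
td: 0.0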
Next, look at the data structures used in the ROS messages; they are basically thin wrappers around STL containers. First, sensor_msgs/Image:
std_msgs::Header header;
uint32_t height;            // image rows
uint32_t width;             // image cols
std::string encoding;       // pixel encoding, e.g. "mono8"
uint8_t is_bigendian;       // big endian (most-significant byte at the lowest address) vs. little endian
uint32_t step;              // length of one image row in bytes
std::vector<uint8_t> data;  // raw pixel data, size = step * height
typedef ::sensor_msgs::Image_<std::allocator<void> > Image;
typedef boost::shared_ptr< ::sensor_msgs::Image > ImagePtr;
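As a minimal sketch of how such an Image message is consumed (cv_bridge conversion as in the feature tracker node; the callback name here is just illustrative):

#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/image_encodings.h>
#include <opencv2/core/core.hpp>

// illustrative callback: convert the ROS Image message into a cv::Mat
void img_callback(const sensor_msgs::ImageConstPtr &img_msg)
{
    // toCvCopy allocates a new cv::Mat; "mono8" requests an 8-bit grayscale image
    cv_bridge::CvImageConstPtr ptr =
        cv_bridge::toCvCopy(img_msg, sensor_msgs::image_encodings::MONO8);
    cv::Mat img = ptr->image;                  // rows == height, cols == width
    double t = img_msg->header.stamp.toSec();  // timestamp, later used to align with IMU
    (void)img; (void)t;
}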
from file: sensor_msgs/PointCloud.msg
sensor_msgs::PointCloudPtr feature_points(new sensor_msgs::PointCloud); // the message filled and published by the feature tracker
// header info
std_msgs::Header header;
feature_points->header = img_msg->header;
feature_points->header.frame_id = "world";
// 3D landmarks (expressed on the normalized camera plane, hence z = 1)
std::vector<geometry_msgs::Point32> points;
geometry_msgs::Point32 p;
p.x = un_pts[j].x;
p.y = un_pts[j].y;
p.z = 1;
feature_points->points.push_back(p); // 3D point
// channels
std::vector<sensor_msgs::ChannelFloat32> channels;
feature_points->channels.push_back(id_of_point); // each channel is an independent ChannelFloat32, so several channels are used
feature_points->channels.push_back(u_of_point);
feature_points->channels.push_back(v_of_point);
feature_points->channels.push_back(velocity_x_of_point);
feature_points->channels.push_back(velocity_y_of_point);
// a ChannelFloat32 itself contains:
std::basic_string<char> name; // channel name
std::vector<float> values;    // channel values
typedef ::sensor_msgs::ChannelFloat32_<std::allocator<void> > ChannelFloat32;
typedef boost::shared_ptr< ::sensor_msgs::ChannelFloat32 > ChannelFloat32Ptr;
The feature_points above is a smart pointer to the message; the msg_match_points below (from the loop-closure part) is a message object used directly as a class member.
// header info
std_msgs::Header header;
msg_match_points.header.stamp = ros::Time(time_stamp);
// 3D landmarks: x, y are the matched old points on the normalized camera plane, z stores the matched feature id
std::vector<geometry_msgs::Point32> points;
geometry_msgs::Point32 p;
p.x = matched_2d_old_norm[i].x;
p.y = matched_2d_old_norm[i].y;
p.z = matched_id[i];
msg_match_points.points.push_back(p);
// channels
std::vector<sensor_msgs::ChannelFloat32> channels;
Eigen::Vector3d T = old_kf->T_w_i;
Eigen::Matrix3d R = old_kf->R_w_i;
Quaterniond Q(R);
sensor_msgs::ChannelFloat32 t_q_index;//one channel packs one group of values: here the old keyframe's pose T_w_i and R_w_i (as a quaternion) plus the frame index
t_q_index.values.push_back(T.x());
t_q_index.values.push_back(T.y());
t_q_index.values.push_back(T.z());
t_q_index.values.push_back(Q.w());
t_q_index.values.push_back(Q.x());
t_q_index.values.push_back(Q.y());
t_q_index.values.push_back(Q.z());
t_q_index.values.push_back(index);
msg_match_points.channels.push_back(t_q_index);
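On the receiving side, the estimator unpacks this message by mirroring the layout above (a sketch; the exact variable and function names in estimator_node.cpp may differ):

#include <sensor_msgs/PointCloud.h>
#include <Eigen/Dense>
#include <vector>

// sketch: read back the matched points and the old keyframe pose packed above
void unpack_match_points(const sensor_msgs::PointCloudConstPtr &relo_msg)
{
    std::vector<Eigen::Vector3d> match_points;
    for (unsigned int i = 0; i < relo_msg->points.size(); i++)
        match_points.emplace_back(relo_msg->points[i].x,   // normalized x of the matched old point
                                  relo_msg->points[i].y,   // normalized y
                                  relo_msg->points[i].z);  // feature id stored in z
    // channel 0 holds [T(3), Q(4), index(1)] in exactly the order they were pushed above
    const auto &v = relo_msg->channels[0].values;
    Eigen::Vector3d relo_t(v[0], v[1], v[2]);
    Eigen::Quaterniond relo_q(v[3], v[4], v[5], v[6]);     // (w, x, y, z)
    int frame_index = static_cast<int>(v[7]);
    (void)relo_t; (void)relo_q; (void)frame_index;
}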
Next, the IMU message. The ImuConstPtr typedef below is a pointer to const: the IMU data it points to cannot be modified, i.e. it is read-only.
from file: sensor_msgs/Imu.msg
{
std_msgs::Header header;                                // header
geometry_msgs::Quaternion orientation;                  // orientation (rotation)
boost::array<double, 9> orientation_covariance;         // orientation covariance
geometry_msgs::Vector3 angular_velocity;                // angular velocity
boost::array<double, 9> angular_velocity_covariance;    // angular velocity covariance
geometry_msgs::Vector3 linear_acceleration;             // linear acceleration
boost::array<double, 9> linear_acceleration_covariance; // linear acceleration covariance
} // Imu_
typedef ::sensor_msgs::Imu_<std::allocator<void> > Imu;
typedef boost::shared_ptr< ::sensor_msgs::Imu > ImuPtr;
typedef boost::shared_ptr< ::sensor_msgs::Imu const> ImuConstPtr;
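A minimal sketch of consuming IMU messages through the read-only ImuConstPtr (buffer and callback names are illustrative; the real node also predicts and publishes the latest state in this callback):

#include <ros/ros.h>
#include <sensor_msgs/Imu.h>
#include <queue>
#include <mutex>

std::queue<sensor_msgs::ImuConstPtr> imu_buf;  // shared with the processing thread
std::mutex m_buf;

// the parameter is a shared_ptr to const Imu, so the message content is read-only
void imu_callback(const sensor_msgs::ImuConstPtr &imu_msg)
{
    double t  = imu_msg->header.stamp.toSec();
    double ax = imu_msg->linear_acceleration.x;  // m/s^2
    double wx = imu_msg->angular_velocity.x;     // rad/s
    (void)t; (void)ax; (void)wx;

    std::lock_guard<std::mutex> lock(m_buf);
    imu_buf.push(imu_msg);  // only the shared pointer is copied, not the IMU data
}

// subscription, e.g. n.subscribe(IMU_TOPIC, 2000, imu_callback, ros::TransportHints().tcpNoDelay());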
TODO (to fill in later): a deeper understanding of these ROS data structures.
rosrun rqt_graph rqt_graph
The node/topic graph makes the topics easier to understand:
This section references the blog: https://blog.csdn.net/qq_41839222/article/details/86030962
std::vector<std::pair<std::vector<sensor_msgs::ImuConstPtr>, sensor_msgs::PointCloudConstPtr>> measurements;
This is the data structure produced by getMeasurements() in estimator_node.cpp, which does the initial alignment of IMU and image data, ensuring every image is associated with all the IMU data in the corresponding time interval.
sensor_msgs::PointCloudConstPtr is the feature_points of one image frame.
std::vector<sensor_msgs::ImuConstPtr> holds all IMU messages in the matching time interval.
The two are combined into a std::pair, and a vector of such pairs is used for storage.
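A simplified sketch of that alignment (following the usual VINS-Mono logic; the td time-offset compensation and the condition-variable waiting are omitted, and imu_buf / feature_buf are the global queues filled by the callbacks):

std::vector<std::pair<std::vector<sensor_msgs::ImuConstPtr>, sensor_msgs::PointCloudConstPtr>>
getMeasurements()
{
    std::vector<std::pair<std::vector<sensor_msgs::ImuConstPtr>, sensor_msgs::PointCloudConstPtr>> measurements;
    while (true)
    {
        if (imu_buf.empty() || feature_buf.empty())
            return measurements;
        // need IMU data that extends beyond the oldest image; otherwise wait for more IMU
        if (imu_buf.back()->header.stamp.toSec() <= feature_buf.front()->header.stamp.toSec())
            return measurements;
        // throw away an image that is older than all buffered IMU data
        if (imu_buf.front()->header.stamp.toSec() >= feature_buf.front()->header.stamp.toSec())
        {
            feature_buf.pop();
            continue;
        }
        sensor_msgs::PointCloudConstPtr img_msg = feature_buf.front();
        feature_buf.pop();
        // collect every IMU message before the image, plus one message after it (for interpolation)
        std::vector<sensor_msgs::ImuConstPtr> IMUs;
        while (imu_buf.front()->header.stamp.toSec() < img_msg->header.stamp.toSec())
        {
            IMUs.emplace_back(imu_buf.front());
            imu_buf.pop();
        }
        IMUs.emplace_back(imu_buf.front());  // not popped: it is also the first IMU of the next interval
        measurements.emplace_back(IMUs, img_msg);
    }
}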
map<int, vector<pair<int, Eigen::Matrix<double, 7, 1>>>> image;
It is built in process() in estimator_node.cpp and then passed to Estimator::processImage().
It maps each feature_id to the observations (camera_id, [x, y, z, u, v, vx, vy]) of that feature.
Since this is monocular, camera_id inside the pair is always 0.
for (unsigned int i = 0; i < img_msg->points.size(); i++){
    int v = img_msg->channels[0].values[i] + 0.5;       // +0.5 rounds the float channel value to the nearest int
    int feature_id = v / NUM_OF_CAM;                     // channel 0 stored id = feature_id * NUM_OF_CAM + camera_id
    int camera_id = v % NUM_OF_CAM;
    double x = img_msg->points[i].x;                     // normalized plane coordinates
    double y = img_msg->points[i].y;
    double z = img_msg->points[i].z;
    double p_u = img_msg->channels[1].values[i];         // pixel coordinates
    double p_v = img_msg->channels[2].values[i];
    double velocity_x = img_msg->channels[3].values[i];  // optical-flow velocity
    double velocity_y = img_msg->channels[4].values[i];
    ROS_ASSERT(z == 1);
    Eigen::Matrix<double, 7, 1> xyz_uv_velocity;
    xyz_uv_velocity << x, y, z, p_u, p_v, velocity_x, velocity_y;
    image[feature_id].emplace_back(camera_id, xyz_uv_velocity);
}
map<double, ImageFrame> all_image_frame
Declared in estimator.h as a member of class Estimator: the key is the image frame's timestamp, the value is an ImageFrame object.
An ImageFrame can be constructed from a frame's feature points and its timestamp; it additionally stores the pose R, T, the pre-integration object pre_integration, and whether the frame is a keyframe.
class ImageFrame
{
public:
ImageFrame(){};
ImageFrame(const map<int, vector<pair<int, Eigen::Matrix<double, 7, 1>>>>& _points, double _t):t{_t},is_key_frame{false}
{
points = _points;
};
map<int, vector<pair<int, Eigen::Matrix<double, 7, 1>> > > points;
double t;
Matrix3d R;
Vector3d T;
IntegrationBase *pre_integration;
bool is_key_frame;
};
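A minimal sketch of how all_image_frame is filled at the start of Estimator::processImage() (assuming the usual VINS-Mono flow; acc_0, gyr_0, Bas, Bgs and tmp_pre_integration are Estimator members):

// header.stamp is the image timestamp; image is the map<feature_id, ...> built above
ImageFrame imageframe(image, header.stamp.toSec());
imageframe.pre_integration = tmp_pre_integration;  // IMU pre-integration accumulated since the last frame
all_image_frame.insert(std::make_pair(header.stamp.toSec(), imageframe));
// start a fresh pre-integration object for the next image interval
tmp_pre_integration = new IntegrationBase{acc_0, gyr_0, Bas[frame_count], Bgs[frame_count]};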
That wraps up the data structures for now; next comes the front-end (feature_tracker) part.
list<FeaturePerId> feature
i.e. a std::list whose elements are FeaturePerId; the concrete data structures are as follows:
// one observation of a feature in a single frame: normalized point, pixel uv, pixel velocity, and time offset td
class FeaturePerFrame
{
public:
FeaturePerFrame(const Eigen::Matrix<double, 7, 1> &_point, double td)
{
point.x() = _point(0);
point.y() = _point(1);
point.z() = _point(2);
uv.x() = _point(3);
uv.y() = _point(4);
velocity.x() = _point(5);
velocity.y() = _point(6);
cur_td = td;
}
double cur_td;
Vector3d point;
Vector2d uv;
Vector2d velocity;
double z;
bool is_used;
double parallax;
MatrixXd A;
VectorXd b;
double dep_gradient;
};
//all observations of the feature with this id inside the sliding window; each observation is one FeaturePerFrame object
class FeaturePerId
{
public:
const int feature_id;
int start_frame;
vector<FeaturePerFrame> feature_per_frame;//the per-frame observations for this id; size == how many times this feature has been tracked inside the sliding window
int used_num;
bool is_outlier;
bool is_margin;
double estimated_depth;
int solve_flag; // 0 haven't solve yet; 1 solve succ; 2 solve fail;
Vector3d gt_p;
FeaturePerId(int _feature_id, int _start_frame)
: feature_id(_feature_id), start_frame(_start_frame),
used_num(0), estimated_depth(-1.0), solve_flag(0)
{
}
int endFrame();
};
The main data structure of feature can therefore be read as roughly
list< vector< Eigen::Matrix<double, 7, 1> > >:
each feature_id corresponds to one element of the list (a FeaturePerId), which holds a vector of all observations of that id inside the window; so one feature_id maps to one vector, and the vector's size is the number of times that feature has been tracked.
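A sketch of how FeatureManager fills this structure when a new frame arrives (simplified from addFeatureCheckParallax; the parallax/keyframe decision is left out):

// image: map<feature_id, vector<pair<camera_id, Matrix<double, 7, 1>>>> of the new frame
// feature: list<FeaturePerId>, the per-id histories inside the sliding window
for (auto &id_pts : image)
{
    FeaturePerFrame f_per_fra(id_pts.second[0].second, td);  // this frame's observation
    int feature_id = id_pts.first;
    auto it = find_if(feature.begin(), feature.end(),
                      [feature_id](const FeaturePerId &fpi)
                      {
                          return fpi.feature_id == feature_id;
                      });
    if (it == feature.end())
    {
        // first time this id shows up: start a new track at the current frame
        feature.push_back(FeaturePerId(feature_id, frame_count));
        feature.back().feature_per_frame.push_back(f_per_fra);
    }
    else
    {
        // already tracked: append the observation, so the vector size equals the track length
        it->feature_per_frame.push_back(f_per_fra);
    }
}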
I don't yet fully understand the recursive distortion model used here; need to read up on it.
readImage() is the key function. The undistortion it relies on is liftProjective() in PinholeCamera.cc:
void
PinholeCamera::liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const
{
double mx_d, my_d,mx2_d, mxy_d, my2_d, mx_u, my_u;
double rho2_d, rho4_d, radDist_d, Dx_d, Dy_d, inv_denom_d;
//double lambda;
// Lift points to normalised plane: the inverse intrinsics are stored as the four m_inv_Kxx coefficients, mapping pixel (u, v) to (x, y, 1) on the normalized plane
mx_d = m_inv_K11 * p(0) + m_inv_K13;
my_d = m_inv_K22 * p(1) + m_inv_K23;
if (m_noDistortion)
{
mx_u = mx_d;
my_u = my_d;
}
else
{
if (0)
{
double k1 = mParameters.k1();
double k2 = mParameters.k2();
double p1 = mParameters.p1();
double p2 = mParameters.p2();
// Apply inverse distortion model
// proposed by Heikkila
mx2_d = mx_d*mx_d;
my2_d = my_d*my_d;
mxy_d = mx_d*my_d;
rho2_d = mx2_d+my2_d;
rho4_d = rho2_d*rho2_d;
radDist_d = k1*rho2_d+k2*rho4_d;
Dx_d = mx_d*radDist_d + p2*(rho2_d+2*mx2_d) + 2*p1*mxy_d;
Dy_d = my_d*radDist_d + p1*(rho2_d+2*my2_d) + 2*p2*mxy_d;
inv_denom_d = 1/(1+4*k1*rho2_d+6*k2*rho4_d+8*p1*my_d+8*p2*mx_d);
mx_u = mx_d - inv_denom_d*Dx_d;
my_u = my_d - inv_denom_d*Dy_d;
}
else
{
// Recursive distortion model (what exactly does "recursive" mean here? see the note after this function)
int n = 8;
Eigen::Vector2d d_u;
distortion(Eigen::Vector2d(mx_d, my_d), d_u);
// Approximate value
mx_u = mx_d - d_u(0);
my_u = my_d - d_u(1);
for (int i = 1; i < n; ++i)
{
distortion(Eigen::Vector2d(mx_u, my_u), d_u);
mx_u = mx_d - d_u(0);
my_u = my_d - d_u(1);
}
}
}
// Obtain a projective ray
P << mx_u, my_u, 1.0;
}
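My reading of the "recursive" distortion model in the else branch above: distortion() evaluates the forward model, i.e. given an undistorted normalized point m_u it returns the offset d(m_u), so the distorted point is m_d = m_u + d(m_u). Undistorting requires m_u = m_d - d(m_u), where the unknown m_u appears on both sides, so the code solves it by fixed-point iteration: start with m_u = m_d - d(m_d), then repeat m_u ← m_d - d(m_u) for n = 8 passes; each pass feeds the current estimate back into the distortion function, which is the "recursion".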
In the mask, the regions set to 0 (circles around already-tracked feature points) are black (the figure was hand-drawn, so the radii are not exactly equal -_- ); new features are only extracted in the white regions, which keeps the features evenly distributed.
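A sketch of the mask logic in FeatureTracker::setMask() (simplified; the real code first sorts the points by track count so that long-tracked features get priority, and ROW / COL / MIN_DIST / MAX_CNT come from the config):

// start from an all-white mask: 255 means "new features may still be extracted here"
cv::Mat mask = cv::Mat(ROW, COL, CV_8UC1, cv::Scalar(255));
for (unsigned int i = 0; i < cur_pts.size(); i++)
{
    if (mask.at<uchar>(cur_pts[i]) == 255)  // keep this tracked point if its area is still free
    {
        // paint a filled black circle of radius MIN_DIST around the kept point, so that
        // goodFeaturesToTrack() will not pick new corners too close to existing ones
        cv::circle(mask, cur_pts[i], MIN_DIST, 0, -1);
    }
}
// new corners are then extracted only in the white area:
// cv::goodFeaturesToTrack(cur_img, n_pts, MAX_CNT - cur_pts.size(), 0.01, MIN_DIST, mask);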
At this point, readImage() is done.
If publishing is required, all the cur_-related quantities are packed into feature_points and published.
Packing:
pub_count++;
sensor_msgs::PointCloudPtr feature_points(new sensor_msgs::PointCloud);
sensor_msgs::ChannelFloat32 id_of_point;
sensor_msgs::ChannelFloat32 u_of_point;
sensor_msgs::ChannelFloat32 v_of_point;
sensor_msgs::ChannelFloat32 velocity_x_of_point;
sensor_msgs::ChannelFloat32 velocity_y_of_point;
feature_points->header = img_msg->header;
feature_points->header.frame_id = "world";
vector<set<int>> hash_ids(NUM_OF_CAM);
for (int i = 0; i < NUM_OF_CAM; i++)
{
auto &un_pts = trackerData[i].cur_un_pts;
auto &cur_pts = trackerData[i].cur_pts;
auto &ids = trackerData[i].ids;
auto &pts_velocity = trackerData[i].pts_velocity;
for (unsigned int j = 0; j < ids.size(); j++)
{
if (trackerData[i].track_cnt[j] > 1)
{
int p_id = ids[j];
hash_ids[i].insert(p_id);
geometry_msgs::Point32 p;
p.x = un_pts[j].x;
p.y = un_pts[j].y;
p.z = 1;
feature_points->points.push_back(p);//3D point on the normalized plane (z = 1)
id_of_point.values.push_back(p_id * NUM_OF_CAM + i);//tracked feature points come in pairs across cameras: for the left camera (i = 0) the id is p_id itself, and the matching point in the right camera gets an id one larger, so corresponding left/right tracked points differ in id by exactly 1
u_of_point.values.push_back(cur_pts[j].x);
v_of_point.values.push_back(cur_pts[j].y);
velocity_x_of_point.values.push_back(pts_velocity[j].x);//the optical-flow velocity is used as this point's velocity
velocity_y_of_point.values.push_back(pts_velocity[j].y);
}
}
}
feature_points->channels.push_back(id_of_point);//each channel holds one independent quantity, so several channels are used
feature_points->channels.push_back(u_of_point);
feature_points->channels.push_back(v_of_point);
feature_points->channels.push_back(velocity_x_of_point);
feature_points->channels.push_back(velocity_y_of_point);
Publish: pub_img.publish(feature_points); // publish the feature-point information of one image
This gives us the latest frame's undistorted features on the normalized plane (so far I have not seen a reproject-to-unit-sphere step).
I visualized the image undistortion; note that both the KLT tracking and the feature extraction are done on the original (distorted) image:
The next section covers the state estimator vins_estimator, which is the core of VINS.