本文主要分析 readImage() 函数。
trackerData[i].readImage(ptr->image.rowRange(ROW * i, ROW * (i + 1)), img_msg->header.stamp.toSec());
注意:以下代码在源程序中并不是连续的。其中 step1 - step3 位于 feature_tracker.cpp 中的 readImage() 函数,step4 位于 feature_tracker_node.cpp 中的 img_callback() 函数。
// step1. Clear the forward-frame point buffer
forw_pts.clear();
// step2. For the very first frame, detect features, up to MAX_CNT in total
ROS_DEBUG("detect feature begins");
TicToc t_t;
int n_max_cnt = MAX_CNT - static_cast<int>(forw_pts.size());
if (n_max_cnt > 0)
{
if(mask.empty())
cout << "mask is empty " << endl;
if (mask.type() != CV_8UC1)
cout << "mask type wrong " << endl;
if (mask.size() != forw_img.size())
cout << "wrong size " << endl;
// Extra features are only extracted when this frame is published; the mask
// keeps new detections away from already-masked (existing) points.
// Could the new points cluster together? Yes, but it does not matter: on the
// next frame they count as "old" points and go through the uniformity (mask) filter.
cv::goodFeaturesToTrack(forw_img, n_pts, MAX_CNT - forw_pts.size(), 0.01, MIN_DIST, mask);
}
// step3. Shift the frame buffers by one and undistort
prev_img = cur_img;
prev_pts = cur_pts;
prev_un_pts = cur_un_pts; // the three prev_* variables above are never used afterwards
cur_img = forw_img; // from the next call's perspective this is the previous frame's image
cur_pts = forw_pts; // and these are its feature points
undistortedPoints();
prev_time = cur_time;
undistortedPoints() 函数的主要流程分两步:
第一步:利用虚拟相机将真实相机有畸变2d像素坐标转为虚拟相机无畸变3d坐标
第二步:无畸变3d坐标转为无畸变归一化平面(z=1)2d相机坐标
重要:后续的操作都是基于归一化相机坐标操作的
虚拟相机可以看一下:https://zhuanlan.zhihu.com/p/414047132
// step4. Assign every feature point a globally unique id
for (unsigned int i = 0;; i++)
{
bool completed = false;
for (int j = 0; j < NUM_OF_CAM; j++)
if (j != 1 || !STEREO_TRACK)
completed |= trackerData[j].updateID(i); // for the monocular case a plain = would also work
if (!completed)
break;
}
注意:以下代码在源程序中并不是连续的。其中 step1 - step5 位于 feature_tracker.cpp 中的 readImage() 函数,step6 位于 feature_tracker_node.cpp 中的 img_callback() 函数。
forw_pts.clear();
if (cur_pts.size() > 0) // the previous frame has feature points, so optical-flow tracking is possible
{
TicToc t_o;
vector<uchar> status;
vector<float> err;
// Track features with OpenCV pyramidal Lucas-Kanade optical flow
// Step 1 reject outliers using the status flags returned by the LK tracker
cv::calcOpticalFlowPyrLK(cur_img, forw_img, cur_pts, forw_pts, status, err, cv::Size(21, 21), 3);
for (int i = 0; i < int(forw_pts.size()); i++)
// Step 2 reject outliers that fall outside the image border
if (status[i] && !inBorder(forw_pts[i])) // tracked OK; now check it lies inside the image
status[i] = 0;
reduceVector(prev_pts, status); // not used afterwards
reduceVector(cur_pts, status);
reduceVector(forw_pts, status);
reduceVector(ids, status); // feature ids
reduceVector(cur_un_pts, status); // undistorted coordinates
reduceVector(track_cnt, status); // how many frames each feature has been tracked
ROS_DEBUG("temporal optical flow costs: %fms", t_o.toc());
}
// Every surviving feature already existed in the previous frame, so bump its track count
for (auto &n : track_cnt)
n++;
// Step 3 reject outliers with the epipolar constraint (fundamental matrix + RANSAC)
rejectWithF();
ROS_DEBUG("set mask begins");
TicToc t_m;
setMask();
ROS_DEBUG("set mask costs %fms", t_m.toc());
// Step 4 detect new features to top the total back up to MAX_CNT
ROS_DEBUG("detect feature begins");
TicToc t_t;
int n_max_cnt = MAX_CNT - static_cast<int>(forw_pts.size());
if (n_max_cnt > 0)
{
if(mask.empty())
cout << "mask is empty " << endl;
if (mask.type() != CV_8UC1)
cout << "mask type wrong " << endl;
if (mask.size() != forw_img.size())
cout << "wrong size " << endl;
// New features are only extracted when this frame is published; the mask keeps
// them away from already-tracked points.
// The new points may cluster, but that is fine: next frame they are treated as
// old points and go through the uniform-distribution (mask) filtering.
cv::goodFeaturesToTrack(forw_img, n_pts, MAX_CNT - forw_pts.size(), 0.01, MIN_DIST, mask);
}
// step5. Shift the frame buffers by one and undistort
prev_img = cur_img;
prev_pts = cur_pts;
prev_un_pts = cur_un_pts; // the three prev_* variables above are never used afterwards
cur_img = forw_img; // from the next call's perspective this is the previous frame's image
cur_pts = forw_pts; // and these are its feature points
undistortedPoints();
prev_time = cur_time;
// step6. Assign every feature point a globally unique id
for (unsigned int i = 0;; i++)
{
bool completed = false;
for (int j = 0; j < NUM_OF_CAM; j++)
if (j != 1 || !STEREO_TRACK)
completed |= trackerData[j].updateID(i); // for the monocular case a plain = would also work
if (!completed)
break;
}
特征点信息包括:
1. 去畸变归一化相机坐标(很重要:后端用于恢复姿态)
2. 特征点id(实际发布的是 p_id * NUM_OF_CAM + i)
3. 像素坐标
4. 归一化坐标下的速度
feature_points->points.push_back(p); // undistorted normalized camera coordinates
id_of_point.values.push_back(p_id * NUM_OF_CAM + i); // feature id (encoded together with the camera index)
u_of_point.values.push_back(cur_pts[j].x); // pixel coordinates
v_of_point.values.push_back(cur_pts[j].y);
velocity_x_of_point.values.push_back(pts_velocity[j].x); // velocity in normalized coordinates
velocity_y_of_point.values.push_back(pts_velocity[j].y);
具体在 feature_tracker_node.cpp 中的 img_callback() 函数。
// Feed the tracked features to the back end
if (PUB_THIS_FRAME)
{
pub_count++; // update the publish counter
sensor_msgs::PointCloudPtr feature_points(new sensor_msgs::PointCloud);
sensor_msgs::ChannelFloat32 id_of_point;
sensor_msgs::ChannelFloat32 u_of_point;
sensor_msgs::ChannelFloat32 v_of_point;
sensor_msgs::ChannelFloat32 velocity_x_of_point;
sensor_msgs::ChannelFloat32 velocity_y_of_point;
feature_points->header = img_msg->header;
feature_points->header.frame_id = "world";
vector<set<int>> hash_ids(NUM_OF_CAM);
for (int i = 0; i < NUM_OF_CAM; i++)
{
auto &un_pts = trackerData[i].cur_un_pts; // undistorted normalized camera coordinates
auto &cur_pts = trackerData[i].cur_pts; // pixel coordinates
auto &ids = trackerData[i].ids; // feature ids
auto &pts_velocity = trackerData[i].pts_velocity; // velocity in normalized coordinates
for (unsigned int j = 0; j < ids.size(); j++)
{
// Only publish features tracked more than once: a single observation can
// neither form a reprojection constraint nor be triangulated
if (trackerData[i].track_cnt[j] > 1)
{
int p_id = ids[j];
hash_ids[i].insert(p_id); // never actually used
geometry_msgs::Point32 p;
p.x = un_pts[j].x;
p.y = un_pts[j].y;
p.z = 1;
// Pack the information into the ROS message fields
feature_points->points.push_back(p);
id_of_point.values.push_back(p_id * NUM_OF_CAM + i);
u_of_point.values.push_back(cur_pts[j].x);
v_of_point.values.push_back(cur_pts[j].y);
velocity_x_of_point.values.push_back(pts_velocity[j].x);
velocity_y_of_point.values.push_back(pts_velocity[j].y);
}
}
}
feature_points->channels.push_back(id_of_point);
feature_points->channels.push_back(u_of_point);
feature_points->channels.push_back(v_of_point);
feature_points->channels.push_back(velocity_x_of_point);
feature_points->channels.push_back(velocity_y_of_point);
ROS_DEBUG("publish %f, at %f", feature_points->header.stamp.toSec(), ros::Time::now().toSec());
// skip the first image, since there is no optical-flow velocity on the first image
if (!init_pub)
{
init_pub = 1;
}
else
pub_img.publish(feature_points); // publish the front-end result through this publisher
...
}