1) This VINS code-study series mainly records my own process of working through the VINS code.
2) Corrections are welcome; for copyright issues please contact: [email protected]
Analyses of the paper are already available online, so this post only provides annotated code for the two feature-extraction files below.
feature_tracker_node.cpp
#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/image_encodings.h>
#include <sensor_msgs/PointCloud.h>
#include <geometry_msgs/Point32.h>
#include <cv_bridge/cv_bridge.h>
#include <message_filters/subscriber.h>
#include "feature_tracker.h"
#define SHOW_UNDISTORTION 0
vector<uchar> r_status;
vector<float> r_err;
queue<sensor_msgs::ImageConstPtr> img_buf;
ros::Publisher pub_img, pub_match; // global publishers
FeatureTracker trackerData[NUM_OF_CAM]; // one tracker per camera (handles both mono and stereo)
double first_image_time; // timestamp of the first image
int pub_count = 1; // number of frames published since first_image_time
bool first_image_flag = true; // used to detect the first image
void img_callback(const sensor_msgs::ImageConstPtr &img_msg)
{
if(first_image_flag)
{
first_image_flag = false;
first_image_time = img_msg->header.stamp.toSec();
}
// frequency control
if (round(1.0 * pub_count / (img_msg->header.stamp.toSec() - first_image_time)) <= FREQ)
{ // enough time has passed that publishing this frame keeps the rate within FREQ, so the frame can be published
PUB_THIS_FRAME = true;
// reset the frequency control
if (abs(1.0 * pub_count / (img_msg->header.stamp.toSec() - first_image_time) - FREQ) < 0.01 * FREQ)
{ // only when the realized publish rate is very close to the target FREQ is the reference time for the next window reset
first_image_time = img_msg->header.stamp.toSec();
pub_count = 0;
}
}
else
PUB_THIS_FRAME = false;
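// Worked example of the controller above: with FREQ = 10 and a ~30 Hz camera, after one
// second about 10 of ~30 frames have been published; round(pub_count / elapsed) <= FREQ
// admits roughly every third frame, and the window is re-anchored (pub_count = 0)
// whenever the realized rate comes within 1% of FREQ, so rounding drift does not
// accumulate over a long sequence.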
cv_bridge::CvImageConstPtr ptr = cv_bridge::toCvCopy(img_msg, sensor_msgs::image_encodings::MONO8);
cv::Mat show_img = ptr->image;
TicToc t_r;
for (int i = 0; i < NUM_OF_CAM; i++) // the left and right images go into trackerData[0] and trackerData[1] respectively
{
ROS_DEBUG("processing camera %d", i);
if (i != 1 || !STEREO_TRACK) // every camera except the right one in stereo mode
trackerData[i].readImage(ptr->image.rowRange(ROW * i, ROW * (i + 1)));
// the monocular path uses readImage(), which tracks the previous frame's features with optical flow
else
{
if (EQUALIZE)
{
cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(); // histogram equalization (CLAHE)
clahe->apply(ptr->image.rowRange(ROW * i, ROW * (i + 1)), trackerData[i].cur_img);
}
else
trackerData[i].cur_img = ptr->image.rowRange(ROW * i, ROW * (i + 1));
}
#if SHOW_UNDISTORTION
trackerData[i].showUndistortion("undistrotion_" + std::to_string(i));
#endif
}
if ( PUB_THIS_FRAME && STEREO_TRACK && trackerData[0].cur_pts.size() > 0)
{
pub_count++;
r_status.clear();
r_err.clear();
TicToc t_o;
// in stereo mode, when this frame's features are to be published, run optical flow between the left and right images (one variant combines a direct method with optical flow to remove outliers)
cv::calcOpticalFlowPyrLK(trackerData[0].cur_img, trackerData[1].cur_img, trackerData[0].cur_pts, trackerData[1].cur_pts, r_status, r_err, cv::Size(21, 21), 3);
ROS_DEBUG("spatial optical flow costs: %fms", t_o.toc());
vector<cv::Point2f> ll, rr;
vector<int> idx;
for (unsigned int i = 0; i < r_status.size(); i++)
{
if (!inBorder(trackerData[1].cur_pts[i]))
r_status[i] = 0;
if (r_status[i])
{
idx.push_back(i);
Eigen::Vector3d tmp_p;
trackerData[0].m_camera->liftProjective(Eigen::Vector2d(trackerData[0].cur_pts[i].x, trackerData[0].cur_pts[i].y), tmp_p);
tmp_p.x() = FOCAL_LENGTH * tmp_p.x() / tmp_p.z() + COL / 2.0;
tmp_p.y() = FOCAL_LENGTH * tmp_p.y() / tmp_p.z() + ROW / 2.0;
ll.push_back(cv::Point2f(tmp_p.x(), tmp_p.y()));
trackerData[1].m_camera->liftProjective(Eigen::Vector2d(trackerData[1].cur_pts[i].x, trackerData[1].cur_pts[i].y), tmp_p);
tmp_p.x() = FOCAL_LENGTH * tmp_p.x() / tmp_p.z() + COL / 2.0;
tmp_p.y() = FOCAL_LENGTH * tmp_p.y() / tmp_p.z() + ROW / 2.0;
rr.push_back(cv::Point2f(tmp_p.x(), tmp_p.y()));
}
}
if (ll.size() >= 8)
{
vector<uchar> status;
TicToc t_f;
cv::findFundamentalMat(ll, rr, cv::FM_RANSAC, 1.0, 0.5, status); // remove outliers with RANSAC
ROS_DEBUG("find f cost: %f", t_f.toc());
int r_cnt = 0;
for (unsigned int i = 0; i < status.size(); i++)
{
if (status[i] == 0)
r_status[idx[i]] = 0;
r_cnt += r_status[idx[i]];
}
}
}
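// assign a fresh global id to every newly added feature (ids[i] == -1);
// updateID(i) returns false once i runs past the end of ids, which ends the loop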
for (unsigned int i = 0;; i++)
{
bool completed = false;
for (int j = 0; j < NUM_OF_CAM; j++)
if (j != 1 || !STEREO_TRACK)
completed |= trackerData[j].updateID(i);
if (!completed)
break;
}
if (PUB_THIS_FRAME)
{
pub_count++;
sensor_msgs::PointCloudPtr feature_points(new sensor_msgs::PointCloud);
sensor_msgs::ChannelFloat32 id_of_point;
sensor_msgs::ChannelFloat32 u_of_point;
sensor_msgs::ChannelFloat32 v_of_point;
feature_points->header = img_msg->header;
feature_points->header.frame_id = "world";
vector<set<int>> hash_ids(NUM_OF_CAM); // insert and find on a std::set are both O(log N)
for (int i = 0; i < NUM_OF_CAM; i++)
{
if (i != 1 || !STEREO_TRACK)
{
auto un_pts = trackerData[i].undistortedPoints();
auto &cur_pts = trackerData[i].cur_pts;
auto &ids = trackerData[i].ids;
for (unsigned int j = 0; j < ids.size(); j++)
{
int p_id = ids[j];
hash_ids[i].insert(p_id);
geometry_msgs::Point32 p;
p.x = un_pts[j].x;
p.y = un_pts[j].y;
p.z = 1;
feature_points->points.push_back(p);
id_of_point.values.push_back(p_id * NUM_OF_CAM + i);
u_of_point.values.push_back(cur_pts[j].x);
v_of_point.values.push_back(cur_pts[j].y);
ROS_ASSERT(inBorder(cur_pts[j]));
}
}
else if (STEREO_TRACK)
{
auto r_un_pts = trackerData[1].undistortedPoints();
auto &ids = trackerData[0].ids;
for (unsigned int j = 0; j < ids.size(); j++)
{
if (r_status[j])
{
int p_id = ids[j];
hash_ids[i].insert(p_id);
geometry_msgs::Point32 p;
p.x = r_un_pts[j].x;
p.y = r_un_pts[j].y;
p.z = 1;
feature_points->points.push_back(p);
id_of_point.values.push_back(p_id * NUM_OF_CAM + i);
}
}
}
}
feature_points->channels.push_back(id_of_point);
feature_points->channels.push_back(u_of_point);
feature_points->channels.push_back(v_of_point);
ROS_DEBUG("publish %f, at %f", feature_points->header.stamp.toSec(), ros::Time::now().toSec());
pub_img.publish(feature_points);
if (SHOW_TRACK) // show the tracked-feature image
{
ptr = cv_bridge::cvtColor(ptr, sensor_msgs::image_encodings::BGR8);
//cv::Mat stereo_img(ROW * NUM_OF_CAM, COL, CV_8UC3);
cv::Mat stereo_img = ptr->image;
for (int i = 0; i < NUM_OF_CAM; i++)
{
cv::Mat tmp_img = stereo_img.rowRange(i * ROW, (i + 1) * ROW);
cv::cvtColor(show_img, tmp_img, CV_GRAY2RGB);
if (i != 1 || !STEREO_TRACK)
{
for (unsigned int j = 0; j < trackerData[i].cur_pts.size(); j++)
{
double len = std::min(1.0, 1.0 * trackerData[i].track_cnt[j] / WINDOW_SIZE);
cv::circle(tmp_img, trackerData[i].cur_pts[j], 2, cv::Scalar(255 * (1 - len), 0, 255 * len), 2); //cv::Scalar(b,g,r)
//char name[10];
//sprintf(name, "%d", trackerData[i].ids[j]);
//cv::putText(tmp_img, name, trackerData[i].cur_pts[j], cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
}
}
else
{
for (unsigned int j = 0; j < trackerData[i].cur_pts.size(); j++)
{
if (r_status[j])
{
cv::circle(tmp_img, trackerData[i].cur_pts[j], 2, cv::Scalar(0, 255, 0), 2);
cv::line(stereo_img, trackerData[i - 1].cur_pts[j], trackerData[i].cur_pts[j] + cv::Point2f(0, ROW), cv::Scalar(0, 255, 0));
}
}
}
}
/*
cv::imshow("vis", stereo_img);
cv::waitKey(5);
*/
pub_match.publish(ptr->toImageMsg());
}
}
ROS_INFO("whole feature tracker processing costs: %f", t_r.toc());
}
int main(int argc, char **argv)
{
ros::init(argc, argv, "feature_tracker");
ros::NodeHandle n("~");
ros::console::set_logger_level(ROSCONSOLE_DEFAULT_NAME, ros::console::levels::Info);
readParameters(n);
for (int i = 0; i < NUM_OF_CAM; i++)
trackerData[i].readIntrinsicParameter(CAM_NAMES[i]); // read each camera's calibration file (the fisheye mask is added below)
if(FISHEYE) // add the fisheye mask
{
for (int i = 0; i < NUM_OF_CAM; i++)
{
trackerData[i].fisheye_mask = cv::imread(FISHEYE_MASK, 0); // store it in trackerData[i].fisheye_mask
if(!trackerData[i].fisheye_mask.data)
{
ROS_INFO("load mask fail");
ROS_BREAK();
}
else
ROS_INFO("load mask success");
}
}
ros::Subscriber sub_img = n.subscribe(IMAGE_TOPIC, 100, img_callback); // subscribe to the image topic
pub_img = n.advertise<sensor_msgs::PointCloud>("feature", 1000); // publish the feature point cloud
pub_match = n.advertise<sensor_msgs::Image>("feature_img", 1000); // publish the feature image; to be examined further later
/*
if (SHOW_TRACK)
cv::namedWindow("vis", cv::WINDOW_NORMAL);
*/
ros::spin();
return 0;
}
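A note on the message published above: each feature's id channel stores p_id * NUM_OF_CAM + i, the points hold undistorted coordinates on the normalized plane (z = 1), and the u/v channels hold raw pixel coordinates. A minimal consumer-side sketch of how a subscriber might unpack it (decode_features is a hypothetical name; it assumes the channel order id, u, v used above):

#include <sensor_msgs/PointCloud.h>
#include <cstdio>

void decode_features(const sensor_msgs::PointCloud &msg, int num_of_cam)
{
    const std::vector<float> &ids = msg.channels[0].values; // channel 0: packed ids
    for (size_t j = 0; j < msg.points.size(); j++)
    {
        int packed = static_cast<int>(ids[j]);
        int p_id = packed / num_of_cam; // global feature id
        int cam = packed % num_of_cam;  // index of the camera that saw it
        std::printf("feature %d from cam %d at (%f, %f) on the normalized plane\n",
                    p_id, cam, msg.points[j].x, msg.points[j].y);
    }
}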
feature_tracker.cpp
#include "feature_tracker.h"
int FeatureTracker::n_id = 0;
bool inBorder(const cv::Point2f &pt) // check whether a point lies inside the image border
{
const int BORDER_SIZE = 1;
int img_x = cvRound(pt.x);
int img_y = cvRound(pt.y);
return BORDER_SIZE <= img_x && img_x < COL - BORDER_SIZE && BORDER_SIZE <= img_y && img_y < ROW - BORDER_SIZE;
}
void reduceVector(vector<cv::Point2f> &v, vector<uchar> status) // drop the points whose status is 0, i.e. the unmatched points
{
int j = 0;
for (int i = 0; i < int(v.size()); i++)
if (status[i])
v[j++] = v[i];
v.resize(j);
}
void reduceVector(vector<int> &v, vector<uchar> status) // drop the entries at positions where status is 0
{
int j = 0;
for (int i = 0; i < int(v.size()); i++)
if (status[i])
v[j++] = v[i];
v.resize(j);
}
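// Example: with status = {1, 0, 1}, reduceVector compacts {(1,1), (2,2), (3,3)} to
// {(1,1), (3,3)} and {7, 8, 9} to {7, 9} in place; every parallel container
// (prev_pts, cur_pts, forw_pts, ids, track_cnt) must be reduced with the same status
// vector so they stay index-aligned.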
FeatureTracker::FeatureTracker()
{
}
void FeatureTracker::setMask()
{
if(FISHEYE)
mask = fisheye_mask.clone();
else
mask = cv::Mat(ROW, COL, CV_8UC1, cv::Scalar(255)); // initialize the mask to 255
// prefer to keep features that have been tracked for a long time
vector<pair<int, pair<cv::Point2f, int>>> cnt_pts_id;
for (unsigned int i = 0; i < forw_pts.size(); i++)
cnt_pts_id.push_back(make_pair(track_cnt[i], make_pair(forw_pts[i], ids[i])));
// sort the points extracted into forw_pts by track_cnt[i] in descending order: sort(begin, end, compare)
sort(cnt_pts_id.begin(), cnt_pts_id.end(), [](const pair<int, pair<cv::Point2f, int>> &a, const pair<int, pair<cv::Point2f, int>> &b)
{
return a.first > b.first;
});
forw_pts.clear();
ids.clear();
track_cnt.clear();
for (auto &it : cnt_pts_id)
{
if (mask.at<uchar>(it.second.first) == 255) // read the mask at this point; proceed only if it is still 255
{
forw_pts.push_back(it.second.first); // keep points that pass the mask test
ids.push_back(it.second.second); // keep the corresponding id
track_cnt.push_back(it.first); // keep the corresponding track count
cv::circle(mask, it.second.first, MIN_DIST, 0, -1); // zero the mask within MIN_DIST of this point
}
}
}
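// The masking idiom above: cv::circle with thickness -1 fills a disk of radius
// MIN_DIST with 0, so any later (lower-track-count) candidate within MIN_DIST of a
// kept point fails the 255 test; readImage() then passes the same mask to
// goodFeaturesToTrack(), which only detects corners where the mask is non-zero.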
void FeatureTracker::addPoints()
{
for (auto &p : n_pts)
{
forw_pts.push_back(p);
ids.push_back(-1);
track_cnt.push_back(1);
}
}
void FeatureTracker::readImage(const cv::Mat &_img)
{
cv::Mat img;
TicToc t_r;
if (EQUALIZE) // check the histogram-equalization flag
{
cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(3.0, cv::Size(8, 8));
TicToc t_c;
clahe->apply(_img, img);
ROS_DEBUG("CLAHE costs: %fms", t_c.toc());
}
else
img = _img;
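// Three-frame bookkeeping: forw_img is the newest incoming frame, cur_img the
// previous frame, and prev_img the frame at the time of the last publish.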
if (forw_img.empty())
{
prev_img = cur_img = forw_img = img;
}
else
{
forw_img = img;
}
forw_pts.clear();
if (cur_pts.size() > 0) // enter only if feature points were already extracted in the previous frame
{
TicToc t_o;
vector<uchar> status;
vector<float> err;
cv::calcOpticalFlowPyrLK(cur_img, forw_img, cur_pts, forw_pts, status, err, cv::Size(21, 21), 3);
// Lucas-Kanade optical flow; the arguments are: previous image, current image, previous feature points, tracked feature points, status, error, per-level search window size, maximum pyramid level
// status: one flag per point (1 if the flow for that feature was found, 0 otherwise)
// err: float, the difference between the original patch and the patch at the tracked point
for (int i = 0; i < int(forw_pts.size()); i++)
if (status[i] && !inBorder(forw_pts[i])) // check whether each tracked point lies inside the image border (1 <= x < COL-1, 1 <= y < ROW-1)
status[i] = 0;
reduceVector(prev_pts, status);
reduceVector(cur_pts, status);
reduceVector(forw_pts, status);
reduceVector(ids, status);
reduceVector(track_cnt, status); // all the reduceVector calls above drop the unmatched points and their ids/counts
ROS_DEBUG("temporal optical flow costs: %fms", t_o.toc());
}
if (PUB_THIS_FRAME)
{
rejectWithF(); // remove outliers
for (auto &n : track_cnt)
n++;
ROS_DEBUG("set mask begins");
TicToc t_m;
setMask(); // mask out the neighborhoods of kept features so new detections stay spread out
ROS_DEBUG("set mask costs %fms", t_m.toc());
ROS_DEBUG("detect feature begins");
TicToc t_t;
int n_max_cnt = MAX_CNT - static_cast<int>(forw_pts.size()); // MAX_CNT is the maximum number of optical-flow feature points
if (n_max_cnt > 0)
{
if(mask.empty())
cout << "mask is empty " << endl;
if (mask.type() != CV_8UC1)
cout << "mask type wrong " << endl;
if (mask.size() != forw_img.size())
cout << "wrong size " << endl;
cv::goodFeaturesToTrack(forw_img, n_pts, MAX_CNT - forw_pts.size(), 0.1, MIN_DIST, mask);
// detect just enough new corners to bring the total back up to MAX_CNT
// (note: forw_pts.clear() was called earlier, but calcOpticalFlowPyrLK() refills forw_pts,
// so forw_pts.size() here is the number of successfully tracked points, not zero;
// if MAX_CNT points are already tracked, no new features are detected, which is intended)
}
else
n_pts.clear();
ROS_DEBUG("detect feature costs: %fms", t_t.toc());
ROS_DEBUG("add feature begins");
TicToc t_a;
addPoints();
ROS_DEBUG("selectFeature costs: %fms", t_a.toc());
prev_img = forw_img;
prev_pts = forw_pts;
}
cur_img = forw_img;
cur_pts = forw_pts;
}
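// (see the plain-OpenCV sketch of this track-then-replenish loop at the end of this file)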
void FeatureTracker::rejectWithF()
{
if (forw_pts.size() >= 8)
{
ROS_DEBUG("FM ransac begins");
TicToc t_f;
vector<cv::Point2f> un_prev_pts(prev_pts.size()), un_forw_pts(forw_pts.size());
for (unsigned int i = 0; i < prev_pts.size(); i++)
{
Eigen::Vector3d tmp_p;
m_camera->liftProjective(Eigen::Vector2d(prev_pts[i].x, prev_pts[i].y), tmp_p);
tmp_p.x() = FOCAL_LENGTH * tmp_p.x() / tmp_p.z() + COL / 2.0;
tmp_p.y() = FOCAL_LENGTH * tmp_p.y() / tmp_p.z() + ROW / 2.0;
un_prev_pts[i] = cv::Point2f(tmp_p.x(), tmp_p.y());
m_camera->liftProjective(Eigen::Vector2d(forw_pts[i].x, forw_pts[i].y), tmp_p);
tmp_p.x() = FOCAL_LENGTH * tmp_p.x() / tmp_p.z() + COL / 2.0;
tmp_p.y() = FOCAL_LENGTH * tmp_p.y() / tmp_p.z() + ROW / 2.0;
un_forw_pts[i] = cv::Point2f(tmp_p.x(), tmp_p.y());
}
vector<uchar> status;
// remove outliers
cv::findFundamentalMat(un_prev_pts, un_forw_pts, cv::FM_RANSAC, F_THRESHOLD, 0.99, status);
int size_a = prev_pts.size();
reduceVector(prev_pts, status);
reduceVector(cur_pts, status);
reduceVector(forw_pts, status);
reduceVector(ids, status);
reduceVector(track_cnt, status);
ROS_DEBUG("FM ransac: %d -> %lu: %f", size_a, forw_pts.size(), 1.0 * forw_pts.size() / size_a);
ROS_DEBUG("FM ransac costs: %fms", t_f.toc());
}
}
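// Note: the liftProjective-then-reproject pattern above maps both point sets onto a
// virtual undistorted pinhole image with focal length FOCAL_LENGTH and principal
// point (COL/2, ROW/2), so the RANSAC threshold F_THRESHOLD is measured in
// comparable pixel units regardless of the actual camera model.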
bool FeatureTracker::updateID(unsigned int i)
{
if (i < ids.size())
{
if (ids[i] == -1)
ids[i] = n_id++;
return true;
}
else
return false;
}
void FeatureTracker::readIntrinsicParameter(const string &calib_file)
{
ROS_INFO("reading paramerter of camera %s", calib_file.c_str());
m_camera = CameraFactory::instance()->generateCameraFromYamlFile(calib_file); //读内参文件
}
void FeatureTracker::showUndistortion(const string &name) // visualize image undistortion
{
cv::Mat undistortedImg(ROW + 600, COL + 600, CV_8UC1, cv::Scalar(0));
vector<Eigen::Vector2d> distortedp, undistortedp;
for (int i = 0; i < COL; i++)
for (int j = 0; j < ROW; j++)
{
Eigen::Vector2d a(i, j);
Eigen::Vector3d b;
m_camera->liftProjective(a, b);
distortedp.push_back(a);
undistortedp.push_back(Eigen::Vector2d(b.x() / b.z(), b.y() / b.z()));
//printf("%f,%f->%f,%f,%f\n)\n", a.x(), a.y(), b.x(), b.y(), b.z());
}
for (int i = 0; i < int(undistortedp.size()); i++)
{
cv::Mat pp(3, 1, CV_32FC1);
pp.at<float>(0, 0) = undistortedp[i].x() * FOCAL_LENGTH + COL / 2;
pp.at<float>(1, 0) = undistortedp[i].y() * FOCAL_LENGTH + ROW / 2;
pp.at<float>(2, 0) = 1.0;
//cout << trackerData[0].K << endl;
//printf("%lf %lf\n", p.at(1, 0), p.at(0, 0));
//printf("%lf %lf\n", pp.at(1, 0), pp.at(0, 0));
if (pp.at<float>(1, 0) + 300 >= 0 && pp.at<float>(1, 0) + 300 < ROW + 600 && pp.at<float>(0, 0) + 300 >= 0 && pp.at<float>(0, 0) + 300 < COL + 600)
{
undistortedImg.at<uchar>(pp.at<float>(1, 0) + 300, pp.at<float>(0, 0) + 300) = cur_img.at<uchar>(distortedp[i].y(), distortedp[i].x());
}
else
{
//ROS_ERROR("(%f %f) -> (%f %f)", distortedp[i].y, distortedp[i].x, pp.at(1, 0), pp.at(0, 0));
}
}
cv::imshow(name, undistortedImg);
cv::waitKey(0);
}
vector<cv::Point2f> FeatureTracker::undistortedPoints()
{
vector<cv::Point2f> un_pts;
//cv::undistortPoints(cur_pts, un_pts, K, cv::Mat());
for (unsigned int i = 0; i < cur_pts.size(); i++)
{
Eigen::Vector2d a(cur_pts[i].x, cur_pts[i].y);
Eigen::Vector3d b;
m_camera->liftProjective(a, b);
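// b is the back-projected ray in the camera frame; dividing by b.z() below lands on the normalized image plane z = 1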
un_pts.push_back(cv::Point2f(b.x() / b.z(), b.y() / b.z()));
}
return un_pts;
}
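Stripped of ROS, masking, and undistortion, the core of readImage() is a track-then-replenish loop: track existing points forward with LK optical flow, drop failures, and top the set back up with new corners. A minimal self-contained sketch in plain OpenCV (trackAndReplenish, max_cnt, and min_dist are assumed stand-ins for the VINS names and parameters, not the original code):

#include <opencv2/opencv.hpp>
#include <vector>

// Track pts from cur into forw, keep the survivors, then detect new corners
// so the total comes back up to max_cnt.
void trackAndReplenish(const cv::Mat &cur, const cv::Mat &forw,
                       std::vector<cv::Point2f> &pts)
{
    const int max_cnt = 150;    // assumed feature budget
    const double min_dist = 30; // assumed minimum spacing between corners
    if (!pts.empty())
    {
        std::vector<cv::Point2f> next_pts;
        std::vector<uchar> status;
        std::vector<float> err;
        cv::calcOpticalFlowPyrLK(cur, forw, pts, next_pts, status, err,
                                 cv::Size(21, 21), 3);
        std::vector<cv::Point2f> kept;
        for (size_t i = 0; i < next_pts.size(); i++)
            if (status[i]) // keep only successfully tracked points
                kept.push_back(next_pts[i]);
        pts.swap(kept);
    }
    if (static_cast<int>(pts.size()) < max_cnt) // replenish with new corners
    {
        std::vector<cv::Point2f> n_pts;
        cv::goodFeaturesToTrack(forw, n_pts, max_cnt - static_cast<int>(pts.size()),
                                0.01, min_dist);
        pts.insert(pts.end(), n_pts.begin(), n_pts.end());
    }
}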