《视觉SLAM十四讲》 (14 Lectures on Visual SLAM): Chapter 7 Exercises

 

1. Besides the ORB features used in this book, what other kinds of feature points can you find? Describe how SIFT or SURF works, and compare their strengths and weaknesses with ORB.

Besides ORB, the usual candidates are SIFT, SURF, BRISK, and AKAZE, all of which are already implemented in OpenCV.

SIFT (Scale-Invariant Feature Transform) works roughly as follows: it first searches image locations over all scales, using a difference-of-Gaussian function to identify candidate interest points that are invariant to scale and rotation; it then fits a fine model at each candidate location to determine position and scale; next, based on local image gradients, it assigns each keypoint one or more orientations; finally, it describes each keypoint by measuring the local image gradients in its neighborhood at the selected scale.

SURF (Speeded-Up Robust Features) takes the opposite approach to building an image pyramid: instead of repeatedly resizing the image, it varies the filter size and the amount of blur to obtain the different octaves, while its keypoint search itself is similar to SIFT's.

SIFT and SURF both deliver consistently high accuracy, SIFT being somewhat more accurate than SURF, but both are very slow. ORB, by contrast, is much faster but more fragile, and its error rate is considerably higher than either of the other two.

 

2. Write a program that calls other kinds of feature points in OpenCV, and measure the time it takes to extract 1000 feature points on your machine.

My machine is an Intel NUC: Intel® Core™ i5-7260U CPU @ 2.20GHz × 4, 64-bit, 8 GB RAM, 250 GB SSD.

The system is Ubuntu 16.04, the IDE is KDevelop, and the program uses OpenCV 3.2 with the contrib modules.

This is the test image (a casual snapshot):

[Figure 1: the test image]

The CMakeLists.txt:

cmake_minimum_required(VERSION 2.6)
project(homeworks7_2)

set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS "-std=c++11 -O3")

set(OpenCV_DIR "/usr/local/opencv3/share/OpenCV")
find_package(OpenCV REQUIRED)

include_directories(
  ${OpenCV_INCLUDE_DIRS}
)

add_executable(homeworks7_2 homeworks7_2.cpp)

target_link_libraries(homeworks7_2 ${OpenCV_LIBS})

install(TARGETS homeworks7_2 RUNTIME DESTINATION bin)

The .cpp file:

#include <iostream>
#include <ctime>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/xfeatures2d.hpp>

using namespace std;

int main(int argc, char **argv) {
    
    cv::Mat img = cv::imread("/home/fuhang/projects/homeworks7_2/lab.png", CV_LOAD_IMAGE_COLOR);
    
    cv::namedWindow("img", 0);
    cv::resizeWindow("img", 1280, 1024);
    cv::imshow("img", img);
    cv::waitKey(0);
    
    std::vector<cv::KeyPoint> keypoint;
    
    // ORB
    cv::Ptr<cv::ORB> orb = cv::ORB::create(1000);
    clock_t t = clock();
    orb->detect(img, keypoint);
    cout << "*******************************" << endl;
    cout << "Found " << keypoint.size() << " keypoints;" << endl;
    cout << "ORB took " << 1000 * (clock() - t) / (double)CLOCKS_PER_SEC << "ms" << endl;
    
    cv::Mat img_orb;
    cv::drawKeypoints(img, keypoint, img_orb, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::imshow("img", img_orb);
    cv::waitKey(0);
    
    // SIFT
    cv::Ptr<cv::xfeatures2d::SIFT> sift = cv::xfeatures2d::SIFT::create(1000);
    t = clock();
    
    sift->detect(img, keypoint);
    cout << "*******************************" << endl;
    cout << "Found " << keypoint.size() << " keypoints;" << endl;
    cout << "SIFT took " << 1000 * (clock() - t) / (double)CLOCKS_PER_SEC << "ms" << endl;
    
    cv::Mat img_sift;
    cv::drawKeypoints(img, keypoint, img_sift, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::imshow("img", img_sift);
    cv::waitKey(0);
    
    // SURF has no keypoint-count parameter; the Hessian threshold (2800) was
    // presumably tuned to yield roughly 1000 keypoints on this image
    cv::Ptr<cv::xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create(2800);
    t = clock();
    
    surf->detect(img, keypoint);
    cout << "*******************************" << endl;
    cout << "Found " << keypoint.size() << " keypoints;" << endl;
    cout << "SURF took " << 1000 * (clock() - t) / (double)CLOCKS_PER_SEC << "ms" << endl;
    
    cv::Mat img_surf;
    cv::drawKeypoints(img, keypoint, img_surf, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::imshow("img", img_surf);
    cv::waitKey(0);
    
    // BRISK: likewise, the detection threshold (68) is tuned to land near 1000 keypoints
    cv::Ptr<cv::BRISK> brisk = cv::BRISK::create(68);
    t = clock();
    brisk->detect(img, keypoint);
    cout << "*******************************" << endl;
    cout << "Found " << keypoint.size() << " keypoints;" << endl;
    cout << "BRISK took " << 1000 * (clock() - t) / (double)CLOCKS_PER_SEC << "ms" << endl;
    
    cv::Mat img_brisk;
    cv::drawKeypoints(img, keypoint, img_brisk, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::imshow("img", img_brisk);
    cv::waitKey(0);
    
    // AKAZE: the threshold (0.005f) is tuned the same way
    cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create(2, 0, 3, 0.005f, 4, 4, 3);
    t = clock();
    akaze->detect(img, keypoint);
    cout << "*******************************" << endl;
    cout << "Found " << keypoint.size() << " keypoints;" << endl;
    cout << "AKAZE took " << 1000 * (clock() - t) / (double)CLOCKS_PER_SEC << "ms" << endl;
    
    cv::Mat img_akaze;
    cv::drawKeypoints(img, keypoint, img_akaze, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::imshow("img", img_akaze);
    cv::waitKey(0);
   
    return 0;
}

When running the code, press Enter to step through each algorithm's keypoint visualization; for space reasons those images are omitted here. The point is to measure how long each algorithm takes to extract 1000 feature points; the output is shown below:

[Figure 2: timing output of the five detectors]

Running the code shows that feature detection is a balance between accuracy and speed. The SIFT family is clearly more accurate and stable than fast binary-descriptor methods such as ORB and BRISK, but markedly slower; SURF is an accelerated version of SIFT, yet still slow. AKAZE performs far better than the others on SAR images with heavy speckle noise. That said, now that deep learning dominates, classical local feature extraction is gradually leaving the stage.

 

3. The ORB features that OpenCV extracts are not distributed very evenly over the image. Is there a way to make the distribution uniform?

A web search shows quite a few people have worked on this. ORB_SLAM2 (2016) already implements uniform keypoint distribution with a quadtree, and a paper from May 2018 uses a quadtree as well, with some differences; if interested, see Liu's master's thesis 《ORB特征四叉树均匀分布算法》. Most approaches recursively split each pyramid level with a quadtree and keep only the best keypoint in each cell.

Here I unpack the various ORB functions into a single main function, so that beginners like me can see the execution order at a glance.

The CMakeLists.txt:

cmake_minimum_required(VERSION 2.6)
project(homeworks7_3)

set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS "-std=c++11 -O3")

set(OpenCV_DIR "/usr/local/opencv3/share/OpenCV")
find_package(OpenCV REQUIRED)

include_directories(
  ${OpenCV_INCLUDE_DIRS}
)

add_executable(homeworks7_3 homeworks.cpp)
target_link_libraries(homeworks7_3 ${OpenCV_LIBS})

install(TARGETS homeworks7_3 RUNTIME DESTINATION bin)

homeworks.cpp:

#include <iostream>
#include <cmath>
#include <vector>
#include <list>
#include <algorithm>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace std;
using namespace cv;

int features_num = 1000;         // maximum number of features
float scale_factor = 1.2f;       // scale ratio between pyramid levels
int levels_num = 8;              // number of pyramid levels
int default_fast_threshold = 20; // FAST corner detection threshold
int min_fast_threshold = 7;      // minimum FAST threshold
int EDGE_THRESHOLD = 19;         // border margin to suppress edge effects
int PATCH_SIZE = 31;
int HALF_PATH_SIZE = 15;

// A quadtree node: stores the keypoints that fall into its cell
class ExtractorNode{
public:
  ExtractorNode() : bNoMore(false) {}
  void DivideNode(ExtractorNode &n1, ExtractorNode &n2, ExtractorNode &n3, ExtractorNode &n4); // split into four children
  std::vector<cv::KeyPoint> vKeys;        // keypoints belonging to this node
  cv::Point2i UL, UR, BL, BR;             // corner pixels: upper/bottom left/right
  std::list<ExtractorNode>::iterator lit; // iterator to this node in the node list
  bool bNoMore;                           // true if the node holds exactly one keypoint
};


// split a node's cell into four equal quadrants and hand its keypoints down
void ExtractorNode::DivideNode(ExtractorNode& n1, ExtractorNode& n2, ExtractorNode& n3, ExtractorNode& n4)
{
  /*
   *  ---------------------------------
   *  |               |               |
   *  |      n1       |      n2       |
   *  |               |               |
   *  |---------------|---------------|
   *  |               |               |
   *  |      n3       |      n4       |
   *  |               |               |
   *  ---------------------------------
   */
  const int halfx = ceil(static_cast<float>(UR.x - UL.x) / 2);
  const int halfy = ceil(static_cast<float>(BR.y - UL.y) / 2);
  
  // cut the rectangle into four
  n1.UL = UL;
  n1.UR = cv::Point2i(UL.x + halfx, UL.y);
  n1.BL = cv::Point2i(UL.x, UL.y + halfy);
  n1.BR = cv::Point2i(UL.x + halfx, UL.y + halfy);
  n1.vKeys.reserve(vKeys.size());
  
  n2.UL = n1.UR;
  n2.UR = UR;
  n2.BL = n1.BR;
  n2.BR = cv::Point2i(UR.x, UL.y + halfy);
  n2.vKeys.reserve(vKeys.size());
  
  n3.UL = n1.BL;
  n3.UR = n1.BR;
  n3.BL = BL;
  n3.BR = cv::Point2i(n1.BR.x, BL.y);
  n3.vKeys.reserve(vKeys.size());
  
  n4.UL = n3.UR;
  n4.UR = n2.BR;
  n4.BL = n3.BR;
  n4.BR = BR;
  n4.vKeys.reserve(vKeys.size());
  
  // hand each keypoint to the child whose quadrant contains it
  for (size_t i = 0; i < vKeys.size(); i++)
  {
    const cv::KeyPoint &kp = vKeys[i];
    if (kp.pt.x < n1.UR.x)
    {
      if (kp.pt.y < n1.BR.y)
        n1.vKeys.push_back(kp);
      else
        n3.vKeys.push_back(kp);
    }
    else if (kp.pt.y < n1.BR.y)
      n2.vKeys.push_back(kp);
    else
      n4.vKeys.push_back(kp);
  }
  
  if (n1.vKeys.size() == 1) n1.bNoMore = true;
  if (n2.vKeys.size() == 1) n2.bNoMore = true;
  if (n3.vKeys.size() == 1) n3.bNoMore = true;
  if (n4.vKeys.size() == 1) n4.bNoMore = true;
}


int main(int argc, char **argv) {
    
    // the exact image path was lost from the original post; reusing the Q2 test image
    cv::Mat img = cv::imread("/home/fuhang/projects/homeworks7_2/lab.png", CV_LOAD_IMAGE_GRAYSCALE);
    
    vector<int> features_num_per_level; // number of features kept per level
    vector<int> umax;                   // for orientation: max u for each v on the circular patch
    vector<float> vec_scale_factor;     // per-level scale factors
    
    // build the image pyramid
    
    // initialize the scale factor of every level
    vec_scale_factor.resize(levels_num);
    vec_scale_factor[0] = 1.0f;
    for (int i = 1; i < levels_num; i++)
      vec_scale_factor[i] = vec_scale_factor[i-1] * scale_factor;
    
    std::vector<cv::Mat> vec_image_pyramid(levels_num); // pyramid images
    
    for (int level = 0; level < levels_num; level++)
    {
      float scale = 1.0f / vec_scale_factor[level];
      cv::Size sz(cvRound((float)img.cols * scale), cvRound((float)img.rows * scale));
      if (level == 0)
        vec_image_pyramid[level] = img;
      else
        cv::resize(vec_image_pyramid[level-1], vec_image_pyramid[level], sz, 0, 0, cv::INTER_LINEAR);
    }
    
    std::vector<std::vector<cv::KeyPoint>> all_keypoints; // keypoints of every level
    all_keypoints.resize(levels_num);
    
    const float border_width = 30; // grid cell size
    
    for (int level = 0; level < levels_num; level++)
    {
      // stay away from the image border to avoid edge effects
      const int min_border_x = EDGE_THRESHOLD - 3;
      const int min_border_y = min_border_x;
      const int max_border_x = vec_image_pyramid[level].cols - EDGE_THRESHOLD + 3;
      const int max_border_y = vec_image_pyramid[level].rows - EDGE_THRESHOLD + 3;
      
      std::vector<cv::KeyPoint> vec_to_distribute_keys; // candidate keypoints of this level
      vec_to_distribute_keys.reserve(features_num * 10);
      // overall width and height of the detection area
      const float width = max_border_x - min_border_x;
      const float height = max_border_y - min_border_y;
      // number of grid rows and columns
      const int cols = width / border_width;
      const int rows = height / border_width;
      // recompute the actual cell size
      const int width_cell = ceil(width / cols);
      const int height_cell = ceil(height / rows);
      cout << "Level " << level+1 << " is divided into " << rows << " rows and " << cols << " cols, ";
      
      // run detection cell by cell
      for (int i = 0; i < rows; i++)
      {
	const float ini_y = min_border_y + i * height_cell;
	float max_y = ini_y + height_cell + 6; // neighboring cells overlap by 6 pixels
	if (ini_y >= max_border_y - 3)
	  continue;
	if (max_y >= max_border_y)
	  max_y = max_border_y;
	
	for (int j = 0; j < cols; j++)
	{
	  const float ini_x = min_border_x + j * width_cell;
	  float max_x = ini_x + width_cell + 6;
	  if (ini_x >= max_border_x - 6)
	    continue;
	  if (max_x > max_border_x)
	    max_x = max_border_x;
	  
	  std::vector<cv::KeyPoint> vec_keys_cell; // detect FAST keypoints inside this cell
	  cv::FAST(vec_image_pyramid[level].rowRange(ini_y, max_y).colRange(ini_x, max_x), vec_keys_cell, default_fast_threshold, true);
	  
	  // if FAST finds nothing, retry with the lower threshold
	  if (vec_keys_cell.empty())
	  {
	    cv::FAST(vec_image_pyramid[level].rowRange(ini_y, max_y).colRange(ini_x, max_x), vec_keys_cell, min_fast_threshold, true);
	  }
	  // convert the keypoint positions
	  if (!vec_keys_cell.empty())
	  {
	    for (std::vector<cv::KeyPoint>::iterator vit = vec_keys_cell.begin(); vit != vec_keys_cell.end(); vit++)
	    {
	      // restore coordinates relative to the whole detection area
	      (*vit).pt.x += j * width_cell;
	      (*vit).pt.y += i * height_cell;
	      vec_to_distribute_keys.push_back(*vit);
	    }
	  }
	}
      }
      
      cout << "this level has " << vec_to_distribute_keys.size() << " candidate keypoints" << endl;
      
      std::vector<cv::KeyPoint> & keypoints = all_keypoints[level];
      keypoints.reserve(features_num);
      
      // quadtree distribution starts here
      // number of initial nodes; since width and height are close, this is usually 1
      const int init_node_num = round(static_cast<float>(max_border_x - min_border_x) / (max_border_y - min_border_y));
      cout << "initially " << init_node_num << " node(s), ";
      // horizontal spacing between initial nodes
      const float interval_x = static_cast<float>(max_border_x - min_border_x) / init_node_num;
      cout << "node spacing " << interval_x << ", ";
      
      // the quadtree itself
      // pointers to the initial nodes
      std::vector<ExtractorNode*> init_nodes;
      init_nodes.resize(init_node_num);
      // list of nodes produced by splitting
      std::list<ExtractorNode> list_nodes;
      // set up the initial nodes
      for (int i = 0; i < init_node_num; i++)
      {
	ExtractorNode ni;
	ni.UL = cv::Point2i(interval_x * static_cast<float>(i), 0);
	ni.UR = cv::Point2i(interval_x * static_cast<float>(i+1), 0);
	ni.BL = cv::Point2i(ni.UL.x, max_border_y - min_border_y);
	ni.BR = cv::Point2i(ni.UR.x, max_border_y - min_border_y);
	ni.vKeys.reserve(vec_to_distribute_keys.size());
	
	list_nodes.push_back(ni);
	init_nodes[i] = &list_nodes.back(); // pointer to the element just appended
      }
      
      // assign every candidate keypoint to its initial node
      for (size_t i = 0; i < vec_to_distribute_keys.size(); i++)
      {
	const cv::KeyPoint &kp = vec_to_distribute_keys[i];
	init_nodes[kp.pt.x / interval_x]->vKeys.push_back(kp);
      }
      
      // iterate over the node list
      std::list<ExtractorNode>::iterator lit = list_nodes.begin();
      while (lit != list_nodes.end())
      {
	// a node with a single keypoint is not split any further
	if (lit->vKeys.size() == 1)
	{
	  lit->bNoMore = true;
	  lit++;
	}
	else if (lit->vKeys.empty())
	  lit = list_nodes.erase(lit); // drop nodes without keypoints
	else
	  lit++;
      }
      
      // termination flag
      bool is_finish = false;
      // iteration counter
      int iteration = 0;
      
      // pairs of (keypoint count, node), used to split the fullest nodes first
      std::vector<std::pair<int, ExtractorNode*>> keys_size_and_node;
      keys_size_and_node.reserve(list_nodes.size() * 4);
      
      while (!is_finish)
      {
	iteration++;
	// node count before this round, to check whether any node was split
	int pre_size = list_nodes.size();
	
	lit = list_nodes.begin();
	// number of nodes that could still be split
	int to_expand_num = 0;
	keys_size_and_node.clear();
	
	while (lit != list_nodes.end())
	{
	  if (lit->bNoMore)
	  {
	    lit++;
	    continue;
	  }
	  else
	  {
	    // more than one keypoint: split the node
	    ExtractorNode n1, n2, n3, n4;
	    lit->DivideNode(n1, n2, n3, n4);
	    
	    // keep only the children that received keypoints
	    if (n1.vKeys.size() > 0)
	    {
	      list_nodes.push_front(n1);
	      if (n1.vKeys.size() > 1)
	      {
		to_expand_num++;
		keys_size_and_node.push_back(std::make_pair(n1.vKeys.size(), &list_nodes.front()));
		list_nodes.front().lit = list_nodes.begin();
	      }
	    }
	    
	    if (n2.vKeys.size() > 0)
	    {
	      list_nodes.push_front(n2);
	      if (n2.vKeys.size() > 1)
	      {
		to_expand_num++;
		keys_size_and_node.push_back(std::make_pair(n2.vKeys.size(), &list_nodes.front()));
		list_nodes.front().lit = list_nodes.begin();
	      }
	    }
	    
	    if (n3.vKeys.size() > 0)
	    {
	      list_nodes.push_front(n3);
	      if (n3.vKeys.size() > 1)
	      {
		to_expand_num++;
		keys_size_and_node.push_back(std::make_pair(n3.vKeys.size(), &list_nodes.front()));
		list_nodes.front().lit = list_nodes.begin();
	      }
	    }
	    
	    if (n4.vKeys.size() > 0)
	    {
	      list_nodes.push_front(n4);
	      if (n4.vKeys.size() > 1)
	      {
		to_expand_num++;
		keys_size_and_node.push_back(std::make_pair(n4.vKeys.size(), &list_nodes.front()));
		list_nodes.front().lit = list_nodes.begin();
	      }
	    }
	    
	    lit = list_nodes.erase(lit);
	    continue;
	  }
	}

	
	// estimate how many features each pyramid level should keep (a geometric series over the scales)
	features_num_per_level.resize(levels_num);
	float factor = 1.0f / scale_factor;
	float desired_features_per_scale = features_num * (1 - factor) / (1 - (float)pow((double)factor, (double)levels_num));
	int sum_features = 0;
	for (int i = 0; i < levels_num - 1; i++)
	{
	  features_num_per_level[i] = cvRound(desired_features_per_scale);
	  sum_features += features_num_per_level[i];
	  desired_features_per_scale *= factor;
	}
	features_num_per_level[levels_num - 1] = std::max(features_num - sum_features, 0);
	
	// finish when there are enough nodes, or no node was split in this round
	if ((int)list_nodes.size() >= features_num_per_level[level] || (int)list_nodes.size() == pre_size)
	{
	  is_finish = true;
	}
	else if (((int)list_nodes.size() + to_expand_num * 3) > features_num_per_level[level]) // splitting every expandable node again may overshoot the target, so do one last, sorted pass
	{
	  
	  while (!is_finish)
	  {
	    pre_size = list_nodes.size();
	    
	    std::vector<std::pair<int, ExtractorNode*>> prev_size_and_node = keys_size_and_node;
	    keys_size_and_node.clear();
	    
	    // split the nodes holding the most keypoints first
	    sort(prev_size_and_node.begin(), prev_size_and_node.end());
	    for (int j = prev_size_and_node.size()-1; j >= 0; j--)
	    {
	      ExtractorNode n1, n2, n3, n4;
	      prev_size_and_node[j].second->DivideNode(n1, n2, n3, n4);
	      
	      // same child handling as above
	      if (n1.vKeys.size() > 0)
	      {
		list_nodes.push_front(n1);
		if (n1.vKeys.size() > 1)
		{
		  keys_size_and_node.push_back(std::make_pair(n1.vKeys.size(), &list_nodes.front()));
		  list_nodes.front().lit = list_nodes.begin();
		}
	      }
	      if (n2.vKeys.size() > 0)
	      {
		list_nodes.push_front(n2);
		if (n2.vKeys.size() > 1)
		{
		  keys_size_and_node.push_back(std::make_pair(n2.vKeys.size(), &list_nodes.front()));
		  list_nodes.front().lit = list_nodes.begin();
		}
	      }
	      if (n3.vKeys.size() > 0)
	      {
		list_nodes.push_front(n3);
		if (n3.vKeys.size() > 1)
		{
		  keys_size_and_node.push_back(std::make_pair(n3.vKeys.size(), &list_nodes.front()));
		  list_nodes.front().lit = list_nodes.begin();
		}
	      }
	      if (n4.vKeys.size() > 0)
	      {
		list_nodes.push_front(n4);
		if (n4.vKeys.size() > 1)
		{
		  keys_size_and_node.push_back(std::make_pair(n4.vKeys.size(), &list_nodes.front()));
		  list_nodes.front().lit = list_nodes.begin();
		}
	      }
	      
	      list_nodes.erase(prev_size_and_node[j].second->lit);
	      if ((int)list_nodes.size() >= features_num_per_level[level])
		break;
	    }
	    if ((int)list_nodes.size() >= features_num_per_level[level] || (int)list_nodes.size() == pre_size)
	      is_finish = true;
	  }
	}
      }

	
      // keep only the keypoint with the highest response in each node
      std::vector<cv::KeyPoint> result_keys;
      result_keys.reserve(features_num_per_level[level]);
      for (std::list<ExtractorNode>::iterator lit = list_nodes.begin(); lit != list_nodes.end(); lit++)
      {
	std::vector<cv::KeyPoint> &node_keys = lit->vKeys;
	cv::KeyPoint* keypoint = &node_keys[0];
	float max_response = keypoint->response;
	
	for (size_t k = 1; k < node_keys.size(); k++)
	{
	  if (node_keys[k].response > max_response)
	  {
	    keypoint = &node_keys[k];
	    max_response = node_keys[k].response;
	  }
	}
	
	result_keys.push_back(*keypoint);
      }
      
      keypoints = result_keys;
      
      const int scaled_path_size = PATCH_SIZE * vec_scale_factor[level];
      // restore absolute coordinates and record the level information
      const int nkps = keypoints.size();
      for (int i = 0; i < nkps; i++)
      {
	keypoints[i].pt.x += min_border_x;
	keypoints[i].pt.y += min_border_y;
	keypoints[i].octave = level;
	keypoints[i].size = scaled_path_size;
      }
    }
    
    // compute keypoint orientations with the intensity centroid method
    vector<int> u_max; // largest |u| inside the circle of radius HALF_PATH_SIZE for each row v
    u_max.resize(HALF_PATH_SIZE + 1);
    // compute the two symmetric halves of the circle separately
    int v, v0, vmax = cvFloor(HALF_PATH_SIZE * sqrt(2.f) / 2 + 1);
    int vmin = cvCeil(HALF_PATH_SIZE * sqrt(2.f) / 2);
    // Pythagoras
    const double hp2 = HALF_PATH_SIZE * HALF_PATH_SIZE;
    for (v = 0; v <= vmax; ++v)
      u_max[v] = cvRound(sqrt(hp2 - v * v));
    // make sure the patch is a symmetric circle
    for (v = HALF_PATH_SIZE, v0 = 0; v >= vmin; --v)
    {
      while (u_max[v0] == u_max[v0+1])
	++v0;
      u_max[v] = v0;
      ++v0;
    }
    
    for (int level = 0; level < levels_num; level++)
    {
      vector<KeyPoint>& keypoints = all_keypoints[level];
      
      // orientation of every keypoint, one by one
      for (size_t i = 0; i < keypoints.size(); i++)
      {
	int m_01 = 0, m_10 = 0;
	// pointer to the patch center of this keypoint
	const uchar* center = &vec_image_pyramid[level].at<uchar>(cvRound(keypoints[i].pt.y), cvRound(keypoints[i].pt.x));
	
	// the v = 0 row is handled on its own
	for (int u = -HALF_PATH_SIZE; u <= HALF_PATH_SIZE; ++u)
	  m_10 += u * center[u];
	
	// the remaining rows are processed in symmetric pairs (v and -v)
	int step = (int)vec_image_pyramid[level].step1();
	for (int v = 1; v <= HALF_PATH_SIZE; ++v)
	{
	  int v_sum = 0;
	  int d = u_max[v];
	  for (int u = -d; u <= d; ++u)
	  {
	    int val_plus = center[u + v*step], val_minus = center[u - v*step];
	    v_sum += (val_plus - val_minus);
	    m_10 += u * (val_plus + val_minus);
	  }
	  m_01 += v * v_sum;
	}
	keypoints[i].angle = cv::fastAtan2((float)m_01, (float)m_10);
      }
    }
    
    // BRIEF sampling pattern: bit_pattern_31 is the standard 256x4 table from
    // OpenCV's orb.cpp / ORB_SLAM2 (1024 integers, omitted here for space)
    std::vector<cv::Point> pattern;
    const int num_points = 512;
    const Point* pattern0 = (const Point*)bit_pattern_31;
    std::copy(pattern0, pattern0 + num_points, std::back_inserter(pattern)); // copy the 512 sample points into pattern

   
   Mat descriptors;
   int num_keypoints = 0;
   for (int level = 0; level < levels_num; level++)
     num_keypoints += (int)all_keypoints[level].size();
   
   vector<KeyPoint> out_put_keypoints;
   out_put_keypoints.reserve(num_keypoints);
   
   if (num_keypoints != 0)
     descriptors.create(num_keypoints, 32, CV_8U);
   
   int offset = 0;
   for (int level = 0; level < levels_num; level++)
   {
      vector<KeyPoint>& keypoints = all_keypoints[level];
      
      int num_keypoints_level = (int)keypoints.size();
      
      if (num_keypoints_level == 0)
	continue;
      
      // Gaussian blur reduces noise before computing BRIEF
      cout << "Gaussian-blurring pyramid level " << level+1 << "..." << endl;
      Mat working_mat = vec_image_pyramid[level].clone();
      GaussianBlur(working_mat, working_mat, Size(7,7), 2, 2, BORDER_REFLECT_101);
      imshow("img", working_mat);
      waitKey(100);
      
      // descriptors of this level
      Mat descriptors_per_level = descriptors.rowRange(offset, offset + num_keypoints_level);
      descriptors_per_level = Mat::zeros((int)keypoints.size(), 32, CV_8UC1);
      
      const float factorPI = (float)(CV_PI/180.f);
      for (size_t i = 0; i < keypoints.size(); i++)
      {
	uchar* desc = descriptors_per_level.ptr((int)i);
	// rotate the sampling pattern to the keypoint's orientation (steered BRIEF)
	const float angle = keypoints[i].angle * factorPI;
	const float a = (float)cos(angle), b = (float)sin(angle);
	const Point* ppattern = &pattern[0];
	const uchar* center = &working_mat.at<uchar>(cvRound(keypoints[i].pt.y), cvRound(keypoints[i].pt.x));
	const int step = (int)working_mat.step;
	
	// read one pixel of the rotated pattern
	#define GET_VALUE(idx) \
	  center[ cvRound(ppattern[idx].x*b + ppattern[idx].y*a)*step + \
	          cvRound(ppattern[idx].x*a - ppattern[idx].y*b)]
	
	// 32 iterations, 16 pattern points each (16*32 = 512): every iteration forms
	// 8 point pairs, and the 8 comparisons yield one byte of the descriptor
	for (int i = 0; i < 32; ++i, ppattern += 16)
	{
	  int t0, t1, val;
	  
	  t0 = GET_VALUE(0);
	  t1 = GET_VALUE(1);
	  val = t0 < t1;
	  
	  t0 = GET_VALUE(2);
	  t1 = GET_VALUE(3);
	  val |= (t0 < t1) << 1;
	  
	  t0 = GET_VALUE(4);
	  t1 = GET_VALUE(5);
	  val |= (t0 < t1) << 2;
	  
	  t0 = GET_VALUE(6);
	  t1 = GET_VALUE(7);
	  val |= (t0 < t1) << 3;
	  
	  t0 = GET_VALUE(8);
	  t1 = GET_VALUE(9);
	  val |= (t0 < t1) << 4;
	  
	  t0 = GET_VALUE(10);
	  t1 = GET_VALUE(11);
	  val |= (t0 < t1) << 5;
	  
	  t0 = GET_VALUE(12);
	  t1 = GET_VALUE(13);
	  val |= (t0 < t1) << 6;
	  
	  t0 = GET_VALUE(14);
	  t1 = GET_VALUE(15);
	  val |= (t0 < t1) << 7;
	  
	  desc[i] = (uchar)val; // 32 bytes = one 256-bit descriptor
	}
	
	#undef GET_VALUE
      }
   
      offset += num_keypoints_level;
      
      // scale keypoints back to the coordinates of the original image
      if (level != 0)
      {
	float scale = vec_scale_factor[level];
	for (vector<KeyPoint>::iterator keypoint = keypoints.begin(), keypoint_end = keypoints.end(); keypoint != keypoint_end; ++keypoint)
	  keypoint->pt *= scale;
      }
      
      out_put_keypoints.insert(out_put_keypoints.end(), keypoints.begin(), keypoints.end());
    }
    
    destroyAllWindows();
    
    Mat out_img1;
    drawKeypoints(img, out_put_keypoints, out_img1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
    imshow("Quadtree ORB", out_img1);
    imwrite("NewORB.png", out_img1);
    waitKey(0);
    
    // for comparison: OpenCV's stock ORB
    vector<KeyPoint> orb_keypoints;
    Ptr<ORB> orb = ORB::create(1000);
    orb->detect(img, orb_keypoints);
    cout << "Found " << orb_keypoints.size() << " keypoints;" << endl;
    
    cv::Mat img_orb;
    cv::drawKeypoints(img, orb_keypoints, img_orb, cv::Scalar::all(-1), cv::DrawMatchesFlags::DEFAULT);
    cv::imshow("Plain ORB", img_orb);
    cv::imwrite("NormalORB.png", img_orb);
    cv::waitKey(0);
    
    return 0;
}


The results are shown below: the first image uses the quadtree-based uniform ORB, the second OpenCV's stock ORB:

[Figure 3: keypoints from the quadtree-based uniform ORB]

[Figure 4: keypoints from OpenCV's stock ORB]

The quadtree-based method changes little of the classic ORB pipeline, which remains: 1. build the image pyramid; 2. divide each level into a grid and extract FAST corners per cell; 3. evaluate the keypoints in each cell (here, each quadtree node) and keep only the best one as the representative; 4. compute each keypoint's orientation with the intensity centroid method; 5. Gaussian-filter each pyramid level; 6. compute each keypoint's BRIEF descriptor (using the 256 point pairs optimized by the heuristic search in the original paper); 7. store the keypoints and descriptors.

 

4. Investigate why FLANN can handle matching so quickly. Besides FLANN, what other means are there to accelerate matching?

FLANN stands for Fast Library for Approximate Nearest Neighbors. It is a collection of nearest-neighbor algorithms for large, high-dimensional data sets, and it outperforms BFMatcher on large data. Matching is essentially a similarity search over feature vectors. The simplest approach computes the distance of every candidate pair, but such exhaustive search is clearly inefficient and unusable on large data sets. The classic kd-tree search with backtracking helps here by pruning unnecessary comparisons, though on high-dimensional data its benefit often disappears or even reverses. Building on the classic kd-tree, randomized kd-trees select the dimensions with the largest variance at random, build several trees, and search them with backtracking. Another variant is the hierarchical k-means tree, which first clusters the data with k-means (an algorithm we will meet again in loop-closure detection) before building the tree. All of these beat plain brute-force matching by a wide margin. A good blog post on kd-trees: https://www.cnblogs.com/lysuns/articles/4710712.html
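As a minimal sketch of what this looks like in practice (my example, assuming OpenCV 3 and two test images named 1.png and 2.png), FLANN can be pointed at an LSH index for ORB's binary descriptors, since its default kd-tree index assumes float descriptors such as SIFT's:

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>

using namespace cv;

int main()
{
    Mat img1 = imread("1.png", IMREAD_GRAYSCALE);
    Mat img2 = imread("2.png", IMREAD_GRAYSCALE);

    // detect ORB keypoints and compute their binary descriptors
    Ptr<ORB> orb = ORB::create(1000);
    std::vector<KeyPoint> kps1, kps2;
    Mat desc1, desc2;
    orb->detectAndCompute(img1, noArray(), kps1, desc1);
    orb->detectAndCompute(img2, noArray(), kps2, desc2);

    // binary descriptors need an LSH index; for SIFT/SURF the default
    // randomized kd-tree index (flann::KDTreeIndexParams) would be used instead
    FlannBasedMatcher matcher(makePtr<flann::LshIndexParams>(12, 20, 2));
    std::vector<DMatch> matches;
    matcher.match(desc1, desc2, matches);
    return 0;
}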

Accelerated matching schemes are usually demonstrated on the classic SIFT pipeline. Common ones include principal component analysis, which reduces the SIFT descriptor from 128 to 36 dimensions and speeds matching up more than threefold; GPU acceleration, which can make matching over ten times faster; FPGA acceleration, good for roughly a tenfold speedup; and the later VF-SIFT (very fast SIFT), whose core idea is to extract four feature angles from each SIFT feature and only compare features whose angles fall in the same interval, avoiding a large share of the search; it is reportedly some 1250 times faster than plain search.

 

5. Change the EPnP used by the demo program to another PnP method, and study how those methods work.

Besides EPnP, OpenCV offers two other methods, the iterative method and P3P; OpenCV 3 additionally provides DLS and UPnP. First, a reminder of what PnP is: Perspective-n-Point describes how to estimate the camera pose given n 3D points and their projected positions. EPnP (Efficient PnP) expresses any 3D point as a weighted combination of 4 non-coplanar control points, builds a 12x12 matrix from the projections of the n points onto the image plane together with the control-point weights, and recovers the control points' camera-frame coordinates from that matrix's eigenvectors; the pose then follows via a POSIT-style orthogonal projection step. The iterative method minimizes the reprojection error iteratively: it first solves with the DLT (direct linear transform) and then refines with the LM algorithm; the initial solution is clearly not exact, and in this form the problem can be solved from just 4 coplanar feature points. P3P needs 3 pairs of 3D-2D matches: from the triangle's projection relations it forms a system of equations that can be transformed into two bivariate quadratic equations and solved. DLS (Direct Least-Squares) models PnP as a nonlinear least-squares problem, redefines the LS model to reduce the parameter dimension, builds a Macaulay matrix, and solves PnP reformulated as an unconstrained LSM problem. UPnP (Uncalibrated PnP) differs little from EPnP except that it additionally estimates the focal length, which makes it suitable for uncalibrated settings.
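A quick sketch of how the method is switched in OpenCV (a hypothetical helper of mine; pts_3d, pts_2d, and K stand for the 3D-2D correspondences and intrinsics assembled as in the Q6 program below); each variant is a one-flag change to solvePnP:

#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#include <vector>

using namespace cv;

// hypothetical helper: run the same correspondences through each PnP solver
void comparePnPMethods(const std::vector<Point3f>& pts_3d,
                       const std::vector<Point2f>& pts_2d, const Mat& K)
{
    Mat r, t; // rotation vector and translation
    solvePnP(pts_3d, pts_2d, K, Mat(), r, t, false, SOLVEPNP_ITERATIVE); // DLT + LM refinement
    solvePnP(pts_3d, pts_2d, K, Mat(), r, t, false, SOLVEPNP_EPNP);      // EPnP
    solvePnP(pts_3d, pts_2d, K, Mat(), r, t, false, SOLVEPNP_DLS);       // direct least-squares
    solvePnP(pts_3d, pts_2d, K, Mat(), r, t, false, SOLVEPNP_UPNP);      // also estimates focal length
    // SOLVEPNP_P3P requires exactly 4 point pairs, the fourth picking the right root
}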

 

6. In the PnP optimization, how should the program be written so that the first camera's observations are also taken into account? How does the result change?

Starting from the book's pose_estimation_3d2d.cpp, the bundleAdjustment function is modified as follows (OpenCV 3 environment):

The CMakeLists.txt:

cmake_minimum_required(VERSION 2.6)
project(homeworks7_6)

set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS "-std=c++11 -O3")

list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules)

set(OpenCV_DIR "/usr/local/opencv3/share/OpenCV")
find_package(OpenCV REQUIRED)
find_package(G2O REQUIRED)
find_package(CSparse REQUIRED)

include_directories(
  ${OpenCV_INCLUDE_DIRS}
  ${G2O_INCLUDE_DIRS}
  ${CSPARSE_INCLUDE_DIR}
  "/usr/include/eigen3/"
  )
  

add_executable(homeworks7_6 homeworks7_6.cpp)

target_link_libraries(homeworks7_6 ${OpenCV_LIBS} ${CSPARSE_LIBRARY} g2o_core g2o_stuff g2o_types_sba g2o_csparse_extension)

install(TARGETS homeworks7_6 RUNTIME DESTINATION bin)

homeworks7_6.cpp:

#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>

#include <Eigen/Core>
#include <Eigen/Geometry>

#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/optimization_algorithm_levenberg.h>
#include <g2o/solvers/csparse/linear_solver_csparse.h>
#include <g2o/types/sba/types_six_dof_expmap.h>


using namespace std;
using namespace cv;

// ORB feature detection and matching
void find_feature_matches(const cv::Mat& img_1, const cv::Mat& img_2, std::vector<cv::KeyPoint>& keypoints_1, std::vector<cv::KeyPoint>& keypoints_2, std::vector<cv::DMatch>& matches)
{
  Mat descriptors_1, descriptors_2;
  
  cv::Ptr<cv::FeatureDetector> detector = cv::ORB::create();
  cv::Ptr<cv::DescriptorExtractor> descriptor = cv::ORB::create();
  cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
  
  detector->detect(img_1, keypoints_1);
  detector->detect(img_2, keypoints_2);
  
  descriptor->compute(img_1, keypoints_1, descriptors_1);
  descriptor->compute(img_2, keypoints_2, descriptors_2);
  
  vector<DMatch> matchers;
  matcher->match(descriptors_1, descriptors_2, matchers);
  
  // find the minimum and maximum distances among all matches
  double min_dist = 10000, max_dist = 0;
  for (int i = 0; i < descriptors_1.rows; i++)
  {
    double dist = matchers[i].distance;
    if (dist < min_dist) min_dist = dist;
    if (dist > max_dist) max_dist = dist;
  }
  
  // keep a match when its distance is below twice the minimum (or the empirical floor of 30)
  for (int i = 0; i < descriptors_1.rows; i++)
  {
    if (matchers[i].distance <= max(30.0, 2 * min_dist))
      matches.push_back(matchers[i]);
  }
}

// pixel coordinates to normalized camera coordinates
Point2d pixel2cam(const Point2d& p, const Mat& K)
{
  return Point2d(
    (p.x - K.at<double>(0,2)) / K.at<double>(0,0),
    (p.y - K.at<double>(1,2)) / K.at<double>(1,1)
  );
}


void bundleAdjustment(const vector<Point3f> points_3d, const vector<Point2f> points_2d_1, const vector<Point2f> points_2d_2, const Mat& K, Mat& R, Mat& t)
{
  // pose dimension 6, landmark dimension 3
  typedef g2o::BlockSolver<g2o::BlockSolverTraits<6,3>> Block;
  // linear solver: CSparse factorization
  Block::LinearSolverType* linearSolver = new g2o::LinearSolverCSparse<Block::PoseMatrixType>();
  // block solver
  Block* solver_ptr = new Block(linearSolver);
  
  // use Levenberg-Marquardt descent
  g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);
  // the graph model
  g2o::SparseOptimizer optimizer;
  // set the solver
  optimizer.setAlgorithm(solver);
  
  // vertex for the first camera
  g2o::VertexSE3Expmap* poseOne = new g2o::VertexSE3Expmap();
  poseOne->setId(0);
  poseOne->setFixed(1); // hold the first pose fixed at the origin
  poseOne->setEstimate(g2o::SE3Quat());
  optimizer.addVertex(poseOne);
  
  // vertex for the second camera
  g2o::VertexSE3Expmap* poseTwo = new g2o::VertexSE3Expmap();
  poseTwo->setId(1);
  Eigen::Matrix3d R_Two;
  R_Two <<
  R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2),
  R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2),
  R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2);
  // initialize the pose to be optimized with the PnP rotation and translation
  poseTwo->setEstimate(g2o::SE3Quat(
    R_Two,
    Eigen::Vector3d(t.at<double>(0,0), t.at<double>(1,0), t.at<double>(2,0))
  ));
  optimizer.addVertex(poseTwo);
  
  int index = 2;
  for (const Point3f p : points_3d)
  {
    g2o::VertexSBAPointXYZ* point = new g2o::VertexSBAPointXYZ();
    point->setId(index++);
    // 3D position of the landmark to be optimized
    point->setEstimate(Eigen::Vector3d(p.x, p.y, p.z));
    // marginalize the points for sparse solving
    point->setMarginalized(true);
    optimizer.addVertex(point);
  }
  
  // camera intrinsics
  g2o::CameraParameters* camera = new g2o::CameraParameters(K.at<double>(0,0), Eigen::Vector2d(K.at<double>(0,2), K.at<double>(1,2)), 0);
  camera->setId(0);
  optimizer.addParameter(camera);
  
  // add the edges
  int edgeCount = 0;
  
  // observations from the first camera
  index = 2;
  for (const Point2f p : points_2d_1)
  {
    // reprojection-error edge EdgeProjectXYZ2UV
    g2o::EdgeProjectXYZ2UV* edgeOne = new g2o::EdgeProjectXYZ2UV();
    edgeOne->setId(edgeCount++);
    // connect the landmark vertex and the first pose vertex
    edgeOne->setVertex(0, dynamic_cast<g2o::VertexSBAPointXYZ*>(optimizer.vertex(index)));
    edgeOne->setVertex(1, dynamic_cast<g2o::VertexSE3Expmap*>(optimizer.vertex(0)));
    // measurement: pixel coordinates in the first frame
    edgeOne->setMeasurement(Eigen::Vector2d(p.x, p.y));
    edgeOne->setParameterId(0,0);
    edgeOne->setInformation(Eigen::Matrix2d::Identity());
    optimizer.addEdge(edgeOne);
    index++;
  }
  
  // observations from the second camera
  index = 2;
  for (const Point2f p : points_2d_2)
  {
    g2o::EdgeProjectXYZ2UV* edgetwo = new g2o::EdgeProjectXYZ2UV();
    edgetwo->setId(edgeCount++);
    edgetwo->setVertex(0, dynamic_cast<g2o::VertexSBAPointXYZ*>(optimizer.vertex(index)));
    edgetwo->setVertex(1, dynamic_cast<g2o::VertexSE3Expmap*>(optimizer.vertex(1)));
    edgetwo->setMeasurement(Eigen::Vector2d(p.x, p.y));
    edgetwo->setParameterId(0,0);
    // default weight: identity information matrix
    edgetwo->setInformation(Eigen::Matrix2d::Identity());
    optimizer.addEdge(edgetwo);
    index++;
  }
  
  // enable verbose output if needed
  //optimizer.setVerbose(true);
  // initialize
  optimizer.initializeOptimization();
  // number of iterations
  optimizer.optimize(100);
  cout << "after optimization:" << endl;
  cout << "T1 = " << endl << Eigen::Isometry3d(poseOne->estimate()).matrix() << endl;
  cout << "T2 = " << endl << Eigen::Isometry3d(poseTwo->estimate()).matrix() << endl;
}


int main(int argc, char **argv) {
  
  // load the RGB images
  Mat img_1 = imread("../1.png", CV_LOAD_IMAGE_COLOR);
  Mat img_2 = imread("../2.png", CV_LOAD_IMAGE_COLOR);
  
  vector<KeyPoint> keypoints_1, keypoints_2;
  vector<DMatch> matches;
  find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
  
  // load the depth image of the first frame
  Mat depth_1 = imread("../1_depth.png", CV_LOAD_IMAGE_UNCHANGED);
  // camera intrinsics
  Mat K = (Mat_<double>(3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
  
  // 3D points and their 2D observations in both frames
  vector<Point3f> pts_3d;
  vector<Point2f> pts_2d_1, pts_2d_2;
  
  for (DMatch m : matches)
  {
    ushort d = depth_1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
    
    if (d == 0)
      continue;
    
    float dd = d / 1000.0;
    
    Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
    pts_3d.push_back(Point3f(p1.x * dd, p1.y * dd, dd));
    pts_2d_1.push_back(keypoints_1[m.queryIdx].pt);
    pts_2d_2.push_back(keypoints_2[m.trainIdx].pt);
  }
  
  // solve PnP from the 3D-2D correspondences
  Mat r, t;
  solvePnP(pts_3d, pts_2d_2, K, Mat(), r, t, false, SOLVEPNP_EPNP);
  Mat R;
  Rodrigues(r, R);
  
  cout << "rotation vector r = " << endl << r << endl;
  cout << "rotation matrix R = " << endl << R << endl;
  cout << "translation t = " << endl << t << endl;
  
  // refine with bundle adjustment
  bundleAdjustment(pts_3d, pts_2d_1, pts_2d_2, K, R, t);
  
    return 0;
    
}

The output:

[Figure 5: PnP result and the optimized poses T1, T2]

Adding the first frame's observations changes the rotation R only slightly, but affects the translation t quite a bit, and the optimized value ends up far from the pre-optimization one. This may be because the errors of the first and second frames' observations accumulate, or because the depth camera's depth errors get amplified.

 

7. In the ICP program, how should the program be written so that the spatial points are also treated as optimization variables? How does the result change?

3D-3D registration may be the most common problem in SLAM. ICP (Iterative Closest Point) solving involves no camera model: the camera plays no role at all. ICP is generally solved either in closed form via SVD or by nonlinear optimization. For this exercise we can lightly modify the book's BA program pose_estimation_3d3d.cpp. The code follows (the way the custom g2o vertex and edge are set up here is quite typical):

The CMakeLists.txt:

cmake_minimum_required(VERSION 2.6)
project(homeworks7_7)

set( CMAKE_BUILD_TYPE "Release" )
set( CMAKE_CXX_FLAGS "-std=c++11 -O3" )

list( APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules )

set(OpenCV_DIR "/usr/local/opencv3/share/OpenCV")
find_package( OpenCV 3 REQUIRED )
find_package( G2O REQUIRED )
find_package( CSparse REQUIRED )

include_directories(
  ${OpenCV_INCLUDE_DIRS}
  ${CSPARSE_INCLUDE_DIR}
  ${CSPARSE_INCLUDE_DIRS}
  "/usr/include/eigen3/"
  "/home/fuhang/projects/feature_extraction"
  )

add_executable(homeworks7_7 homeworks7_7.cpp)
target_link_libraries( homeworks7_7 
   ${OpenCV_LIBS}
   g2o_core g2o_stuff g2o_types_sba g2o_csparse_extension 
   ${CSPARSE_LIBRARY}
)

install(TARGETS homeworks7_7 RUNTIME DESTINATION bin)

homeworks7_7.cpp:

#include <iostream>

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>

#include <Eigen/Core>
#include <Eigen/Geometry>
#include <Eigen/SVD>

#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/base_binary_edge.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/optimization_algorithm_levenberg.h>
#include <g2o/solvers/csparse/linear_solver_csparse.h>
#include <g2o/types/sba/types_six_dof_expmap.h>

using namespace std;
using namespace cv;

void find_feature_matches(const Mat& img_1, const Mat& img_2, vector<KeyPoint>& keypoints_1, vector<KeyPoint>& keypoints_2, vector<DMatch>& matches)
{
  Mat descriptors_1, descriptors_2;
  
  Ptr<FeatureDetector> detector = ORB::create();
  Ptr<DescriptorExtractor> descriptor = ORB::create();
  Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
  
  detector->detect(img_1, keypoints_1);
  detector->detect(img_2, keypoints_2);
  descriptor->compute(img_1, keypoints_1, descriptors_1);
  descriptor->compute(img_2, keypoints_2, descriptors_2);
  
  vector<DMatch> matchers;
  matcher->match(descriptors_1, descriptors_2, matchers);
  double min_dist = 10000, max_dist = 0;
  for (int i = 0; i < descriptors_1.rows; i++)
  {
    if (matchers[i].distance < min_dist) min_dist = matchers[i].distance;
    if (matchers[i].distance > max_dist) max_dist = matchers[i].distance;
  }
  
  for (auto m : matchers)
  {
    if (m.distance <= max(30.0, 2 * min_dist))
      matches.push_back(m);
  }
  
  cout << "found " << matches.size() << " matches" << endl;
}


Point2d pixel2cam(const Point2d& p, const Mat& K)
{
    return Point2d
           (
               (p.x - K.at<double>(0,2)) / K.at<double>(0,0),
               (p.y - K.at<double>(1,2)) / K.at<double>(1,1)
           );
}

void pose_estimation_3d3d(const vector<Point3f>& pts1, const vector<Point3f>& pts2, Mat& R, Mat& t)
{
  // centroids of the two point sets
  Point3f p1, p2;
  int N = pts1.size();
  for (int i = 0; i < N; i++)
  {
    p1 += pts1[i];
    p2 += pts2[i];
  }
  p1 = Point3f(Vec3f(p1) / N);
  p2 = Point3f(Vec3f(p2) / N);
  
  // de-centered coordinates
  vector<Point3f> q1(N), q2(N);
  for (int i = 0; i < N; i++)
  {
    q1[i] = pts1[i] - p1;
    q2[i] = pts2[i] - p2;
  }
  
  // W = sum(q1 * q2^T)
  Eigen::Matrix3d W = Eigen::Matrix3d::Zero();
  for (int i = 0; i < N; i++)
    W += Eigen::Vector3d(q1[i].x, q1[i].y, q1[i].z) * Eigen::Vector3d(q2[i].x, q2[i].y, q2[i].z).transpose();
  
  // SVD of W gives the rotation
  Eigen::JacobiSVD<Eigen::Matrix3d> svd(W, Eigen::ComputeFullU | Eigen::ComputeFullV);
  Eigen::Matrix3d U = svd.matrixU();
  Eigen::Matrix3d V = svd.matrixV();
  Eigen::Matrix3d R_ = U * (V.transpose());
  if (R_.determinant() < 0)
    R_ = -R_;
  Eigen::Vector3d t_ = Eigen::Vector3d(p1.x, p1.y, p1.z) - R_ * Eigen::Vector3d(p2.x, p2.y, p2.z);
  
  R = (Mat_<double>(3,3) <<
    R_(0,0), R_(0,1), R_(0,2),
    R_(1,0), R_(1,1), R_(1,2),
    R_(2,0), R_(2,1), R_(2,2));
  t = (Mat_<double>(3,1) << t_(0,0), t_(1,0), t_(2,0));
}


// binary edge: 3D error between the point measured in frame 1 and the transformed point vertex
class EdgeProjectXYZRGBD : public g2o::BaseBinaryEdge<3, Eigen::Vector3d, g2o::VertexSBAPointXYZ, g2o::VertexSE3Expmap>
{
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW;
  EdgeProjectXYZRGBD() {}
  
  virtual void computeError()
  {
    const g2o::VertexSBAPointXYZ* point = static_cast<const g2o::VertexSBAPointXYZ*>(_vertices[0]);
    const g2o::VertexSE3Expmap* pose = static_cast<const g2o::VertexSE3Expmap*>(_vertices[1]);
    // measurement is the point observed in the first frame
    _error = _measurement - pose->estimate().map(point->estimate());
  }
  
  virtual bool read(istream& in) { return false; }
  virtual bool write(ostream& out) const { return false; }
};


void bundleAdjustment(const vector<Point3f>& pts1, const vector<Point3f>& pts2, Mat& R, Mat& t)
{
  typedef g2o::BlockSolver<g2o::BlockSolverTraits<6,3>> Block;
  Block::LinearSolverType* linearSolver = new g2o::LinearSolverCSparse<Block::PoseMatrixType>();
  Block* solver_ptr = new Block(linearSolver);
  // Gauss-Newton kept diverging here; Levenberg-Marquardt converges
  g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);
  g2o::SparseOptimizer optimizer;
  optimizer.setAlgorithm(solver);
  
  // pose vertex, initialized from the SVD result
  g2o::VertexSE3Expmap* pose = new g2o::VertexSE3Expmap();
  pose->setId(0);
  Eigen::Matrix3d R_mat;
  R_mat <<
    R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2),
    R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2),
    R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2);
  pose->setEstimate(g2o::SE3Quat(R_mat, Eigen::Vector3d(t.at<double>(0,0), t.at<double>(1,0), t.at<double>(2,0))));
  optimizer.addVertex(pose);
  
  // the spatial points of the second frame are now vertices too
  int index = 1;
  for (const Point3f& p : pts2)
  {
    g2o::VertexSBAPointXYZ* point = new g2o::VertexSBAPointXYZ();
    point->setId(index++);
    point->setEstimate(Eigen::Vector3d(p.x, p.y, p.z));
    point->setMarginalized(true);
    optimizer.addVertex(point);
  }
  
  // one edge per correspondence
  index = 1;
  for (size_t i = 0; i < pts1.size(); i++)
  {
    EdgeProjectXYZRGBD* edge = new EdgeProjectXYZRGBD();
    edge->setId(i);
    edge->setVertex(0, dynamic_cast<g2o::VertexSBAPointXYZ*>(optimizer.vertex(index)));
    edge->setVertex(1, dynamic_cast<g2o::VertexSE3Expmap*>(optimizer.vertex(0)));
    edge->setMeasurement(Eigen::Vector3d(pts1[i].x, pts1[i].y, pts1[i].z));
    edge->setInformation(Eigen::Matrix3d::Identity());
    optimizer.addEdge(edge);
    index++;
  }
  
  optimizer.initializeOptimization();
  optimizer.optimize(100);
  
  cout << "after optimization:" << endl;
  cout << "T = " << endl << Eigen::Isometry3d(pose->estimate()).matrix() << endl;
}


int main(int argc, char **argv)
{
  // load the RGB and depth images
  Mat img_1 = imread("../1.png", CV_LOAD_IMAGE_COLOR);
  Mat img_2 = imread("../2.png", CV_LOAD_IMAGE_COLOR);
  Mat depth_1 = imread("../1_depth.png", CV_LOAD_IMAGE_UNCHANGED);
  Mat depth_2 = imread("../2_depth.png", CV_LOAD_IMAGE_UNCHANGED);
  
  vector<KeyPoint> keypoints_1, keypoints_2;
  vector<DMatch> matches;
  find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
  // camera intrinsics
  Mat K = (Mat_<double>(3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
  vector<Point3f> pts1, pts2;
  
  for (DMatch m : matches)
  {
      ushort d1 = depth_1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
      ushort d2 = depth_2.ptr<unsigned short>(int(keypoints_2[m.trainIdx].pt.y))[int(keypoints_2[m.trainIdx].pt.x)];
      if (d1 == 0 || d2 == 0)
          continue;
      Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
      Point2d p2 = pixel2cam(keypoints_2[m.trainIdx].pt, K);
      float dd1 = float(d1) / 1000.0;
      float dd2 = float(d2) / 1000.0;
      pts1.push_back(Point3f(p1.x * dd1, p1.y * dd1, dd1));
      pts2.push_back(Point3f(p2.x * dd2, p2.y * dd2, dd2));
  }
  cout << "3d-3d pairs: " << pts1.size() << endl;
  Mat R, t;
  pose_estimation_3d3d(pts1, pts2, R, t);
  
  cout << "R inverse = " << endl << R.t() << endl;
  cout << "t inverse = " << endl << -R.t()*t << endl;
  
  bundleAdjustment(pts1, pts2, R, t);
  
  return 0;
}

Running the code gives the result shown below:

[Figure 6: ICP result before and after optimization]

Because the initial value is the SVD solution for R and t, convergence is fast. The iteration method used here is not Gauss-Newton: after several attempts, Gauss-Newton kept diverging, while switching to the LM algorithm converged immediately; perhaps this particular problem is somewhat ill-conditioned.
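For reference, the swap described above is a one-line change in bundleAdjustment (a sketch against the old raw-pointer g2o API used throughout this post; the Gauss-Newton header is the extra assumption):

#include <g2o/core/optimization_algorithm_gauss_newton.h>

// instead of OptimizationAlgorithmLevenberg:
g2o::OptimizationAlgorithmGaussNewton* solver =
    new g2o::OptimizationAlgorithmGaussNewton(solver_ptr);
optimizer.setAlgorithm(solver); // with this, the iteration diverged in my tests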

 

8. Mismatches are unavoidable in feature matching. What happens if mismatched pairs are fed into PnP or ICP? What methods can avoid mismatches?

The book uses brute-force matching on Hamming distance and then filters the matches by an empirical threshold (30, or twice the minimum distance). If mismatches are fed into PnP or ICP, and on top of that the iteration method is badly chosen or the initial estimate is poor, the result easily acquires errors; worse, the iteration can become unstable or even fail outright. Popular ways to avoid mismatches include: cross-check matching (match a second time in the reverse direction on top of brute force, and accept a pair only if both runs agree, e.g. BFMatcher(NORM_HAMMING, true)); KNN matching (retrieve the K most similar candidates per feature, usually K = 2, and accept the best one only if it is clearly better than the runner-up, e.g. bfMatcher->knnMatch(descriptors1, descriptors2, knnMatches, 2)); and RANSAC (random sample consensus: estimate the homography between the two images and judge each match by its reprojection error, e.g. findHomography). It is also instructive to look at how mature frameworks such as ORB_SLAM2 fend off mismatches in different scenes. Likewise, on the back end, a robust kernel such as the Huber loss can make the optimization more resilient to outliers.
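A minimal sketch of the two filters most often combined in practice, Lowe's ratio test on KNN matches followed by a RANSAC homography check (assuming OpenCV 3; the 0.7 ratio and 3.0 px reprojection threshold are conventional choices, not values from the book):

#include <opencv2/calib3d.hpp>
#include <opencv2/features2d.hpp>
#include <vector>

using namespace cv;

// hypothetical helper: filter ORB matches with a ratio test, then RANSAC
std::vector<DMatch> filterMatches(const Mat& desc1, const Mat& desc2,
                                  const std::vector<KeyPoint>& kps1,
                                  const std::vector<KeyPoint>& kps2)
{
    Ptr<BFMatcher> bf = BFMatcher::create(NORM_HAMMING);
    std::vector<std::vector<DMatch>> knnMatches;
    bf->knnMatch(desc1, desc2, knnMatches, 2);

    // Lowe's ratio test: keep a match only if it clearly beats the runner-up
    std::vector<DMatch> good;
    for (const auto& m : knnMatches)
        if (m.size() == 2 && m[0].distance < 0.7f * m[1].distance)
            good.push_back(m[0]);

    // RANSAC on the homography rejects matches with a large reprojection error
    std::vector<Point2f> p1, p2;
    for (const auto& m : good)
    {
        p1.push_back(kps1[m.queryIdx].pt);
        p2.push_back(kps2[m.trainIdx].pt);
    }
    std::vector<DMatch> inliers;
    if (p1.size() >= 4)
    {
        std::vector<uchar> mask;
        findHomography(p1, p2, RANSAC, 3.0, mask);
        for (size_t i = 0; i < mask.size(); i++)
            if (mask[i])
                inliers.push_back(good[i]);
    }
    return inliers;
}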

 

9. Implement the PnP and ICP optimization in Ceres.

That is, convert the g2o optimization to Ceres. Since the book mostly works with reprojection-error optimization, i.e. BA problems, and g2o is accurate and convenient for exactly that class of problems, Ceres tends to get less attention. Where g2o is tailored to BA-style problems, Ceres is a more general tool for least-squares problems. This exercise is a good opportunity to get familiar with it; for more, see the official Ceres tutorials.

For PnP, the CMakeLists.txt:

cmake_minimum_required(VERSION 2.6)
project(homeworks7_9)

set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS "-std=c++11 -O3")

list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules)

find_package(Ceres REQUIRED)

set(OpenCV_DIR "/usr/local/opencv3/share/OpenCV")
find_package(OpenCV REQUIRED)

include_directories(
  ${OpenCV_INCLUDE_DIRS}
  ${CERES_INCLUDE_DIRS}
  "/usr/include/eigen3/"
  "/home/fuhang/projects/feature_extraction"
  )

add_executable(homeworks7_9 homeworks7_9.cpp)
target_link_libraries( homeworks7_9 
   ${OpenCV_LIBS}
   ${CERES_LIBRARIES}
)

install(TARGETS homeworks7_9 RUNTIME DESTINATION bin)

homeworks7_9.cpp:

#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>

#include <Eigen/Core>
#include <Eigen/Geometry>

#include <ceres/ceres.h>
#include <ceres/rotation.h>


using namespace std;
using namespace cv;

// ORB feature detection and matching
void find_feature_matches(const cv::Mat& img_1, const cv::Mat& img_2, std::vector<cv::KeyPoint>& keypoints_1, std::vector<cv::KeyPoint>& keypoints_2, std::vector<cv::DMatch>& matches)
{
  Mat descriptors_1, descriptors_2;
  
  cv::Ptr<cv::FeatureDetector> detector = cv::ORB::create();
  cv::Ptr<cv::DescriptorExtractor> descriptor = cv::ORB::create();
  cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
  
  detector->detect(img_1, keypoints_1);
  detector->detect(img_2, keypoints_2);
  
  descriptor->compute(img_1, keypoints_1, descriptors_1);
  descriptor->compute(img_2, keypoints_2, descriptors_2);
  
  vector<DMatch> matchers;
  matcher->match(descriptors_1, descriptors_2, matchers);
  
  // find the minimum and maximum distances among all matches
  double min_dist = 10000, max_dist = 0;
  for (int i = 0; i < descriptors_1.rows; i++)
  {
    double dist = matchers[i].distance;
    if (dist < min_dist) min_dist = dist;
    if (dist > max_dist) max_dist = dist;
  }
  
  // keep a match when its distance is below twice the minimum (or the empirical floor of 30)
  for (int i = 0; i < descriptors_1.rows; i++)
  {
    if (matchers[i].distance <= max(30.0, 2 * min_dist))
      matches.push_back(matchers[i]);
  }
}

// pixel coordinates to normalized camera coordinates
Point2d pixel2cam(const Point2d& p, const Mat& K)
{
  return Point2d(
    (p.x - K.at<double>(0,2)) / K.at<double>(0,0),
    (p.y - K.at<double>(1,2)) / K.at<double>(1,1)
  );
}


struct PnPProblem
{
  // the functor captures one observation: 2D pixel, 3D point, and the intrinsics
  PnPProblem(double x, double y, double X, double Y, double Z, double cx, double cy, double fx, double fy)
  {
    x_ = x;
    y_ = y;
    
    X_ = X;
    Y_ = Y;
    Z_ = Z;
    
    cx_ = cx;
    cy_ = cy;
    
    fx_ = fx;
    fy_ = fy;
  }
  // templated functor: the parameter is the pose (angle-axis + translation), the output the residual
  template <typename T>
  bool operator()(const T* const pose, T* residual) const
  {
    // the 3D point
    T p[3];
    p[0] = T(X_);
    p[1] = T(Y_);
    p[2] = T(Z_);
    
    // rotation part of the camera pose (angle-axis)
    T r[3];
    r[0] = pose[0];
    r[1] = pose[1];
    r[2] = pose[2];
    
    // rotate the 3D point
    T newP[3];
    ceres::AngleAxisRotatePoint(r, p, newP);
    
    // translate the 3D point
    newP[0] += pose[3];
    newP[1] += pose[4];
    newP[2] += pose[5];
    
    // project from camera coordinates to pixel coordinates
    T projectX = fx_ * newP[0] / newP[2] + cx_;
    T projectY = fy_ * newP[1] / newP[2] + cy_;
    
    // reprojection residual
    residual[0] = T(x_) - projectX;
    residual[1] = T(y_) - projectY;
    
    // Ceres requires the functor to return true on success
    return true;
  }
  
  double x_, y_;
  double X_, Y_, Z_;
  double cx_, cy_;
  double fx_, fy_;
};


void bundleAdjustmentCeres(const vector<Point3f> points_3d, const vector<Point2f> points_2d, const Mat& K, Mat& R, Mat& t, Mat& T)
{
  ceres::Problem problem;
  
  Mat rotateVector;
  Rodrigues(R, rotateVector);
  
  // pack the initial pose: angle-axis rotation followed by translation
  double pose[6];
  pose[0] = rotateVector.at<double>(0);
  pose[1] = rotateVector.at<double>(1);
  pose[2] = rotateVector.at<double>(2);
  pose[3] = t.at<double>(0);
  pose[4] = t.at<double>(1);
  pose[5] = t.at<double>(2);
  
  double fx = K.at<double>(0,0);
  double fy = K.at<double>(1,1);
  double cx = K.at<double>(0,2);
  double cy = K.at<double>(1,2);
  
  // one auto-differentiated residual block (2 residuals, 6 pose parameters) per observation
  for (size_t i = 0; i < points_3d.size(); i++)
  {
    problem.AddResidualBlock(
      new ceres::AutoDiffCostFunction<PnPProblem, 2, 6>(new PnPProblem(
	points_2d[i].x, points_2d[i].y,
	points_3d[i].x, points_3d[i].y, points_3d[i].z,
	cx, cy,
	fx, fy
      )),
      nullptr,
      pose
    );
  }
  
  // solve with dense QR
  ceres::Solver::Options options;
  options.linear_solver_type = ceres::DENSE_QR;
  // print the optimization progress to std::cout
  options.minimizer_progress_to_stdout = true;
  
  // run the solver
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  cout << summary.BriefReport() << endl;
  
  // convert the optimized angle-axis vector back to a rotation matrix
  rotateVector.at<double>(0) = pose[0];
  rotateVector.at<double>(1) = pose[1];
  rotateVector.at<double>(2) = pose[2];
  Rodrigues(rotateVector, R);
  
  t.at<double>(0) = pose[3];
  t.at<double>(1) = pose[4];
  t.at<double>(2) = pose[5];
  
  T = (Mat_<double>(4,4) <<
    R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2), t.at<double>(0),
    R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2), t.at<double>(1),
    R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2), t.at<double>(2),
    0, 0, 0, 1
  );
  cout << "optimized pose T = " << endl << T << endl;
}


int main(int argc, char **argv) {
  
  // load the RGB images
  Mat img_1 = imread("../1.png", CV_LOAD_IMAGE_COLOR);
  Mat img_2 = imread("../2.png", CV_LOAD_IMAGE_COLOR);
  
  vector<KeyPoint> keypoints_1, keypoints_2;
  vector<DMatch> matches;
  find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
  
  // load the depth image of the first frame
  Mat depth_1 = imread("../1_depth.png", CV_LOAD_IMAGE_UNCHANGED);
  // camera intrinsics
  Mat K = (Mat_<double>(3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
  
  // 3D points and 2D observations
  vector<Point3f> pts_3d;
  vector<Point2f> pts_2d_1, pts_2d_2;
  
  for (DMatch m : matches)
  {
    ushort d = depth_1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
    
    if (d == 0)
      continue;
    
    float dd = d / 1000.0;
    
    Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
    pts_3d.push_back(Point3f(p1.x * dd, p1.y * dd, dd));
    pts_2d_1.push_back(keypoints_1[m.queryIdx].pt);
    pts_2d_2.push_back(keypoints_2[m.trainIdx].pt);
  }
  
  // solve PnP from the 3D-2D correspondences
  Mat r, t;
  solvePnP(pts_3d, pts_2d_2, K, Mat(), r, t, false, SOLVEPNP_EPNP);
  Mat R;
  Rodrigues(r, R);
  
  cout << "rotation vector r = " << endl << r << endl;
  cout << "rotation matrix R = " << endl << R << endl;
  cout << "translation t = " << endl << t << endl;
  
  cout << "*************************optimization starts***********************************" << endl;
  
  Mat T;
  bundleAdjustmentCeres(pts_3d, pts_2d_2, K, R, t, T);
  
    return 0;
    
}

The PnP result computed by Ceres is shown below:

[Figure 7: Ceres PnP optimization output]

The final Ceres iterate is close to g2o's, but compared with g2o the code is more concise, fewer iterations are needed, and the problem is easier to extend. The ICP optimization works the same way in Ceres.

The CMakeLists.txt:

cmake_minimum_required(VERSION 2.6)
project(homeworks7_9_icp)

set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS "-std=c++11 -O3")

list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules)

find_package(Ceres REQUIRED)

set(OpenCV_DIR "/usr/local/opencv3/share/OpenCV")
find_package(OpenCV REQUIRED)

include_directories(
  ${OpenCV_INCLUDE_DIRS}
  ${CERES_INCLUDE_DIRS}
  "/usr/include/eigen3/"
  "/home/fuhang/projects/feature_extraction"
  )

add_executable(homeworks7_9_icp homeworks7_9_icp.cpp)
target_link_libraries( homeworks7_9_icp 
   ${OpenCV_LIBS}
   ${CERES_LIBRARIES}
)

install(TARGETS homeworks7_9_icp RUNTIME DESTINATION bin)

homeworks7_9_icp.cpp:

#include <iostream>

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>

#include <Eigen/Core>
#include <Eigen/Geometry>
#include <Eigen/SVD>

#include <ceres/ceres.h>
#include <ceres/rotation.h>

using namespace std;
using namespace cv;

void find_feature_matches(const Mat& img_1, const Mat& img_2, vector<KeyPoint>& keypoints_1, vector<KeyPoint>& keypoints_2, vector<DMatch>& matches)
{
  Mat descriptors_1, descriptors_2;
  
  Ptr<FeatureDetector> detector = ORB::create();
  Ptr<DescriptorExtractor> descriptor = ORB::create();
  Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
  
  detector->detect(img_1, keypoints_1);
  detector->detect(img_2, keypoints_2);
  descriptor->compute(img_1, keypoints_1, descriptors_1);
  descriptor->compute(img_2, keypoints_2, descriptors_2);
  
  vector<DMatch> matchers;
  matcher->match(descriptors_1, descriptors_2, matchers);
  double min_dist = 10000, max_dist = 0;
  for (int i = 0; i < descriptors_1.rows; i++)
  {
    if (matchers[i].distance < min_dist) min_dist = matchers[i].distance;
    if (matchers[i].distance > max_dist) max_dist = matchers[i].distance;
  }
  
  for (auto m : matchers)
  {
    if (m.distance <= max(30.0, 2 * min_dist))
      matches.push_back(m);
  }
  
  cout << "found " << matches.size() << " matches" << endl;
}


Point2d pixel2cam(const Point2d& p, const Mat& K)
{
    return Point2d
           (
               (p.x - K.at<double>(0,2)) / K.at<double>(0,0),
               (p.y - K.at<double>(1,2)) / K.at<double>(1,1)
           );
}

void pose_estimation_3d3d(const vector<Point3f>& pts1, const vector<Point3f>& pts2, Mat& R, Mat& t)
{
  // centroids of the two point sets
  Point3f p1, p2;
  int N = pts1.size();
  for (int i = 0; i < N; i++)
  {
    p1 += pts1[i];
    p2 += pts2[i];
  }
  p1 = Point3f(Vec3f(p1) / N);
  p2 = Point3f(Vec3f(p2) / N);
  
  // de-centered coordinates
  vector<Point3f> q1(N), q2(N);
  for (int i = 0; i < N; i++)
  {
    q1[i] = pts1[i] - p1;
    q2[i] = pts2[i] - p2;
  }
  
  // W = sum(q1 * q2^T)
  Eigen::Matrix3d W = Eigen::Matrix3d::Zero();
  for (int i = 0; i < N; i++)
    W += Eigen::Vector3d(q1[i].x, q1[i].y, q1[i].z) * Eigen::Vector3d(q2[i].x, q2[i].y, q2[i].z).transpose();
  
  // SVD of W gives the rotation
  Eigen::JacobiSVD<Eigen::Matrix3d> svd(W, Eigen::ComputeFullU | Eigen::ComputeFullV);
  Eigen::Matrix3d U = svd.matrixU();
  Eigen::Matrix3d V = svd.matrixV();
  Eigen::Matrix3d R_ = U * (V.transpose());
  if (R_.determinant() < 0)
    R_ = -R_;
  Eigen::Vector3d t_ = Eigen::Vector3d(p1.x, p1.y, p1.z) - R_ * Eigen::Vector3d(p2.x, p2.y, p2.z);
  
  R = (Mat_<double>(3,3) <<
    R_(0,0), R_(0,1), R_(0,2),
    R_(1,0), R_(1,1), R_(1,2),
    R_(2,0), R_(2,1), R_(2,2));
  t = (Mat_<double>(3,1) << t_(0,0), t_(1,0), t_(2,0));
}


// Ceres functor for one 3D-3D correspondence
struct ICPProblem
{
  ICPProblem(const Point3f& p1, const Point3f& p2) : p1_(p1), p2_(p2) {}
  
  template <typename T>
  bool operator()(const T* const pose, T* residual) const
  {
    // point of the second frame
    T p[3];
    p[0] = T(p2_.x);
    p[1] = T(p2_.y);
    p[2] = T(p2_.z);
    
    // rotate by the angle-axis part of the pose, then translate
    T newP[3];
    ceres::AngleAxisRotatePoint(pose, p, newP);
    newP[0] += pose[3];
    newP[1] += pose[4];
    newP[2] += pose[5];
    
    // 3D residual against the point observed in the first frame
    residual[0] = T(p1_.x) - newP[0];
    residual[1] = T(p1_.y) - newP[1];
    residual[2] = T(p1_.z) - newP[2];
    return true;
  }
  
  Point3f p1_, p2_;
};


void bundleAdjustment(const vector<Point3f>& pts1, const vector<Point3f>& pts2, Mat& R, Mat& t, Mat& T)
{
  ceres::Problem problem;
  
  Mat rotateVector;
  Rodrigues(R, rotateVector);
  
  // initial pose from the SVD solution
  double pose[6];
  pose[0] = rotateVector.at<double>(0);
  pose[1] = rotateVector.at<double>(1);
  pose[2] = rotateVector.at<double>(2);
  pose[3] = t.at<double>(0);
  pose[4] = t.at<double>(1);
  pose[5] = t.at<double>(2);
  
  // one residual block (3 residuals, 6 pose parameters) per point pair
  for (size_t i = 0; i < pts1.size(); i++)
  {
    problem.AddResidualBlock(
      new ceres::AutoDiffCostFunction<ICPProblem, 3, 6>(new ICPProblem(pts1[i], pts2[i])),
      nullptr,
      pose
    );
  }
  
  ceres::Solver::Options options;
  options.linear_solver_type = ceres::DENSE_QR;
  options.minimizer_progress_to_stdout = true;
  
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  cout << summary.BriefReport() << endl;
  
  // recover R, t and assemble the homogeneous transform T
  rotateVector.at<double>(0) = pose[0];
  rotateVector.at<double>(1) = pose[1];
  rotateVector.at<double>(2) = pose[2];
  Rodrigues(rotateVector, R);
  t.at<double>(0) = pose[3];
  t.at<double>(1) = pose[4];
  t.at<double>(2) = pose[5];
  
  T = (Mat_<double>(4,4) <<
    R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2), t.at<double>(0),
    R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2), t.at<double>(1),
    R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2), t.at<double>(2),
    0, 0, 0, 1
  );
  cout << "optimized pose T = " << endl << T << endl;
}


int main(int argc, char **argv)
{
  // load the RGB and depth images
  Mat img_1 = imread("../1.png", CV_LOAD_IMAGE_COLOR);
  Mat img_2 = imread("../2.png", CV_LOAD_IMAGE_COLOR);
  Mat depth_1 = imread("../1_depth.png", CV_LOAD_IMAGE_UNCHANGED);
  Mat depth_2 = imread("../2_depth.png", CV_LOAD_IMAGE_UNCHANGED);
  
  vector<KeyPoint> keypoints_1, keypoints_2;
  vector<DMatch> matches;
  find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
  // camera intrinsics
  Mat K = (Mat_<double>(3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
  vector<Point3f> pts1, pts2;
  
  for (DMatch m : matches)
  {
      ushort d1 = depth_1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
      ushort d2 = depth_2.ptr<unsigned short>(int(keypoints_2[m.trainIdx].pt.y))[int(keypoints_2[m.trainIdx].pt.x)];
      if (d1 == 0 || d2 == 0)
          continue;
      Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
      Point2d p2 = pixel2cam(keypoints_2[m.trainIdx].pt, K);
      float dd1 = float(d1) / 1000.0;
      float dd2 = float(d2) / 1000.0;
      pts1.push_back(Point3f(p1.x * dd1, p1.y * dd1, dd1));
      pts2.push_back(Point3f(p2.x * dd2, p2.y * dd2, dd2));
  }
  cout << "3d-3d pairs: " << pts1.size() << endl;
  Mat R, t;
  pose_estimation_3d3d(pts1, pts2, R, t);
  
  cout << "R inverse = " << endl << R.t() << endl;
  cout << "t inverse = " << endl << -R.t()*t << endl;
  
  Mat T;
  bundleAdjustment(pts1, pts2, R, t, T);
  
  return 0;
}

The program's output is shown below:

[Figure 8: Ceres ICP optimization output]

Notice that the ICP problem converges after essentially a single step. Compared with g2o, whose Gauss-Newton iteration even diverged here, this is pleasantly stable; that said, g2o is reputed to be very accurate on BA problems.
