V-LOAM Source Code Reading (2): Visual Odometry

Now we turn to the VisualOdometry node, something we never met in the loam series, but for readers familiar with visual odometry and with loam's laser odometry it is not too hard. According to the paper the front end uses MSCKF as the visual odometry, yet judging from this version of the code it instead inherits loam's nonlinear-optimization-based feature matching: although it is described as multi-state constrained estimation, it never updates the measurement states or the covariance matrix, so it cannot really be called a Kalman filter (hoping for a fresh open-source release). Compared with lego-loam, we focus here on the coordinate-transformation parts that we never fully understood before.

First, the topics this node deals with: it subscribes to the feature points from the feature-tracking node, the lidar point cloud converted into depth information by the depth-map node, and the IMU data. Its subscribers and publishers are set up as follows:

  ros::Subscriber imagePointsSub = nh.subscribe<sensor_msgs::PointCloud2>
                                   ("/image_points_last", 5, imagePointsHandler);

  ros::Subscriber depthCloudSub = nh.subscribe<sensor_msgs::PointCloud2>
                                  ("/depth_cloud", 5, depthCloudHandler);

  ros::Subscriber imuDataSub = nh.subscribe<sensor_msgs::Imu> ("/imu/data", 5, imuDataHandler);

  ros::Publisher voDataPub = nh.advertise<nav_msgs::Odometry> ("/cam_to_init", 5);
  voDataPubPointer = &voDataPub;

  tf::TransformBroadcaster tfBroadcaster;
  tfBroadcasterPointer = &tfBroadcaster;

  ros::Publisher depthPointsPub = nh.advertise<sensor_msgs::PointCloud2> ("/depth_points_last", 5);
  depthPointsPubPointer = &depthPointsPub;

  ros::Publisher imagePointsProjPub = nh.advertise<sensor_msgs::PointCloud2> ("/image_points_proj", 1);
  imagePointsProjPubPointer = &imagePointsProjPub;

  ros::Subscriber imageDataSub = nh.subscribe<sensor_msgs::Image>("/image/show", 1, imageDataHandler);

  ros::Publisher imageShowPub = nh.advertise<sensor_msgs::Image>("/image/show_2", 1);
  imageShowPubPointer = &imageShowPub;

The IMU handling is the same as in the loam series: a single index keeps cycling through a fixed-length circular buffer, overwriting the oldest entries.

void imuDataHandler(const sensor_msgs::Imu::ConstPtr& imuData)
{
  double roll, pitch, yaw;
  tf::Quaternion orientation;
  tf::quaternionMsgToTF(imuData->orientation, orientation);
  tf::Matrix3x3(orientation).getRPY(roll, pitch, yaw);

  imuPointerLast = (imuPointerLast + 1) % imuQueLength;

  imuTime[imuPointerLast] = imuData->header.stamp.toSec() - 0.1068;
  imuRoll[imuPointerLast] = roll;
  imuPitch[imuPointerLast] = pitch;
  imuYaw[imuPointerLast] = yaw;
}

The depth-cloud callback normalizes the incoming point cloud onto the plane 10 m in front of the optical center (z = 10 in the camera frame); using z = 10 is loam's usual convention. The original depth is stashed in the intensity field so that the metric point can be recovered later.

void depthCloudHandler(const sensor_msgs::PointCloud2ConstPtr& depthCloud2)
{
  depthCloudTime = depthCloud2->header.stamp.toSec();

  depthCloud->clear();
  pcl::fromROSMsg(*depthCloud2, *depthCloud);
  depthCloudNum = depthCloud->points.size();

  if (depthCloudNum > 10) {
    for (int i = 0; i < depthCloudNum; i++) {
      depthCloud->points[i].intensity = depthCloud->points[i].z;
      depthCloud->points[i].x *= 10 / depthCloud->points[i].z;
      depthCloud->points[i].y *= 10 / depthCloud->points[i].z;
      depthCloud->points[i].z = 10;
    }

    kdTree->setInputCloud(depthCloud);
  }
}
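
For clarity, here is a minimal sketch (not part of the original source) of this normalization and its inverse; the inverse is exactly what imagePointsHandler uses later when it recovers metric 3D points from the projected cloud via the intensity field.

#include <pcl/point_types.h>

// Project a camera-frame point onto the z = 10 plane, keeping its original depth in intensity.
pcl::PointXYZI normalizeToPlane(pcl::PointXYZI p) {
  p.intensity = p.z;        // remember the original depth
  p.x *= 10 / p.z;
  p.y *= 10 / p.z;
  p.z = 10;
  return p;
}

// Recover the metric camera-frame point from a normalized point.
pcl::PointXYZI recoverMetric(const pcl::PointXYZI& q) {
  pcl::PointXYZI p = q;
  p.x = q.x * q.intensity / 10;
  p.y = q.y * q.intensity / 10;
  p.z = q.intensity;
  return p;
}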

These two functions lay the groundwork for the image-point handler. They reconcile the world frame, the camera frame, and the previous camera pose, in the same way as throughout the loam series. In the naming, c stands for current, l for last, and o for origin: the goal is the rotation of the current frame relative to the origin, returned as ox, oy, oz. A matrix-form sketch is given right after the two functions.

void accumulateRotation(double cx, double cy, double cz, double lx, double ly, double lz, 
                        double &ox, double &oy, double &oz)
{
  double srx = cos(lx)*cos(cx)*sin(ly)*sin(cz) - cos(cx)*cos(cz)*sin(lx) - cos(lx)*cos(ly)*sin(cx);
  ox = -asin(srx);

  double srycrx = sin(lx)*(cos(cy)*sin(cz) - cos(cz)*sin(cx)*sin(cy)) + cos(lx)*sin(ly)*(cos(cy)*cos(cz) 
                + sin(cx)*sin(cy)*sin(cz)) + cos(lx)*cos(ly)*cos(cx)*sin(cy);
  double crycrx = cos(lx)*cos(ly)*cos(cx)*cos(cy) - cos(lx)*sin(ly)*(cos(cz)*sin(cy) 
                - cos(cy)*sin(cx)*sin(cz)) - sin(lx)*(sin(cy)*sin(cz) + cos(cy)*cos(cz)*sin(cx));
  oy = atan2(srycrx / cos(ox), crycrx / cos(ox));

  double srzcrx = sin(cx)*(cos(lz)*sin(ly) - cos(ly)*sin(lx)*sin(lz)) + cos(cx)*sin(cz)*(cos(ly)*cos(lz) 
                + sin(lx)*sin(ly)*sin(lz)) + cos(lx)*cos(cx)*cos(cz)*sin(lz);
  double crzcrx = cos(lx)*cos(lz)*cos(cx)*cos(cz) - cos(cx)*sin(cz)*(cos(ly)*sin(lz) 
                - cos(lz)*sin(lx)*sin(ly)) - sin(cx)*(sin(ly)*sin(lz) + cos(ly)*cos(lz)*sin(lx));
  oz = atan2(srzcrx / cos(ox), crzcrx / cos(ox));
}

void diffRotation(double cx, double cy, double cz, double lx, double ly, double lz, 
                  double &ox, double &oy, double &oz)
{
  double srx = cos(cx)*cos(cy)*(sin(ly)*sin(lz) + cos(ly)*cos(lz)*sin(lx)) 
             - cos(cx)*sin(cy)*(cos(ly)*sin(lz) - cos(lz)*sin(lx)*sin(ly)) - cos(lx)*cos(lz)*sin(cx);
  ox = -asin(srx);

  double srycrx = cos(cx)*sin(cy)*(cos(ly)*cos(lz) + sin(lx)*sin(ly)*sin(lz)) 
                - cos(cx)*cos(cy)*(cos(lz)*sin(ly) - cos(ly)*sin(lx)*sin(lz)) - cos(lx)*sin(cx)*sin(lz);
  double crycrx = sin(cx)*sin(lx) + cos(cx)*cos(cy)*cos(lx)*cos(ly) + cos(cx)*cos(lx)*sin(cy)*sin(ly);
  oy = atan2(srycrx / cos(ox), crycrx / cos(ox));

  double srzcrx = cos(cx)*cos(lx)*cos(lz)*sin(cz) - (cos(cz)*sin(cy) 
                - cos(cy)*sin(cx)*sin(cz))*(sin(ly)*sin(lz) + cos(ly)*cos(lz)*sin(lx)) 
                - (cos(cy)*cos(cz) + sin(cx)*sin(cy)*sin(cz))*(cos(ly)*sin(lz) - cos(lz)*sin(lx)*sin(ly));
  double crzcrx = (sin(cy)*sin(cz) + cos(cy)*cos(cz)*sin(cx))*(sin(ly)*sin(lz) 
                + cos(ly)*cos(lz)*sin(lx)) + (cos(cy)*sin(cz) - cos(cz)*sin(cx)*sin(cy))*(cos(ly)*sin(lz) 
                - cos(lz)*sin(lx)*sin(ly)) + cos(cx)*cos(cz)*cos(lx)*cos(lz);
  oz = atan2(srzcrx / cos(ox), crzcrx / cos(ox));
}
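
As a sanity check, the same composition can be written in matrix form. The sketch below is not from the original source: the axis order (Ry·Rx·Rz, i.e. points rotated about Z, then X, then Y) and the angle-extraction formulas are my assumption, chosen to match the point-rotation order used later in this node; the hand-expanded trigonometry above remains the authoritative version. diffRotation does the analogous extraction, but for the relative rotation between two attitudes.

#include <cmath>
#include <Eigen/Geometry>

// Build a rotation from loam-style Euler angles (assumed convention, see note above).
Eigen::Matrix3d eulerToRot(double rx, double ry, double rz) {
  return (Eigen::AngleAxisd(ry, Eigen::Vector3d::UnitY())
        * Eigen::AngleAxisd(rx, Eigen::Vector3d::UnitX())
        * Eigen::AngleAxisd(rz, Eigen::Vector3d::UnitZ())).toRotationMatrix();
}

// Matrix-form equivalent of accumulateRotation: compose the accumulated rotation
// with the new frame-to-frame rotation, then recover the Euler angles.
void accumulateRotationMat(double cx, double cy, double cz,
                           double lx, double ly, double lz,
                           double &ox, double &oy, double &oz) {
  Eigen::Matrix3d R = eulerToRot(cx, cy, cz) * eulerToRot(lx, ly, lz);
  ox = -asin(R(1, 2));            // angle about X
  oy = atan2(R(0, 2), R(2, 2));   // angle about Y
  oz = atan2(R(1, 0), R(1, 1));   // angle about Z
}

This is only meant to make the composition order legible; the trig-expanded source avoids constructing matrices at runtime.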

imageDataHandler draws the associated feature points as colored circles and republishes the image: red for features without depth, green for depth taken from the lidar cloud, and blue for depth obtained by triangulation:

void imageDataHandler(const sensor_msgs::Image::ConstPtr& imageData) 
{
  cv_bridge::CvImagePtr bridge = cv_bridge::toCvCopy(imageData, "bgr8");

  int ipRelationsNum = ipRelations->points.size();
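  // Note (added): ipRelations stores normalized image coordinates; the circle positions
  // below map them back to pixel coordinates and divide by the display downsample rate.
  // This assumes kImage holds the intrinsics roughly as {fx, 0, cx, 0, fy, cy, ...},
  // so that u_pix = cx - x_norm * fx and v_pix = cy - y_norm * fy.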
  for (int i = 0; i < ipRelationsNum; i++) {
    if (fabs(ipRelations->points[i].v) < 0.5) {
      cv::circle(bridge->image, cv::Point((kImage[2] - ipRelations->points[i].z * kImage[0]) / showDSRate,
                (kImage[5] - ipRelations->points[i].h * kImage[4]) / showDSRate), 1, CV_RGB(255, 0, 0), 2);
    } else if (fabs(ipRelations->points[i].v - 1) < 0.5) {
      cv::circle(bridge->image, cv::Point((kImage[2] - ipRelations->points[i].z * kImage[0]) / showDSRate,
                (kImage[5] - ipRelations->points[i].h * kImage[4]) / showDSRate), 1, CV_RGB(0, 255, 0), 2);
    } else if (fabs(ipRelations->points[i].v - 2) < 0.5) {
      cv::circle(bridge->image, cv::Point((kImage[2] - ipRelations->points[i].z * kImage[0]) / showDSRate,
                (kImage[5] - ipRelations->points[i].h * kImage[4]) / showDSRate), 1, CV_RGB(0, 0, 255), 2);
    } /*else {
      cv::circle(bridge->image, cv::Point((kImage[2] - ipRelations->points[i].z * kImage[0]) / showDSRate,
                (kImage[5] - ipRelations->points[i].h * kImage[4]) / showDSRate), 1, CV_RGB(0, 0, 0), 2);
    }*/
  }

  sensor_msgs::Image::Ptr imagePointer = bridge->toImageMsg();
  imageShowPubPointer->publish(imagePointer);
}

imagePointsHandler is the main processing routine of this node. It associates projected feature points with the depth cloud via a nearest-neighbor search to obtain approximate depth, then iteratively optimizes the pose to find the best transform from the last frame to the current one, i.e. the frame-to-frame odometry.

void imagePointsHandler(const sensor_msgs::PointCloud2ConstPtr& imagePoints2)
{
  imagePointsLastTime = imagePointsCurTime;
  imagePointsCurTime = imagePoints2->header.stamp.toSec();

  imuRollLast = imuRollCur;
  imuPitchLast = imuPitchCur;
  imuYawLast = imuYawCur;
  //walk forward through the circular IMU buffer until reaching the sample just after the image timestamp; if even the newest sample is older than the image, use it directly, otherwise linearly interpolate between the two neighbors (in practice a sample is almost always found)
  double transform[6] = {0};
  if (imuPointerLast >= 0) {
    while (imuPointerFront != imuPointerLast) {
      if (imagePointsCurTime < imuTime[imuPointerFront]) {
        break;
      }
      imuPointerFront = (imuPointerFront + 1) % imuQueLength;
    }

    if (imagePointsCurTime > imuTime[imuPointerFront]) {
      imuRollCur = imuRoll[imuPointerFront];
      imuPitchCur = imuPitch[imuPointerFront];
      imuYawCur = imuYaw[imuPointerFront];
    } else {
      int imuPointerBack = (imuPointerFront + imuQueLength - 1) % imuQueLength;
      double ratioFront = (imagePointsCurTime - imuTime[imuPointerBack]) 
                        / (imuTime[imuPointerFront] - imuTime[imuPointerBack]);
      double ratioBack = (imuTime[imuPointerFront] - imagePointsCurTime) 
                       / (imuTime[imuPointerFront] - imuTime[imuPointerBack]);

      imuRollCur = imuRoll[imuPointerFront] * ratioFront + imuRoll[imuPointerBack] * ratioBack;
      imuPitchCur = imuPitch[imuPointerFront] * ratioFront + imuPitch[imuPointerBack] * ratioBack;
      if (imuYaw[imuPointerFront] - imuYaw[imuPointerBack] > PI) {
        imuYawCur = imuYaw[imuPointerFront] * ratioFront + (imuYaw[imuPointerBack] + 2 * PI) * ratioBack;
      } else if (imuYaw[imuPointerFront] - imuYaw[imuPointerBack] < -PI) {
        imuYawCur = imuYaw[imuPointerFront] * ratioFront + (imuYaw[imuPointerBack] - 2 * PI) * ratioBack;
      } else {
        imuYawCur = imuYaw[imuPointerFront] * ratioFront + imuYaw[imuPointerBack] * ratioBack;
      }
    }

    if (imuInited) {
      //transform[0] -= imuPitchCur - imuPitchLast;
      //transform[1] -= imuYawCur - imuYawLast;
      //transform[2] -= imuRollCur - imuRollLast;
    }
  }
  //swap the point-cloud buffers: the cloud that was current becomes the last frame's cloud
  pcl::PointCloud<ImagePoint>::Ptr imagePointsTemp = imagePointsLast;
  imagePointsLast = imagePointsCur;
  imagePointsCur = imagePointsTemp;

  imagePointsCur->clear();
  pcl::fromROSMsg(*imagePoints2, *imagePointsCur);

  imagePointsLastNum = imagePointsCurNum;
  imagePointsCurNum = imagePointsCur->points.size();

  pcl::PointCloud<ImagePoint>::Ptr startPointsTemp = startPointsLast;
  startPointsLast = startPointsCur;
  startPointsCur = startPointsTemp;

  pcl::PointCloud<pcl::PointXYZHSV>::Ptr startTransTemp = startTransLast;
  startTransLast = startTransCur;
  startTransCur = startTransTemp;

  std::vector<float>* ipDepthTemp = ipDepthLast;
  ipDepthLast = ipDepthCur;
  ipDepthCur = ipDepthTemp;

  int j = 0;
  pcl::PointXYZI ips;
  pcl::PointXYZHSV ipr;
  ipRelations->clear();
  ipInd.clear();
  //the loop runs over the number of features in the last frame, i.e. the search into the current frame is driven by the last frame's feature points
  for (int i = 0; i < imagePointsLastNum; i++) {
    bool ipFound = false;//if the feature is not also observed in the current frame, skip it and leave it out of the computation
    for (; j < imagePointsCurNum; j++) {
      if (imagePointsCur->points[j].ind == imagePointsLast->points[i].ind) {
        ipFound = true;
      }
      if (imagePointsCur->points[j].ind >= imagePointsLast->points[i].ind) {
        break;
      }
    }

    if (ipFound) {
      ipr.x = imagePointsLast->points[i].u;//u, v are normalized image coordinates computed from the pixel position and the camera intrinsics
      ipr.y = imagePointsLast->points[i].v;
      ipr.z = imagePointsCur->points[j].u;
      ipr.h = imagePointsCur->points[j].v;

      ips.x = 10 * ipr.x;
      ips.y = 10 * ipr.y;
      ips.z = 10;//project the feature onto the plane 10 m from the optical center
      //the depth cloud has already been projected onto the z = 10 plane as well, so query it for the depth points nearest to the projected feature
      if (depthCloudNum > 10) {
        kdTree->nearestKSearch(ips, 3, pointSearchInd, pointSearchSqrDis);

        double minDepth, maxDepth;
        //the nearest neighbor is close enough (squared distance below 0.5 on the z = 10 plane, i.e. within roughly 0.71)
        if (pointSearchSqrDis[0] < 0.5 && pointSearchInd.size() == 3) {
          //the search used the projected points, but we now need the original depth-cloud points: intensity stores the original z value
          pcl::PointXYZI depthPoint = depthCloud->points[pointSearchInd[0]];
          double x1 = depthPoint.x * depthPoint.intensity / 10;
          double y1 = depthPoint.y * depthPoint.intensity / 10;
          double z1 = depthPoint.intensity;
          minDepth = z1;
          maxDepth = z1;

          depthPoint = depthCloud->points[pointSearchInd[1]];
          double x2 = depthPoint.x * depthPoint.intensity / 10;
          double y2 = depthPoint.y * depthPoint.intensity / 10;
          double z2 = depthPoint.intensity;
          minDepth = (z2 < minDepth)? z2 : minDepth;
          maxDepth = (z2 > maxDepth)? z2 : maxDepth;

          depthPoint = depthCloud->points[pointSearchInd[2]];
          double x3 = depthPoint.x * depthPoint.intensity / 10;
          double y3 = depthPoint.y * depthPoint.intensity / 10;
          double z3 = depthPoint.intensity;
          minDepth = (z3 < minDepth)? z3 : minDepth;
          maxDepth = (z3 > maxDepth)? z3 : maxDepth;//maximum and minimum depth among the three neighbors
          
          double u = ipr.x;
          double v = ipr.y;
          //compute the feature depth from the three nearby depth points: solve for the scale s such that s*(u, v, 1) lies on the plane through the three recovered 3D points
          ipr.s = (x1*y2*z3 - x1*y3*z2 - x2*y1*z3 + x2*y3*z1 + x3*y1*z2 - x3*y2*z1) 
                / (x1*y2 - x2*y1 - x1*y3 + x3*y1 + x2*y3 - x3*y2 + u*y1*z2 - u*y2*z1
                - v*x1*z2 + v*x2*z1 - u*y1*z3 + u*y3*z1 + v*x1*z3 - v*x3*z1 + u*y2*z3 
                - u*y3*z2 - v*x2*z3 + v*x3*z2);
          ipr.v = 1;
          //if the three depths differ too much, the projection lands near an occlusion or an object edge, so discard it
          if (maxDepth - minDepth > 2) {
            ipr.s = 0;
            ipr.v = 0;
          } else if (ipr.s - maxDepth > 0.2) {//the three depths are consistent (a fairly flat patch), but the interpolated feature depth deviates noticeably, so clamp it to the neighbors' range
            ipr.s = maxDepth;
          } else if (ipr.s - minDepth < -0.2) {
            ipr.s = minDepth;
          }
        } else {
          ipr.s = 0;
          ipr.v = 0;
        }
      } else {
        ipr.s = 0;
        ipr.v = 0;
      }
      //v below 0.5 means depth association failed; the point cloud is sparse, so not every feature can get a depth from it, and we fall back to triangulation
      if (fabs(ipr.v) < 0.5) {
        double disX = transformSum[3] - startTransLast->points[i].h;
        double disY = transformSum[4] - startTransLast->points[i].s;
        double disZ = transformSum[5] - startTransLast->points[i].v;
        //requires the accumulated baseline between the pose where this feature was first observed and the current pose to exceed 1 m (it accumulates over frames, so it is not hard to reach; tune as needed)
        if (sqrt(disX * disX + disY * disY + disZ * disZ) > 1) {

          double u0 = startPointsLast->points[i].u;
          double v0 = startPointsLast->points[i].v;
          double u1 = ipr.x;
          double v1 = ipr.y;
          //rotation, relative to the origin, of the frame in which this feature was first observed
          double srx0 = sin(-startTransLast->points[i].x);
          double crx0 = cos(-startTransLast->points[i].x);
          double sry0 = sin(-startTransLast->points[i].y);
          double cry0 = cos(-startTransLast->points[i].y);
          double srz0 = sin(-startTransLast->points[i].z);
          double crz0 = cos(-startTransLast->points[i].z);
          //rotation of the last frame relative to the origin
          double srx1 = sin(-transformSum[0]);
          double crx1 = cos(-transformSum[0]);
          double sry1 = sin(-transformSum[1]);
          double cry1 = cos(-transformSum[1]);
          double srz1 = sin(-transformSum[2]);
          double crz1 = cos(-transformSum[2]);
          //translation, relative to the origin, of the frame in which this feature was first observed
          double tx0 = -startTransLast->points[i].h;
          double ty0 = -startTransLast->points[i].s;
          double tz0 = -startTransLast->points[i].v;
          //translation of the last frame relative to the origin
          double tx1 = -transformSum[3];
          double ty1 = -transformSum[4];
          double tz1 = -transformSum[5];
          //transform the feature's ray in its first-observation camera frame into the world frame
          //first a rotation about the z axis
          double x1 = crz0 * u0 + srz0 * v0;
          double y1 = -srz0 * u0 + crz0 * v0;
          double z1 = 1;
          //then about the x axis
          double x2 = x1;
          double y2 = crx0 * y1 + srx0 * z1;
          double z2 = -srx0 * y1 + crx0 * z1;
          //then about the y axis
          double x3 = cry0 * x2 - sry0 * z2;
          double y3 = y2;
          double z3 = sry0 * x2 + cry0 * z2;
          //then transform the world-frame point into the last frame's camera frame
          double x4 = cry1 * x3 + sry1 * z3;
          double y4 = y3;
          double z4 = -sry1 * x3 + cry1 * z3;

          double x5 = x4;
          double y5 = crx1 * y4 - srx1 * z4;
          double z5 = srx1 * y4 + crx1 * z4;

          double x6 = crz1 * x5 - srz1 * y5;
          double y6 = srz1 * x5 + crz1 * y5;
          double z6 = z5;
          //normalized coordinates of that first observation as seen from the last frame
          u0 = x6 / z6;
          v0 = y6 / z6;

          x1 = cry1 * (tx1 - tx0) + sry1 * (tz1 - tz0);
          y1 = ty1 - ty0;
          z1 = -sry1 * (tx1 - tx0) + cry1 * (tz1 - tz0);

          x2 = x1;
          y2 = crx1 * y1 - srx1 * z1;
          z2 = srx1 * y1 + crx1 * z1;
          //triangulate in the last frame's coordinates: the two viewing rays and the baseline between the first-observation pose and the last pose form a triangle whose two long sides are the depths
          double tx = crz1 * x2 - srz1 * y2;
          double ty = srz1 * x2 + crz1 * y2;
          double tz = z2;
          //the triangle's base: the normalized disparity projected onto the baseline (epipolar) direction
          double delta = sqrt((v0 - v1) * (v0 - v1) + (u0 - u1) * (u0 - u1))
                       * cos(atan2(tz * v1 - ty, tz * u1 - tx) - atan2(v0 - v1, u0 - u1));
          //then the triangle's long side; dividing it by the base gives the feature's depth in the last frame
          double depth = sqrt((tz * u0 - tx) * (tz * u0 - tx) + (tz * v0 - ty) * (tz * v0 - ty)) / delta;

          if (depth > 0.5 && depth < 100) {
            ipr.s = depth;
            ipr.v = 2;
          }
        }
        //ipr.v == 2 means the depth came from triangulation, 1 means it came directly from the point cloud, 0 means no depth was obtained
        if (ipr.v == 2) {
          if ((*ipDepthLast)[i] > 0) {//fuse with the previous estimate: a weighted average in inverse depth (roughly 1/3 weight on the old estimate, 2/3 on the new triangulation)
            ipr.s = 3 * ipr.s * (*ipDepthLast)[i] / (ipr.s + 2 * (*ipDepthLast)[i]);
          }
          (*ipDepthLast)[i] = ipr.s;
        } else if ((*ipDepthLast)[i] > 0) {
          ipr.s = (*ipDepthLast)[i];
          ipr.v = 2;
        }
      }

      ipRelations->push_back(ipr);//record this associated point pair
      ipInd.push_back(imagePointsLast->points[i].ind);
    }
  }

  int iterNum = 100;
  pcl::PointXYZHSV ipr2, ipr3, ipr4;
  int ipRelationsNum = ipRelations->points.size();
  int ptNumNoDepthRec = 0;
  int ptNumWithDepthRec = 0;
  double meanValueWithDepthRec = 100000;
  for (int iterCount = 0; iterCount < iterNum; iterCount++) {
    ipRelations2->clear();
    ipy2.clear();
    int ptNumNoDepth = 0;
    int ptNumWithDepth = 0;
    double meanValueNoDepth = 0;
    double meanValueWithDepth = 0;
    for (int i = 0; i < ipRelationsNum; i++) {
      ipr = ipRelations->points[i];
      //an associated pair of points: (u0, v0) in the last frame, (u1, v1) in the current frame
      double u0 = ipr.x;
      double v0 = ipr.y;
      double u1 = ipr.z;
      double v1 = ipr.h;
      //transform is a length-6 array: [0..2] are the frame-to-frame rotation angles about x, y, z and [3..5] the translation
      double srx = sin(transform[0]);
      double crx = cos(transform[0]);
      double sry = sin(transform[1]);
      double cry = cos(transform[1]);
      double srz = sin(transform[2]);
      double crz = cos(transform[2]);
      double tx = transform[3];
      double ty = transform[4];
      double tz = transform[5];
      //two cases, depending on whether a depth was obtained
      if (fabs(ipr.v) < 0.5) {
        //the six fields of ipr2 are the partial derivatives of the residual with respect to the six transform parameters; mind the order
        //derivative w.r.t. the rotation about x
        ipr2.x = v0*(crz*srx*(tx - tz*u1) - crx*(ty*u1 - tx*v1) + srz*srx*(ty - tz*v1)) 
               - u0*(sry*srx*(ty*u1 - tx*v1) + crz*sry*crx*(tx - tz*u1) + sry*srz*crx*(ty - tz*v1)) 
               + cry*srx*(ty*u1 - tx*v1) + cry*crz*crx*(tx - tz*u1) + cry*srz*crx*(ty - tz*v1);
        //derivative w.r.t. the rotation about y
        ipr2.y = u0*((tx - tz*u1)*(srz*sry - crz*srx*cry) - (ty - tz*v1)*(crz*sry + srx*srz*cry) 
               + crx*cry*(ty*u1 - tx*v1)) - (tx - tz*u1)*(srz*cry + crz*srx*sry) 
               + (ty - tz*v1)*(crz*cry - srx*srz*sry) + crx*sry*(ty*u1 - tx*v1);
        //derivative w.r.t. the rotation about z
        ipr2.z = -u0*((tx - tz*u1)*(cry*crz - srx*sry*srz) + (ty - tz*v1)*(cry*srz + srx*sry*crz)) 
               - (tx - tz*u1)*(sry*crz + cry*srx*srz) - (ty - tz*v1)*(sry*srz - cry*srx*crz) 
               - v0*(crx*crz*(ty - tz*v1) - crx*srz*(tx - tz*u1));
        //derivative w.r.t. tx
        ipr2.h = cry*crz*srx - v0*(crx*crz - srx*v1) - u0*(cry*srz + crz*srx*sry + crx*sry*v1) 
               - sry*srz + crx*cry*v1;
        //derivative w.r.t. ty
        ipr2.s = crz*sry - v0*(crx*srz + srx*u1) + u0*(cry*crz + crx*sry*u1 - srx*sry*srz) 
               - crx*cry*u1 + cry*srx*srz;
        //derivative w.r.t. tz
        ipr2.v = u1*(sry*srz - cry*crz*srx) - v1*(crz*sry + cry*srx*srz) + u0*(u1*(cry*srz + crz*srx*sry) 
               - v1*(cry*crz - srx*sry*srz)) + v0*(crx*crz*u1 + crx*srz*v1);
        //y2 is the residual for depth-unknown points: roughly a coplanarity (epipolar-style) constraint between the two observation rays and the frame-to-frame translation, expressed on the normalized image plane
        double y2 = (ty - tz*v1)*(crz*sry + cry*srx*srz) - (tx - tz*u1)*(sry*srz - cry*crz*srx) 
                  - v0*(srx*(ty*u1 - tx*v1) + crx*crz*(tx - tz*u1) + crx*srz*(ty - tz*v1)) 
                  + u0*((ty - tz*v1)*(cry*crz - srx*sry*srz) - (tx - tz*u1)*(cry*srz + crz*srx*sry) 
                  + crx*sry*(ty*u1 - tx*v1)) - crx*cry*(ty*u1 - tx*v1);

        if (ptNumNoDepthRec < 50 || iterCount < 25 || fabs(y2) < 2 * meanValueWithDepthRec / 10000) {
          double scale = 100;
          ipr2.x *= scale;
          ipr2.y *= scale;
          ipr2.z *= scale;
          ipr2.h *= scale;
          ipr2.s *= scale;
          ipr2.v *= scale;
          y2 *= scale;

          ipRelations2->push_back(ipr2);
          ipy2.push_back(y2);

          ptNumNoDepth++;
        } else {
          ipRelations->points[i].v = -1;
        }
      } else if (fabs(ipr.v - 1) < 0.5 || fabs(ipr.v - 2) < 0.5) {

        double d0 = ipr.s;

        ipr3.x = d0*(cry*srz*crx + cry*u1*srx) - d0*u0*(sry*srz*crx + sry*u1*srx) 
               - d0*v0*(u1*crx - srz*srx);

        ipr3.y = d0*(crz*cry + crx*u1*sry - srx*srz*sry) - d0*u0*(crz*sry - crx*u1*cry + srx*srz*cry);

        ipr3.z = -d0*(sry*srz - cry*srx*crz) - d0*u0*(cry*srz + srx*sry*crz) - crx*d0*v0*crz;

        ipr3.h = 1;

        ipr3.s = 0;

        ipr3.v = -u1;

        double y3 = tx - tz*u1 + d0*(crz*sry - crx*cry*u1 + cry*srx*srz) - d0*v0*(crx*srz + srx*u1) 
                  + d0*u0*(cry*crz + crx*sry*u1 - srx*sry*srz);

        ipr4.x = d0*(cry*v1*srx - cry*crz*crx) + d0*u0*(crz*sry*crx - sry*v1*srx) 
               - d0*v0*(crz*srx + v1*crx);

        ipr4.y = d0*(srz*cry + crz*srx*sry + crx*v1*sry) + d0*u0*(crz*srx*cry - srz*sry + crx*v1*cry);

        ipr4.z = d0*(sry*crz + cry*srx*srz) + d0*u0*(cry*crz - srx*sry*srz) - crx*d0*v0*srz;

        ipr4.h = 0;

        ipr4.s = 1;

        ipr4.v = -v1;

        double y4 = ty - tz*v1 - d0*(cry*crz*srx - sry*srz + crx*cry*v1) + d0*v0*(crx*crz - srx*v1) 
                  + d0*u0*(cry*srz + crz*srx*sry + crx*sry*v1);

        if (ptNumWithDepthRec < 50 || iterCount < 25 || 
            sqrt(y3 * y3 + y4 * y4) < 2 * meanValueWithDepthRec) {
          ipRelations2->push_back(ipr3);
          ipy2.push_back(y3);

          ipRelations2->push_back(ipr4);
          ipy2.push_back(y4);

          ptNumWithDepth++;
          meanValueWithDepth += sqrt(y3 * y3 + y4 * y4);
        } else {
          ipRelations->points[i].v = -1;
        }
      }
    }
    meanValueWithDepth /= (ptNumWithDepth + 0.01);
    ptNumNoDepthRec = ptNumNoDepth;
    ptNumWithDepthRec = ptNumWithDepth;
    meanValueWithDepthRec = meanValueWithDepth;

    int ipRelations2Num = ipRelations2->points.size();
    if (ipRelations2Num > 10) {
      cv::Mat matA(ipRelations2Num, 6, CV_32F, cv::Scalar::all(0));
      cv::Mat matAt(6, ipRelations2Num, CV_32F, cv::Scalar::all(0));
      cv::Mat matAtA(6, 6, CV_32F, cv::Scalar::all(0));
      cv::Mat matB(ipRelations2Num, 1, CV_32F, cv::Scalar::all(0));
      cv::Mat matAtB(6, 1, CV_32F, cv::Scalar::all(0));
      cv::Mat matX(6, 1, CV_32F, cv::Scalar::all(0));
      //solve the least-squares problem for this iteration
      for (int i = 0; i < ipRelations2Num; i++) {
        ipr2 = ipRelations2->points[i];
        //one row of the Jacobian: derivatives of this residual w.r.t. the six transform parameters
        matA.at<float>(i, 0) = ipr2.x;
        matA.at<float>(i, 1) = ipr2.y;
        matA.at<float>(i, 2) = ipr2.z;
        matA.at<float>(i, 3) = ipr2.h;
        matA.at<float>(i, 4) = ipr2.s;
        matA.at<float>(i, 5) = ipr2.v;
        matB.at<float>(i, 0) = -0.2 * ipy2[i];//right-hand-side residual; the factor of -0.2 damps the Gauss-Newton step (a fixed under-relaxation of the update)
      }
      cv::transpose(matA, matAt);
      matAtA = matAt * matA;//Gauss-Newton: J^T * J approximates the Hessian
      matAtB = matAt * matB;//the normal equations H * x = b, solved by QR decomposition
      cv::solve(matAtA, matAtB, matX, cv::DECOMP_QR);

      //if (fabs(matX.at<float>(0, 0)) < 0.1 && fabs(matX.at<float>(1, 0)) < 0.1 && 
      //    fabs(matX.at<float>(2, 0)) < 0.1) {
        transform[0] += matX.at<float>(0, 0);
        transform[1] += matX.at<float>(1, 0);
        transform[2] += matX.at<float>(2, 0);
        transform[3] += matX.at<float>(3, 0);
        transform[4] += matX.at<float>(4, 0);
        transform[5] += matX.at<float>(5, 0);//updated estimate, used as the initial value for the next iteration
      //}

      float deltaR = sqrt(matX.at<float>(0, 0) * 180 / PI * matX.at<float>(0, 0) * 180 / PI
                   + matX.at<float>(1, 0) * 180 / PI * matX.at<float>(1, 0) * 180 / PI
                   + matX.at<float>(2, 0) * 180 / PI * matX.at<float>(2, 0) * 180 / PI);
      float deltaT = sqrt(matX.at<float>(3, 0) * 100 * matX.at<float>(3, 0) * 100
                   + matX.at<float>(4, 0) * 100 * matX.at<float>(4, 0) * 100
                   + matX.at<float>(5, 0) * 100 * matX.at<float>(5, 0) * 100);
      //terminate once the increments are tiny; the resulting R, t is the transform from the last frame to the current frame
      if (deltaR < 0.00001 && deltaT < 0.00001) {
        break;
      }

      //ROS_INFO ("iter: %d, deltaR: %f, deltaT: %f", iterCount, deltaR, deltaT);
    }
  }

  if (!imuInited) {
    imuYawInit = imuYawCur;
    transform[0] -= imuPitchCur;
    transform[2] -= imuRollCur;

    imuInited = true;
  }
  //after convergence we have the delta R, t from the last frame to the current frame; multiplying the last frame's rotation by delta R gives the current frame's rotation, which is decomposed back into Euler angles. That is what accumulateRotation does: it accumulates the newly added rotation to obtain the current frame's rotation in the world frame
  double rx, ry, rz;
  accumulateRotation(transformSum[0], transformSum[1], transformSum[2], 
                    -transform[0], -transform[1], -transform[2], rx, ry, rz);
    
  if (imuPointerLast >= 0) {
    double drx, dry, drz;
    diffRotation(imuPitchCur, imuYawCur - imuYawInit, imuRollCur, rx, ry, rz, drx, dry, drz);//next, handle the imu: compute the difference between the imu attitude and the optimized rotation and blend a fraction of it back in; a bit crude, essentially a complementary filter

    transform[0] -= 0.1 * drx;
    /*if (dry > PI) {
      transform[1] -= 0.1 * (dry - 2 * PI);
    } else if (imuYawCur - imuYawInit - ry < -PI) {
      transform[1] -= 0.1 * (dry + 2 * PI);
    } else {
      transform[1] -= 0.1 * dry;
    }*/
    transform[2] -= 0.1 * drz;

    accumulateRotation(transformSum[0], transformSum[1], transformSum[2], 
                      -transform[0], -transform[1], -transform[2], rx, ry, rz);
  }
  //assemble the odometry output: rotate the frame-to-frame translation into the world frame and accumulate it
  double x1 = cos(rz) * transform[3] - sin(rz) * transform[4];
  double y1 = sin(rz) * transform[3] + cos(rz) * transform[4];
  double z1 = transform[5];

  double x2 = x1;
  double y2 = cos(rx) * y1 - sin(rx) * z1;
  double z2 = sin(rx) * y1 + cos(rx) * z1;

  double tx = transformSum[3] - (cos(ry) * x2 + sin(ry) * z2);
  double ty = transformSum[4] - y2;
  double tz = transformSum[5] - (-sin(ry) * x2 + cos(ry) * z2);

  transformSum[0] = rx;
  transformSum[1] = ry;
  transformSum[2] = rz;
  transformSum[3] = tx;
  transformSum[4] = ty;
  transformSum[5] = tz;

  pcl::PointXYZHSV spc;
  spc.x = transformSum[0];
  spc.y = transformSum[1];
  spc.z = transformSum[2];
  spc.h = transformSum[3];
  spc.s = transformSum[4];
  spc.v = transformSum[5];

  double crx = cos(transform[0]);
  double srx = sin(transform[0]);
  double cry = cos(transform[1]);
  double sry = sin(transform[1]);

  j = 0;
  for (int i = 0; i < imagePointsCurNum; i++) {
    bool ipFound = false;
    for (; j < imagePointsLastNum; j++) {
      if (imagePointsLast->points[j].ind == imagePointsCur->points[i].ind) {
        ipFound = true;
      }
      if (imagePointsLast->points[j].ind >= imagePointsCur->points[i].ind) {
        break;
      }
    }
    //features that remain continuously observed are carried over; they serve as the first observation for triangulation when matching the next frame
    if (ipFound) {
      startPointsCur->push_back(startPointsLast->points[j]);
      startTransCur->push_back(startTransLast->points[j]);

      if ((*ipDepthLast)[j] > 0) {
        double ipz = (*ipDepthLast)[j];
        double ipx = imagePointsLast->points[j].u * ipz;
        double ipy = imagePointsLast->points[j].v * ipz;

        x1 = cry * ipx + sry * ipz;
        y1 = ipy;
        z1 = -sry * ipx + cry * ipz;

        x2 = x1;
        y2 = crx * y1 - srx * z1;
        z2 = srx * y1 + crx * z1;

        ipDepthCur->push_back(z2 + transform[5]);
      } else {
        ipDepthCur->push_back(-1);
      }
    } else {
      startPointsCur->push_back(imagePointsCur->points[i]);
      startTransCur->push_back(spc);
      ipDepthCur->push_back(-1);
    }
  }
  startPointsLast->clear();
  startTransLast->clear();
  ipDepthLast->clear();

  angleSum[0] -= transform[0];
  angleSum[1] -= transform[1];
  angleSum[2] -= transform[2];

  geometry_msgs::Quaternion geoQuat = tf::createQuaternionMsgFromRollPitchYaw(rz, -rx, -ry);

  nav_msgs::Odometry voData;
  voData.header.frame_id = "/camera_init";
  voData.child_frame_id = "/camera";
  voData.header.stamp = imagePoints2->header.stamp;
  voData.pose.pose.orientation.x = -geoQuat.y;
  voData.pose.pose.orientation.y = -geoQuat.z;
  voData.pose.pose.orientation.z = geoQuat.x;
  voData.pose.pose.orientation.w = geoQuat.w;
  voData.pose.pose.position.x = tx;
  voData.pose.pose.position.y = ty;
  voData.pose.pose.position.z = tz;
  voData.twist.twist.angular.x = angleSum[0];
  voData.twist.twist.angular.y = angleSum[1];
  voData.twist.twist.angular.z = angleSum[2];
  voDataPubPointer->publish(voData);

  tf::StampedTransform voTrans;
  voTrans.frame_id_ = "/camera_init";
  voTrans.child_frame_id_ = "/camera";
  voTrans.stamp_ = imagePoints2->header.stamp;
  voTrans.setRotation(tf::Quaternion(-geoQuat.y, -geoQuat.z, geoQuat.x, geoQuat.w));
  voTrans.setOrigin(tf::Vector3(tx, ty, tz));
  tfBroadcasterPointer->sendTransform(voTrans);

  pcl::PointCloud<DepthPoint>::Ptr depthPointsTemp = depthPointsLast;
  depthPointsLast = depthPointsCur;
  depthPointsCur = depthPointsTemp;

  DepthPoint ipd;
  depthPointsCur->clear();

  ipd.u = transformSum[0];
  ipd.v = transformSum[1];
  ipd.depth = transformSum[2];
  ipd.ind = -2;
  depthPointsCur->push_back(ipd);

  ipd.u = transformSum[3];
  ipd.v = transformSum[4];
  ipd.depth = transformSum[5];
  ipd.ind = -1;
  depthPointsCur->push_back(ipd);

  depthPointsLastNum = depthPointsCurNum;
  depthPointsCurNum = 2;

  j = 0;
  pcl::PointXYZ ipp;
  depthPointsSend->clear();
  imagePointsProj->clear();
  for (int i = 0; i < ipRelationsNum; i++) {//keep ids and depths for the next round of matching
    if (fabs(ipRelations->points[i].v - 1) < 0.5 || fabs(ipRelations->points[i].v - 2) < 0.5) {

      ipd.u = ipRelations->points[i].z;
      ipd.v = ipRelations->points[i].h;
      ipd.depth = ipRelations->points[i].s + transform[5];
      ipd.label = ipRelations->points[i].v;
      ipd.ind = ipInd[i];

      depthPointsCur->push_back(ipd);
      depthPointsCurNum++;

      for (; j < depthPointsLastNum; j++) {
        if (depthPointsLast->points[j].ind < ipInd[i]) {
          depthPointsSend->push_back(depthPointsLast->points[j]);
        } else if (depthPointsLast->points[j].ind > ipInd[i]) {
          break;
        }
      }

      ipd.u = ipRelations->points[i].x;
      ipd.v = ipRelations->points[i].y;
      ipd.depth = ipRelations->points[i].s;

      depthPointsSend->push_back(ipd);

      ipp.x = ipRelations->points[i].x * ipRelations->points[i].s;
      ipp.y = ipRelations->points[i].y * ipRelations->points[i].s;
      ipp.z = ipRelations->points[i].s;

      imagePointsProj->push_back(ipp);
    }
  }

  sensor_msgs::PointCloud2 depthPoints2;
  pcl::toROSMsg(*depthPointsSend, depthPoints2);
  depthPoints2.header.frame_id = "camera2";
  depthPoints2.header.stamp = ros::Time().fromSec(imagePointsLastTime);
  depthPointsPubPointer->publish(depthPoints2);

  sensor_msgs::PointCloud2 imagePointsProj2;
  pcl::toROSMsg(*imagePointsProj, imagePointsProj2);
  imagePointsProj2.header.frame_id = "camera2";
  imagePointsProj2.header.stamp = ros::Time().fromSec(imagePointsLastTime);
  imagePointsProjPubPointer->publish(imagePointsProj2);
}
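
To make the structure of the optimization loop explicit, here is a hedged summary (my reading of the residuals, not text from the original source). Writing the frame-to-frame motion as a rotation $R$ and translation $t$ built from transform[0..5], and letting $(u_0, v_0)$ and $(u_1, v_1)$ be the normalized coordinates of a matched feature in the last and current frames, the with-depth case (ipr.v of 1 or 2, depth $d$) contributes, up to loam's sign conventions, a reprojection residual pair

$$ p = R\,d\,(u_0, v_0, 1)^{\top} + t, \qquad y_3 = p_x - u_1\,p_z, \qquad y_4 = p_y - v_1\,p_z, $$

while the depth-unknown case contributes the single coplanarity residual $y_2$ (scaled by 100 so it carries comparable weight). Each iteration stacks these rows into $J$, solves the normal equations $J^{\top}J\,\delta = -0.2\,J^{\top}y$ by QR, adds $\delta$ to transform, and stops once the update is negligible: a standard Gauss-Newton step with a fixed damping factor.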

Looking at the flow of this node, the visual odometry resembles the scan-to-scan pose estimation of the loam series: it yields a rough odometry estimate that is only valid over short distances, and its use of the IMU data hardly qualifies as what is usually meant by VIO. Of course there is a follow-up step of fine registration: in lego-loam the current frame is transformed with the rough pose into the neighboring map for a scan-to-map estimate, while in v-loam it is the familiar bundle adjustment; the odometry after BA is considerably more accurate, and we will study it in detail in the next post.
