Zhang Zhengyou camera calibration: the internals of OpenCV's calibrate function, with full code (Part 3) ----- calibrate step 2: the first optimization with Ceres


  • Steps for optimizing the intrinsic and extrinsic parameters
  • Building the cost functor struct
  • Code added to the main function
  • The CreateSolveMaterial function

Steps for optimizing the intrinsic and extrinsic parameters

Since the distortion coefficients have not been estimated yet, we assume for now that there is no distortion and optimize only the intrinsic and extrinsic parameters. Some readers may ask why we optimize at this stage. The intrinsic and extrinsic parameters recovered from the 2D-3D point correspondences contain error: if you project the 3D points back with the estimated parameters and draw them on the chessboard image, you will see that the reprojected points fall near the detected corners but do not coincide with them exactly. Optimization reduces this reprojection error. Zhang's paper uses the same strategy: one optimization after the intrinsic and extrinsic parameters are estimated, and another after the distortion coefficients are estimated.
The optimization code here follows the Bundle Adjustment example in the official Ceres Solver tutorial.
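As a quick sanity check before the optimization, you can project the 3D points back with the current parameters and draw them on the chessboard image, as described above. A minimal sketch using cv::projectPoints follows; the names image, objPts, rvec and tvec are placeholders for the results from the previous parts of this series, not names from the original code, and the distortion argument is left empty because we are assuming no distortion at this stage.

// project the 3D corners of one view back into the image with the current estimate
std::vector<cv::Point2f> reprojected;
cv::projectPoints(objPts,          // vector<Point3f>: 3D corners of this view
                  rvec, tvec,      // extrinsics of this view (rotation vector + translation)
                  cameraMatrix,    // intrinsics estimated so far
                  cv::Mat(),       // no distortion coefficients yet
                  reprojected);
for (const cv::Point2f& p : reprojected)
    cv::circle(image, p, 3, cv::Scalar(0, 0, 255), -1);   // mark the reprojected corners
cv::imshow("reprojection check", image);
cv::waitKey(0);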

Building the cost functor struct

//-------------------- Cost functor for the reprojection residual (No.1) --------------------//
struct SnavelyReprojectionError {
  SnavelyReprojectionError(Point2d observed, Point3d point1 )
      : observed(observed), point1(point1) {}

  // incamera    : 5 intrinsics (fx, fy, cx, cy, skew)
  // outcameraR1 : first two columns of the rotation matrix (6 values)
  // outcameraR2 : third column of the rotation matrix (3 values)
  // outcameraT  : translation vector (3 values)
  template <typename T>
  bool operator()(const T* const incamera,
                  const T* const outcameraR1,
                  const T* const outcameraR2,
                  const T* const outcameraT,
                  T* residuals) const {
    T p[3];
    T pointed[3];
    // copy the 3D point into the templated type
    pointed[0]=T(point1.x);
    pointed[1]=T(point1.y);
    pointed[2]=T(point1.z);

    // p = R * P, where R is stored column-wise: columns 0 and 1 in outcameraR1,
    // column 2 in outcameraR2
    p[0]=pointed[0]*outcameraR1[0]+pointed[1]*outcameraR1[3]+pointed[2]*outcameraR2[0];
    p[1]=pointed[0]*outcameraR1[1]+pointed[1]*outcameraR1[4]+pointed[2]*outcameraR2[1];
    p[2]=pointed[0]*outcameraR1[2]+pointed[1]*outcameraR1[5]+pointed[2]*outcameraR2[2];

    // translate: p = R*P + t
    p[0] += outcameraT[0];
    p[1] += outcameraT[1];
    p[2] += outcameraT[2];

    // perspective division onto the normalized image plane
    T xp =  p[0] / p[2];
    T yp =  p[1] / p[2];

    const T& fx = incamera[0];
    const T& fy = incamera[1];
    const T& cx = incamera[2];
    const T& cy = incamera[3];
    const T& ff = incamera[4];   // skew, the (0,1) entry of the camera matrix

    // pinhole projection with skew and no distortion
    T predicted_x = fx * xp + ff * yp + cx;
    T predicted_y = fy * yp + cy;

    residuals[0] = predicted_x - T(observed.x);
    residuals[1] = predicted_y - T(observed.y);
    return true;
  }

  // residual dimension 2; parameter block sizes: 5 intrinsics,
  // 6 + 3 rotation-matrix entries, 3 translation entries
  static ceres::CostFunction* Create(const Point2d observed,
                                     const Point3d point1) {
    return (new ceres::AutoDiffCostFunction<SnavelyReprojectionError, 2, 5, 6, 3, 3>(
             new SnavelyReprojectionError(observed, point1)));
  }
  Point2d observed;
  Point3d point1;
};

Code added to the main function

int main(){
    // uv, object_points, cameraMatrix, tvecsMat1, rvecsMat, image_count and CornerNum
    // come from the earlier parts of this series
    CreateSolveMaterial(uv,object_points,uuvv, object_pointses, incamera, outcameraR1, outcameraR2,
                        outcameraT, cameraMatrix, tvecsMat1, rvecsMat, image_count, CornerNum);//prepare the data for the optimization

    ceres::Problem problem;
    ceres::LossFunction* loss_function = new ceres::HuberLoss(4);

    // one residual block per observed corner in every image
    for (int i = 0; i < image_count; ++i)
    {
        for (int j = 0; j < CornerNum; ++j) {
            ceres::CostFunction* cost_function =
                SnavelyReprojectionError::Create(uuvv[i][j],
                                                 object_pointses[i][j]);
            problem.AddResidualBlock(cost_function,
                                     loss_function,
                                     incamera,
                                     outcameraR1[i].ptr<double>(0),
                                     outcameraR2[i].ptr<double>(0),
                                     outcameraT[i].ptr<double>(0));
        }
    }

    // solve once, after all residual blocks have been added
    ceres::Solver::Options options;
    options.max_num_iterations = 300;
    options.linear_solver_type = ceres::DENSE_SCHUR;
    options.minimizer_progress_to_stdout = true;
    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);
    std::cout << summary.BriefReport() << "\n";

    return 0;
}
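After Solve returns, the optimized intrinsics live in incamera and the optimized extrinsics in outcameraR1, outcameraR2 and outcameraT. Writing the intrinsics back into cameraMatrix is not part of the original code; a minimal sketch, assuming cameraMatrix is stored as float (matching the at<float> reads in CreateSolveMaterial below):

    // copy the optimized intrinsics back into the OpenCV camera matrix
    cameraMatrix.at<float>(0,0) = static_cast<float>(incamera[0]);  // fx
    cameraMatrix.at<float>(1,1) = static_cast<float>(incamera[1]);  // fy
    cameraMatrix.at<float>(0,2) = static_cast<float>(incamera[2]);  // cx
    cameraMatrix.at<float>(1,2) = static_cast<float>(incamera[3]);  // cy
    cameraMatrix.at<float>(0,1) = static_cast<float>(incamera[4]);  // skew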

The CreateSolveMaterial function

This code has a significant flaw: the extrinsic rotation is passed into the solver as the raw matrix entries. The full rotation matrix should not be optimized directly. Instead, convert it to a rotation vector with the Rodrigues formula first, optimize the rotation vector, and convert it back to a rotation matrix afterwards; only then is the optimized result guaranteed to satisfy the properties of a rotation matrix (a sketch of this fix is given at the end of this post).
To use this optimization correctly you need to understand exactly what each parameter means. Work through the code in the official Ceres documentation first, and then you can adapt it however you like.
Two important points:
1. The arguments of SnavelyReprojectionError::Create() are the fixed quantities, i.e. the observed 2D point and its corresponding 3D point.
2. The four arguments of problem.AddResidualBlock() after the loss function are the parameter blocks to be optimized.


void CreateSolveMaterial(vector<vector<Point2f> >& uv, vector<vector<Point3f> >& object_points,
                         vector<vector<Point2d> >& uuvv, vector<vector<Point3d> >& object_pointses,
                         double incamera[5], vector<Mat>& outcameraR1, vector<Mat>& outcameraR2,
                         vector<Mat>& outcameraT, Mat& cameraMatrix, vector<Mat>& tvecsMat1,
                         vector<Mat>& rvecsMat, int& image_count, int& CornerNum)
{
    // convert the observed corner points from Point2f to Point2d
    for(int a=0; a<image_count; a++)
    {
        vector<Point2d> uuu;
        for(int b=0;b<CornerNum;b++)
        {
            double point_temp1;
            double point_temp2;
            Point2d point_temp;
            point_temp1=uv[a][b].x;
            point_temp2=uv[a][b].y;
            point_temp.x=point_temp1;
            point_temp.y=point_temp2;
            uuu.push_back(point_temp);
        }
        uuvv.push_back(uuu);
    }

    // convert the 3D chessboard points from Point3f to Point3d
    for(int b=0; b<image_count; b++)
    {
        vector<Point3d> obj;
        for(int a=0; a<CornerNum; a++)
        {
            double point_temp1;
            double point_temp2;
            double point_temp3;
            Point3d point_temp;
            point_temp1=object_points[b][a].x;
            point_temp2=object_points[b][a].y;
            point_temp3=object_points[b][a].z;
            point_temp.x=point_temp1;
            point_temp.y=point_temp2;
            point_temp.z=point_temp3;
            obj.push_back(point_temp);
        }
        object_pointses.push_back(obj);
    }

    // pack the 5 intrinsics into incamera: fx, fy, cx, cy, skew
    // (the at<float> reads assume cameraMatrix is stored as float)
    double camera1,camera2,camera3,camera4,cameraf;
    camera1=cameraMatrix.at<float>(0,0);   // fx
    camera2=cameraMatrix.at<float>(1,1);   // fy
    camera3=cameraMatrix.at<float>(0,2);   // cx
    camera4=cameraMatrix.at<float>(1,2);   // cy
    cameraf=cameraMatrix.at<float>(0,1);   // skew
    incamera[0]=camera1;
    incamera[1]=camera2;
    incamera[2]=camera3;
    incamera[3]=camera4;
    incamera[4]=cameraf;

    // pack the extrinsics; note the naming: here tvecsMat1 supplies the 3x3 rotation
    // matrix and rvecsMat supplies the translation vector
    for(int a=0;a<image_count;a++)
    {
        double camera_1,camera_2,camera_3,camera_4,camera_5,camera_6,camera_7,camera_8,camera_9;
        double camera_01,camera_02,camera_03;
        // read the rotation matrix column by column
        camera_1=tvecsMat1[a].at<float>(0,0);
        camera_2=tvecsMat1[a].at<float>(1,0);
        camera_3=tvecsMat1[a].at<float>(2,0);
        camera_4=tvecsMat1[a].at<float>(0,1);
        camera_5=tvecsMat1[a].at<float>(1,1);
        camera_6=tvecsMat1[a].at<float>(2,1);
        camera_7=tvecsMat1[a].at<float>(0,2);
        camera_8=tvecsMat1[a].at<float>(1,2);
        camera_9=tvecsMat1[a].at<float>(2,2);
        // read the translation vector
        camera_01=rvecsMat[a].at<float>(0,0);
        camera_02=rvecsMat[a].at<float>(1,0);
        camera_03=rvecsMat[a].at<float>(2,0);
        // columns 0 and 1 of R go into outcameraR1, column 2 into outcameraR2,
        // and the translation into outcameraT
        Mat outcameraR_p_1(Matx16d(camera_1,camera_2,camera_3,camera_4,camera_5,camera_6));
        Mat outcameraR_p_2(Matx13d(camera_7,camera_8,camera_9));
        Mat outcameraT_p(Matx13d(camera_01,camera_02,camera_03));
        outcameraR1.push_back(outcameraR_p_1);
        outcameraR2.push_back(outcameraR_p_2);
        outcameraT.push_back(outcameraT_p);
    }
}
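As noted at the top of this section, optimizing the raw rotation-matrix entries does not keep the result a valid rotation matrix. A safer formulation converts each rotation matrix to a rotation vector with cv::Rodrigues, rotates points inside the functor with ceres::AngleAxisRotatePoint from ceres/rotation.h, and converts the optimized vector back afterwards. The sketch below is illustrative; the functor name ReprojectionErrorAngleAxis and the variables R, rvec and tvec are not part of the original code.

#include "ceres/rotation.h"

// cost functor with an angle-axis (rotation vector) parameterization of the rotation
struct ReprojectionErrorAngleAxis {
  ReprojectionErrorAngleAxis(Point2d observed, Point3d point1)
      : observed(observed), point1(point1) {}

  template <typename T>
  bool operator()(const T* const incamera,  // fx, fy, cx, cy, skew
                  const T* const rvec,      // rotation vector (angle-axis), 3 values
                  const T* const tvec,      // translation, 3 values
                  T* residuals) const {
    T point[3] = {T(point1.x), T(point1.y), T(point1.z)};
    T p[3];
    ceres::AngleAxisRotatePoint(rvec, point, p);     // p = R(rvec) * P
    p[0] += tvec[0];  p[1] += tvec[1];  p[2] += tvec[2];

    T xp = p[0] / p[2];
    T yp = p[1] / p[2];
    T predicted_x = incamera[0] * xp + incamera[4] * yp + incamera[2];
    T predicted_y = incamera[1] * yp + incamera[3];

    residuals[0] = predicted_x - T(observed.x);
    residuals[1] = predicted_y - T(observed.y);
    return true;
  }

  static ceres::CostFunction* Create(const Point2d observed, const Point3d point1) {
    return new ceres::AutoDiffCostFunction<ReprojectionErrorAngleAxis, 2, 5, 3, 3>(
        new ReprojectionErrorAngleAxis(observed, point1));
  }

  Point2d observed;
  Point3d point1;
};

The conversion on either side of the solve would then look like this:

// before solving: turn each 3x3 rotation matrix (assumed CV_64F here) into a rotation vector
Mat rvec;
cv::Rodrigues(R, rvec);   // rvec is 3x1; pass rvec.ptr<double>(0) to AddResidualBlock
// ... AddResidualBlock(cost_function, loss_function, incamera,
//                      rvec.ptr<double>(0), tvec.ptr<double>(0)); ...
// after solving: turn the optimized rotation vector back into a rotation matrix
cv::Rodrigues(rvec, R);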

Please credit the source when reposting.
