OpenCV: Camera Calibration and Stereo Matching

When a function's API has changed, be sure to look at the corresponding sample in the OpenCV examples.

#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>

using namespace cv;
using namespace std;

enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
int main()
{
    /************************************************************************
    Read a number of images from the camera, extract the chessboard corners,
    then refine them to sub-pixel accuracy
    *************************************************************************/
    int image_count = 10;                     /**** number of calibration images ****/
    Mat frame;
    Size image_size;                          /**** image size ****/
    Size board_size = Size(9, 6);             /**** number of inner corners per row / column of the board ****/
    vector<Point2f> corners;                  /**** corners detected in the current image ****/
    vector<vector<Point2f> > corners_Seq;     /**** corners detected in all images ****/
    ofstream fout("calibration_result.txt");  /** file that stores the calibration results **/
    int mode = DETECTION;

    VideoCapture cap(0);
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
    if (!cap.isOpened()) {
        std::cout << "打开摄像头失败,退出";
        exit(-1);
    }
    namedWindow("Calibration");
    std::cout << "Press 'g' to start capturing images!" << endl;

    int count = 0, n = 0;
    stringstream tempname;
    string filename;
    int key;
    string msg;
    int baseLine;
    Size textSize;
    while (n < image_count)
    {
        frame.setTo(0);
        cap >> frame;
        if (mode == DETECTION)
        {
            key = 0xff & waitKey(30);
            if ((key & 255) == 27)
                break;

            if (cap.isOpened() && key == 'g')
            {
                mode = CAPTURING;
            }
        }

        if (mode == CAPTURING)
        {
            key = 0xff & waitKey(30);
            if ((key & 255) == 32)
            {
                image_size = frame.size();
                /* extract the corners */
                Mat imageGray;
                cvtColor(frame, imageGray, CV_BGR2GRAY);   // camera frames are BGR
                bool patternfound = findChessboardCorners(frame, board_size, corners, CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE + CALIB_CB_FAST_CHECK);
                if (patternfound)
                {
                    n++;
                    tempname << n;
                    tempname >> filename;
                    filename += ".jpg";
                    /* refine the corners to sub-pixel accuracy */
                    cornerSubPix(imageGray, corners, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
                    count += corners.size();
                    corners_Seq.push_back(corners);
                    imwrite(filename, frame);
                    tempname.clear();
                    filename.clear();
                }
                else
                {
                    std::cout << "Detect Failed.\n";
                }
            }
        }
        msg = mode == CAPTURING ? "100/100/s" : mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
        baseLine = 0;
        textSize = getTextSize(msg, 1, 1, 1, &baseLine);
        Point textOrigin(frame.cols - 2 * textSize.width - 10, frame.rows - 2 * baseLine - 10);

        if (mode == CAPTURING)
        {
            msg = format("%d/%d", n, image_count);
        }

        putText(frame, msg, textOrigin, 1, 1, mode != CALIBRATED ? Scalar(0, 0, 255) : Scalar(0, 255, 0));

        imshow("Calibration", frame);
        key = 0xff & waitKey(1);
        if ((key & 255) == 27)
            break;
    }

    std::cout << "角点提取完成!\n";

    /************************************************************************
    Camera calibration
    *************************************************************************/
    std::cout << "开始定标………………" << endl;
    Size square_size = Size(25, 25);                                     /**** measured size of one square on the calibration board ****/
    vector<vector<Point3f> > object_Points;                              /**** 3D coordinates of the board corners ****/

    Mat image_points = Mat(1, count, CV_32FC2, Scalar::all(0));          /***** all extracted corners *****/
    vector<int> point_counts;                                            /***** number of corners in each image *****/
    Mat intrinsic_matrix = Mat(3, 3, CV_32FC1, Scalar::all(0));          /***** camera intrinsic matrix *****/
    Mat distortion_coeffs = Mat(1, 5, CV_32FC1, Scalar::all(0));         /* the 5 distortion coefficients: k1, k2, p1, p2, k3 */
    vector<Mat> rotation_vectors;                                        /* rotation vector of each image */
    vector<Mat> translation_vectors;                                     /* translation vector of each image */

    /* Initialize the 3D coordinates of the corners on the calibration board */
    for (int t = 0; t < image_count; t++)
    {
        vector<Point3f> tempPointSet;
        for (int i = 0; i < board_size.height; i++)
        {
            for (int j = 0; j < board_size.width; j++)
            {
                /* assume the board lies on the z = 0 plane of the world coordinate system */
                Point3f tempPoint;
                tempPoint.x = i * square_size.width;
                tempPoint.y = j * square_size.height;
                tempPoint.z = 0;
                tempPointSet.push_back(tempPoint);
            }
        }
        object_Points.push_back(tempPointSet);
    }

    /* Initialize the number of corners in each image; here we assume the complete board is visible in every image */
    for (int i = 0; i< image_count; i++)
    {
        point_counts.push_back(board_size.width*board_size.height);
    }

    /* run the calibration */
    calibrateCamera(object_Points, corners_Seq, image_size, intrinsic_matrix, distortion_coeffs, rotation_vectors, translation_vectors);
    std::cout << "定标完成!\n";

    /************************************************************************
    Evaluate the calibration results
    *************************************************************************/
    std::cout << "开始评价定标结果………………" << endl;
    double total_err = 0.0;                   /* sum of the mean errors of all images */
    double err = 0.0;                         /* mean error of a single image */
    vector<Point2f> image_points2;            /**** re-projected image points ****/

    std::cout << "每幅图像的定标误差:" << endl;
    fout << "每幅图像的定标误差:" << endl << endl;
    for (int i = 0; i < image_count; i++)
    {
        vector<Point3f> tempPointSet = object_Points[i];
        /**** re-project the 3D board points using the estimated intrinsic and extrinsic parameters ****/
        projectPoints(tempPointSet, rotation_vectors[i], translation_vectors[i], intrinsic_matrix, distortion_coeffs, image_points2);
        /* compute the error between the re-projected points and the detected corners */
        vector<Point2f> tempImagePoint = corners_Seq[i];
        Mat tempImagePointMat = Mat(1, (int)tempImagePoint.size(), CV_32FC2);
        Mat image_points2Mat = Mat(1, (int)image_points2.size(), CV_32FC2);
        for (int j = 0; j < tempImagePoint.size(); j++)
        {
            image_points2Mat.at<Vec2f>(0, j) = Vec2f(image_points2[j].x, image_points2[j].y);
            tempImagePointMat.at<Vec2f>(0, j) = Vec2f(tempImagePoint[j].x, tempImagePoint[j].y);
        }
        err = norm(image_points2Mat, tempImagePointMat, NORM_L2);
        total_err += err /= point_counts[i];
        std::cout << "第" << i + 1 << "幅图像的平均误差:" << err << "像素" << endl;
        fout << "第" << i + 1 << "幅图像的平均误差:" << err << "像素" << endl;
    }
    std::cout << "总体平均误差:" << total_err / image_count << "像素" << endl;
    fout << "总体平均误差:" << total_err / image_count << "像素" << endl << endl;
    std::cout << "评价完成!" << endl;

    /************************************************************************
    Save the calibration results
    *************************************************************************/
    std::cout << "开始保存定标结果………………" << endl;
    Mat rotation_matrix = Mat(3, 3, CV_32FC1, Scalar::all(0)); /* rotation matrix of each image */

    fout << "相机内参数矩阵:" << endl;
    fout << intrinsic_matrix << endl << endl;
    fout << "畸变系数:\n";
    fout << distortion_coeffs << endl << endl << endl;
    for (int i = 0; i < image_count; i++)
    {
        fout << "第" << i + 1 << "幅图像的旋转向量:" << endl;
        fout << rotation_vectors[i] << endl;

        /* convert the rotation vector into the corresponding rotation matrix */
        Rodrigues(rotation_vectors[i], rotation_matrix);
        fout << "第" << i + 1 << "幅图像的旋转矩阵:" << endl;
        fout << rotation_matrix << endl;
        fout << "第" << i + 1 << "幅图像的平移向量:" << endl;
        fout << translation_vectors[i] << endl << endl;
    }
    std::cout << "完成保存" << endl;
    fout << endl;

    /************************************************************************
    Display the calibration results (undistort and save the captured images)
    *************************************************************************/
    //  Mat mapx = Mat(image_size, CV_32FC1);
    //  Mat mapy = Mat(image_size, CV_32FC1);
    //  Mat R = Mat::eye(3, 3, CV_32F);
    //  std::cout << "保存矫正图像" << endl;
    //  string imageFileName;
    //  std::stringstream StrStm;
    //  for (int i = 0; i != image_count; i++)
    //  {
    //      std::cout << "Frame #" << i + 1 << endl;
    //      Mat newCameraMatrix = Mat(3, 3, CV_32FC1, Scalar::all(0));
    //      initUndistortRectifyMap(intrinsic_matrix, distortion_coeffs, R, intrinsic_matrix, image_size, CV_32FC1, mapx, mapy);
    //      StrStm.clear();
    //      imageFileName.clear();
    //      StrStm << i + 1;
    //      StrStm >> imageFileName;
    //      imageFileName += ".jpg";
    //      Mat t = imread(imageFileName);
    //      Mat newimage = t.clone();
    //      cv::remap(t, newimage, mapx, mapy, INTER_LINEAR);
    //      StrStm.clear();
    //      imageFileName.clear();
    //      StrStm << i + 1;
    //      StrStm >> imageFileName;
    //      imageFileName += "_d.jpg";
    //      imwrite(imageFileName, newimage);
    //  }
    //  std::cout << "保存结束" << endl;
    return 0;
}
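As a quick visual check of the result, the intrinsic matrix and distortion coefficients obtained above can be applied to one of the saved frames with cv::undistort. This is a minimal standalone sketch, not part of the original program; the matrix values and the file name "1.jpg" are placeholders to be replaced with the numbers written to calibration_result.txt and one of the saved images:

#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

int main()
{
    // Placeholder intrinsics: substitute the values from calibration_result.txt
    cv::Mat K = (cv::Mat_<double>(3, 3) << 600, 0, 320,
                                             0, 600, 240,
                                             0,   0,   1);
    cv::Mat dist = (cv::Mat_<double>(1, 5) << 0, 0, 0, 0, 0);   // k1, k2, p1, p2, k3

    cv::Mat img = cv::imread("1.jpg");          // one of the frames saved by the program above
    if (img.empty())
        return -1;

    cv::Mat undistorted;
    cv::undistort(img, undistorted, K, dist);   // remove the lens distortion
    cv::imwrite("1_undistorted.jpg", undistorted);
    return 0;
}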

Because the stereo calibration API changed in OpenCV 3, the code below had to be revised before it would run. It now runs successfully, but the results are quite poor; the parameter settings are probably to blame.
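Since the poor result is suspected to come from the matcher parameters, one option worth trying (not from the original post) is OpenCV 3's StereoSGBM, which is usually easier to tune than StereoBM. A minimal sketch; the file names and parameter values are illustrative placeholders and assume already rectified left/right images:

#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"

int main()
{
    // Rectified input images (placeholder file names)
    cv::Mat left  = cv::imread("left_rectified.jpg",  cv::IMREAD_GRAYSCALE);
    cv::Mat right = cv::imread("right_rectified.jpg", cv::IMREAD_GRAYSCALE);
    if (left.empty() || right.empty())
        return -1;

    int blockSize = 9;
    cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(
        0,                              // minDisparity
        64,                             // numDisparities, must be a multiple of 16
        blockSize,
        8 * blockSize * blockSize,      // P1: penalty on small disparity changes
        32 * blockSize * blockSize,     // P2: penalty on large disparity changes
        1,                              // disp12MaxDiff
        63,                             // preFilterCap
        10,                             // uniquenessRatio
        100,                            // speckleWindowSize
        32,                             // speckleRange
        cv::StereoSGBM::MODE_SGBM);

    cv::Mat disp, disp8;
    sgbm->compute(left, right, disp);                    // 16-bit disparity, scaled by 16
    disp.convertTo(disp8, CV_8U, 255.0 / (64 * 16.0));   // to 8-bit for display
    cv::imwrite("disparity_sgbm.jpg", disp8);
    return 0;
}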

#include "opencv2/core/core.hpp"
#include "opencv2/core/core_c.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/calib3d/calib3d_c.h"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <cstdio>
#include <cfloat>

using namespace cv;
using namespace std;

typedef unsigned int uint;

Size imgSize(1280, 720);
Size patSize(14, 12);           // number of corners to find per chessboard: 14 x 12
const double patLen = 5.0f;     // unit: mm, width of one square on the (metal) calibration board
double imgScale = 1.0;          // image scaling factor


                                // store the paths of the images to be read in fileList
vector<string> fileList;
void initFileList(string dir, int first, int last)
{
    fileList.clear();
    for (int cur = first; cur <= last; cur++) 
    {

        string str_file = dir + "/" + to_string(cur) + ".jpg";
        fileList.push_back(str_file);
    }
}


// Save the generated point-cloud coordinates
static void saveXYZ(string filename, const Mat& mat)
{
    const double max_z = 1.0e4;
    ofstream fp(filename);
    if (!fp.is_open())
    {
        std::cout << "打开点云文件失败" << endl;
        fp.close();
        return;
    }
    // write the points one by one
    for (int y = 0; y < mat.rows; y++)
    {
        for (int x = 0; x < mat.cols; x++)
        {
            Vec3f point = mat.at<Vec3f>(y, x);   // 3-channel float
            if (fabs(point[2] - max_z) < FLT_EPSILON || fabs(point[2]) > max_z)
                continue;
            fp << point[0] << " " << point[1] << " " << point[2] << endl;
        }
    }
    fp.close();
}


// Save the disparity data
//--- OpenCV's FileStorage only writes xml/yml, which MATLAB cannot read directly; save as txt and import that into MATLAB
void saveDisp(const string filename, const Mat& mat)
{
    ofstream fp(filename, ios::out);
    fp << mat.rows << endl;
    fp << mat.cols << endl;
    for (int y = 0; y < mat.rows; y++)
    {
        for (int x = 0; x < mat.cols; x++)
        {
            double disp = mat.at<short>(y, x); // the disparity matrix is CV_16S, so read it as short
            fp << disp << endl;                // if the disparity matrix were CV_32F, read it as float
        }
    }
    fp.close();
}

// Pseudo-color display of the disparity map
/*
Brighter pixels in the gray image are mapped toward red in the pseudo-color image and darker
pixels toward blue; overall the hue runs from red through green (mid gray) down to blue.
*/
void F_Gray2Color(Mat gray_mat, Mat& color_mat)
{
    color_mat = Mat::zeros(gray_mat.size(), CV_8UC3);
    int rows = color_mat.rows, cols = color_mat.cols;

    Mat red = Mat(gray_mat.rows, gray_mat.cols, CV_8U);
    Mat green = Mat(gray_mat.rows, gray_mat.cols, CV_8U);
    Mat blue = Mat(gray_mat.rows, gray_mat.cols, CV_8U);
    Mat mask = Mat(gray_mat.rows, gray_mat.cols, CV_8U);

    subtract(gray_mat, Scalar(255), blue);         // blue(I) = 255 - gray(I)
    red = gray_mat.clone();                        // red(I) = gray(I)
    green = gray_mat.clone();                      // green(I) = gray(I),if gray(I) < 128

    compare(green, 128, mask, CMP_GE);             // green(I) = 255 - gray(I), if gray(I) >= 128
    subtract(green, Scalar(255), green, mask);
    convertScaleAbs(green, green, 2.0, 2.0);//dst(I)=abs(src(I)*scale + shift)

    vector<Mat> vec;
    vec.push_back(red);
    vec.push_back(green);
    vec.push_back(blue);
    cv::merge(vec, color_mat);
}
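// Note (not in the original post): since OpenCV 3 the imgproc module also provides cv::applyColorMap,
// which produces a similar blue-to-red ramp in a single call, e.g.
//     Mat colorDisp;
//     applyColorMap(disp8, colorDisp, COLORMAP_JET);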
// Blend the disparity map with the original image
Mat F_mergeImg(Mat img1, Mat disp8)
{
    Mat color_mat = Mat::zeros(img1.size(), CV_8UC3);

    Mat red = img1.clone();        // rectified image from the left camera
    Mat green = disp8.clone();     // gray-scale disparity map
    Mat blue = Mat::zeros(img1.size(), CV_8UC1);

    vector<Mat> vec;
    vec.push_back(red);
    vec.push_back(blue);
    vec.push_back(green);
    cv::merge(vec, color_mat);

    return color_mat;
}


// Stereo calibration of the two cameras
int stereoCalibrate(string intrinsic_filename = "intrinsics.yml", string extrinsic_filename = "extrinsics.yml")
{
    vector<int> idx;
    // corner coordinates from the left and the right camera

    vector<vector<Point2f> > imagePoints[2];

    //vector<vector<Point2f> > leftPtsList(fileList.size());
    //vector<vector<Point2f> > rightPtsList(fileList.size());

    for (uint i = 0; i < fileList.size(); ++i)
    {
        vector<Point2f> leftPts, rightPts;      // corner positions in the left / right images
        Mat rawImg = imread(fileList[i]);       // original side-by-side image
        if (rawImg.empty())
        {
            std::cout << "the Image is empty..." << fileList[i] << endl;
            continue;
        }
        // split into the left and right halves
        Rect leftRect(0, 0, imgSize.width, imgSize.height);
        Rect rightRect(imgSize.width, 0, imgSize.width, imgSize.height);

        Mat leftRawImg = rawImg(leftRect);       // left raw image
        Mat rightRawImg = rawImg(rightRect);     // right raw image

                                                 //imwrite("left.jpg", leftRawImg);
                                                 //imwrite("right.jpg", rightRawImg);
                                                 //std::cout << "左侧图像:  宽度 " << leftRawImg.cols << "  高度 " << leftRawImg.rows << endl;
                                                 //std::cout << "右侧图像:  宽度 " << rightRawImg.cols << "  高度 " << rightRawImg.rows << endl;

        Mat leftImg, rightImg, leftSimg, rightSimg, leftCimg, rightCimg, leftMask, rightMask;
        // BGR -> GRAY
        if (leftRawImg.type() == CV_8UC3)
            cvtColor(leftRawImg, leftImg, CV_BGR2GRAY);  // convert to gray scale
        else
            leftImg = leftRawImg.clone();
        if (rightRawImg.type() == CV_8UC3)
            cvtColor(rightRawImg, rightImg, CV_BGR2GRAY);
        else
            rightImg = rightRawImg.clone();

        imgSize = leftImg.size();

        // image pre-filtering
        //--- smooth and filter the images so that the white chessboard squares and their edges stand out
        resize(leftImg, leftMask, Size(200, 200));      // shrink the image to a 200x200 background mask
        resize(rightImg, rightMask, Size(200, 200));
        GaussianBlur(leftMask, leftMask, Size(13, 13), 7);
        GaussianBlur(rightMask, rightMask, Size(13, 13), 7);
        resize(leftMask, leftMask, imgSize);
        resize(rightMask, rightMask, imgSize);
        medianBlur(leftMask, leftMask, 9);    // median filter
        medianBlur(rightMask, rightMask, 9);
        // local contrast enhancement: subtract the blurred background, stretch by 2 and re-center at 128
        for (int v = 0; v < imgSize.height; v++) {
            for (int u = 0; u < imgSize.width; u++) {
                int leftX = ((int)leftImg.at<uchar>(v, u) - (int)leftMask.at<uchar>(v, u)) * 2 + 128;
                int rightX = ((int)rightImg.at<uchar>(v, u) - (int)rightMask.at<uchar>(v, u)) * 2 + 128;
                leftImg.at<uchar>(v, u) = (uchar)max(min(leftX, 255), 0);
                rightImg.at<uchar>(v, u) = (uchar)max(min(rightX, 255), 0);
            }
        }

        // find the chessboard corners on (optionally) scaled images
        resize(leftImg, leftSimg, Size(), imgScale, imgScale);      // scale the images by imgScale
        resize(rightImg, rightSimg, Size(), imgScale, imgScale);
        cvtColor(leftSimg, leftCimg, CV_GRAY2BGR);     // BGR copies used for drawing the detected corners
        cvtColor(rightSimg, rightCimg, CV_GRAY2BGR);


        // find the chessboard corners
        bool leftFound = findChessboardCorners(leftCimg, patSize, leftPts, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
        bool rightFound = findChessboardCorners(rightCimg, patSize, rightPts, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);

        if (leftFound)
            cornerSubPix(leftSimg, leftPts, Size(11, 11), Size(-1, -1),
                TermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 300, 0.01));   // sub-pixel corner refinement
        if (rightFound)
            cornerSubPix(rightSimg, rightPts, Size(11, 11), Size(-1, -1),
                TermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 300, 0.01));   // sub-pixel corner refinement

        // scale the corner coordinates back to the original image size
        for (uint j = 0; j < leftPts.size(); j++)
            leftPts[j] *= 1. / imgScale;
        for (uint j = 0; j < rightPts.size(); j++)
            rightPts[j] *= 1. / imgScale;

        // display the detected corners
        string leftWindowName = "Left Corner Pic", rightWindowName = "Right Corner Pic";

        Mat leftPtsTmp = Mat(leftPts) * imgScale;      // back to the scaled-image coordinates for drawing
        Mat rightPtsTmp = Mat(rightPts) * imgScale;

        drawChessboardCorners(leftCimg, patSize, leftPtsTmp, leftFound);      // draw the corners and show them
        imshow(leftWindowName, leftCimg);
        imwrite("输出/DrawChessBoard/" + to_string(i) + "_left.jpg", leftCimg);
        waitKey(200);

        drawChessboardCorners(rightCimg, patSize, rightPtsTmp, rightFound);   // draw the corners and show them
        imshow(rightWindowName, rightCimg);
        imwrite("输出/DrawChessBoard/" + to_string(i) + "_right.jpg", rightCimg);
        waitKey(200);

        cv::destroyAllWindows();

        // keep the corner coordinates only when the board was found in both views
        if (leftFound && rightFound)
        {
            imagePoints[0].push_back(leftPts);
            imagePoints[1].push_back(rightPts);  // store the corner coordinates
            std::cout << "图片 " << i << " 处理成功!" << endl;
            idx.push_back(i);
        }
    }
    cv::destroyAllWindows();
    imagePoints[0].resize(idx.size());
    imagePoints[1].resize(idx.size());
    std::cout << "成功标定的标定板个数为" << idx.size() << "  序号分别为: ";
    for (unsigned int i = 0; i < idx.size(); ++i)
        std::cout << idx[i] << "  ";

    // generate the object (board) points
    //--- the object points lie on Z = 0; X and Y start at 0 and step by patLen following patSize.
    //    Every board shares the same object-point coordinates.
    vector<vector<Point3f> > objPts(idx.size());  // idx.size() is the number of boards detected in both views
    for (int y = 0; y < patSize.height; y++) {
        for (int x = 0; x < patSize.width; x++) {
            objPts[0].push_back(Point3f((float)x, (float)y, 0) * patLen);
        }
    }
    for (uint i = 1; i < objPts.size(); i++) {
        objPts[i] = objPts[0];
    }

    //
    // stereo calibration of the camera pair
    Mat cameraMatrix[2], distCoeffs[2];
    vector<Mat> rvecs[2], tvecs[2];
    cameraMatrix[0] = Mat::eye(3, 3, CV_64F);
    cameraMatrix[1] = Mat::eye(3, 3, CV_64F);
    Mat R, T, E, F;

    cv::calibrateCamera(objPts, imagePoints[0], imgSize, cameraMatrix[0],
        distCoeffs[0], rvecs[0], tvecs[0], CV_CALIB_FIX_K3);

    cv::calibrateCamera(objPts, imagePoints[1], imgSize, cameraMatrix[1],
        distCoeffs[1], rvecs[1], tvecs[1], CV_CALIB_FIX_K3);

    std::cout << endl << "Left Camera Matrix: " << endl << cameraMatrix[0] << endl;
    std::cout << endl << "Right Camera Matrix:" << endl << cameraMatrix[1] << endl;
    std::cout << endl << "Left Camera DistCoeffs: " << endl << distCoeffs[0] << endl;
    std::cout << endl << "Right Camera DistCoeffs: " << endl << distCoeffs[1] << endl;

    double rms = stereoCalibrate(objPts, imagePoints[0], imagePoints[1],
        cameraMatrix[0], distCoeffs[0],
        cameraMatrix[1], distCoeffs[1],
        imgSize, R, T, E, F,
        CALIB_FIX_ASPECT_RATIO +
        CALIB_ZERO_TANGENT_DIST +
        CALIB_USE_INTRINSIC_GUESS +
        CALIB_SAME_FOCAL_LENGTH +
        CALIB_RATIONAL_MODEL +
        CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5,
        TermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 100, 1e-5));
    //CV_CALIB_USE_INTRINSIC_GUESS);


    std::cout << endl << endl << "立体标定完成! " << endl << "done with RMS error=" << rms << endl;  // re-projection error
    std::cout << endl << "Left Camera Matrix: " << endl << cameraMatrix[0] << endl;
    std::cout << endl << "Right Camera Matrix:" << endl << cameraMatrix[1] << endl;
    std::cout << endl << "Left Camera DistCoeffs: " << endl << distCoeffs[0] << endl;
    std::cout << endl << "Right Camera DistCoeffs: " << endl << distCoeffs[1] << endl;


    // Calibration quality check
    // Measure how far each corner lies from the epipolar line of its match in the other image.
    // undistortPoints first removes the lens distortion from the original corners, then
    // computeCorrespondEpilines computes the epipolar lines; the accumulated point-to-line distances form the error.
    std::cout << endl << " 极线计算...  误差计算... ";
    double err = 0;
    int npoints = 0;
    vector<Vec3f> lines[2];
    for (unsigned int i = 0; i < idx.size(); i++)
    {
        int npt = (int)imagePoints[0][i].size();  // number of corners in this view
        Mat imgpt[2];
        for (int k = 0; k < 2; k++)
        {
            imgpt[k] = Mat(imagePoints[k][i]);
            undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]); // undistort the corners
            computeCorrespondEpilines(imgpt[k], k + 1, F, lines[k]);  // compute the epipolar lines
        }
        }
        for (int j = 0; j < npt; j++)
        {
            double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
                imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
                fabs(imagePoints[1][i][j].x*lines[0][j][0] +
                    imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
            err += errij;   // accumulate the error
        }
        npoints += npt;
    }
    std::cout << "  平均误差 average reprojection err = " << err / npoints << endl;  // mean epipolar error

    // write the camera intrinsic matrices and distortion coefficients to file
    FileStorage fs(intrinsic_filename, CV_STORAGE_WRITE);
    if (fs.isOpened())
    {
        fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
            "M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
        fs.release();
    }
    else
        std::cout << "Error: can not save the intrinsic parameters\n";

    // stereo rectification (Bouguet's method)
    Mat R1, R2, P1, P2, Q;
    Rect validRoi[2];

    cv::stereoRectify(cameraMatrix[0], distCoeffs[0],
        cameraMatrix[1], distCoeffs[1],
        imgSize, R, T, R1, R2, P1, P2, Q,
        CALIB_ZERO_DISPARITY, 1, imgSize, &validRoi[0], &validRoi[1]);

    fs.open(extrinsic_filename, CV_STORAGE_WRITE);
    if (fs.isOpened())
    {
        fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
        fs.release();
    }
    else
        std::cout << "Error: can not save the extrinsic parameters\n";

    std::cout << "双目标定完成..." << endl;
    getchar();
    getchar();
    return 0;
}


//------------------------------------------------------
//------------------------------------------------------
// Stereo matching and measurement
int stereoMatch(int picNum,
    string intrinsic_filename = "intrinsics.yml",
    string extrinsic_filename = "extrinsics.yml",
    bool no_display = false,
    string point_cloud_filename = "输出/point3D.txt"
)
{
    // load the side-by-side left/right camera image to be processed
    int color_mode = 0;
    Mat rawImg = imread(fileList[picNum], color_mode);    // image to process, loaded as grayscale
    if (rawImg.empty())
    {
        std::cout << "In Function stereoMatch, the Image is empty..." << endl;
        return 0;
    }
    // split into the left and right halves
    Rect leftRect(0, 0, imgSize.width, imgSize.height);
    Rect rightRect(imgSize.width, 0, imgSize.width, imgSize.height);
    Mat img1 = rawImg(leftRect);       // left raw image
    Mat img2 = rawImg(rightRect);      // right raw image
    // scale the images if required
    if (imgScale != 1.f) 
    {
        Mat temp1, temp2;
        int method = imgScale < 1 ? INTER_AREA : INTER_CUBIC;
        resize(img1, temp1, Size(), imgScale, imgScale, method);
        img1 = temp1;
        resize(img2, temp2, Size(), imgScale, imgScale, method);
        img2 = temp2;
    }
    imwrite("输出/原始左图像.jpg", img1);
    imwrite("输出/原始右图像.jpg", img2);

    Size img_size = img1.size();

    // reading intrinsic parameters
    FileStorage fs(intrinsic_filename, CV_STORAGE_READ);
    if (!fs.isOpened())
    {
        std::cout << "Failed to open file " << intrinsic_filename << endl;
        return -1;
    }
    Mat M1, D1, M2, D2;      // intrinsic matrices and distortion coefficients of the left/right cameras
    fs["M1"] >> M1;
    fs["D1"] >> D1;
    fs["M2"] >> M2;
    fs["D2"] >> D2;

    M1 *= imgScale;
    M2 *= imgScale;

    // read the stereo rectification parameters of the camera pair
    fs.open(extrinsic_filename, CV_STORAGE_READ);
    if (!fs.isOpened())
    {
        std::cout << "Failed to open file  " << extrinsic_filename << endl;
        return -1;
    }

    // stereo rectification
    Rect roi1, roi2;
    Mat Q;
    Mat R, T, R1, P1, R2, P2;
    fs["R"] >> R;
    fs["T"] >> T;

    // with alpha = -1, OpenCV performs the scaling and shifting automatically
    cv::stereoRectify(M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2);

    // compute the rectification maps for both cameras
    Mat map11, map12, map21, map22;
    initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
    initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);

    // rectify the original images
    Mat img1r, img2r;
    remap(img1, img1r, map11, map12, INTER_LINEAR);
    remap(img2, img2r, map21, map22, INTER_LINEAR);
    img1 = img1r;
    img2 = img2r;

    // StereoBM matcher (OpenCV 3 interface)
    Ptr<StereoBM> bm = StereoBM::create(16, 9);
    bm->setPreFilterType(StereoBM::PREFILTER_NORMALIZED_RESPONSE);
    bm->setPreFilterSize(9);
    bm->setPreFilterCap(31);
    bm->setBlockSize(21);
    bm->setMinDisparity(-16);
    bm->setNumDisparities(64);
    bm->setTextureThreshold(10);
    bm->setUniquenessRatio(5);
    bm->setSpeckleWindowSize(100);
    bm->setSpeckleRange(32);
    bm->setROI1(roi1);
    bm->setROI2(roi2);
    // getter
    int pfs = bm->getPreFilterSize();
    int pfc = bm->getPreFilterCap();
    int bs = bm->getBlockSize();
    int md = bm->getMinDisparity();
    int nd = bm->getNumDisparities();
    int tt = bm->getTextureThreshold();
    int ur = bm->getUniquenessRatio();
    int sw = bm->getSpeckleWindowSize();
    int sr = bm->getSpeckleRange();

    /*
    // the old OpenCV 2.x interface: fill the StereoBM state struct directly
    StereoBM bm;

    int unitDisparity = 15;                                 // 40
    int numberOfDisparities = unitDisparity * 16;           // disparity search range, a multiple of 16
    bm.state->roi1 = roi1;                                  // valid pixel regions of the two views
    bm.state->roi2 = roi2;
    bm.state->preFilterCap = 13;
    bm.state->SADWindowSize = 19;                           // matching window size
    bm.state->minDisparity = 0;                             // where the disparity search starts, default 0
    bm.state->numberOfDisparities = numberOfDisparities;    // search within this disparity range
    bm.state->textureThreshold = 1000;                      // low-texture threshold; needs enough texture to overcome noise
    bm.state->uniquenessRatio = 1;                          // uniqueness ratio of the best match, in percent
    bm.state->speckleWindowSize = 200;                      // window for the disparity speckle check, 0 disables it
    bm.state->speckleRange = 32;                            // disparity variation threshold; speckles above it are cleared
    bm.state->disp12MaxDiff = -1;                           // max allowed difference between left and right disparity maps
    */

    // compute the disparity map
    Mat disp, disp8;
    int64 t = getTickCount();
    bm->compute(img1, img2, disp);
    //bm(img1, img2, disp);        // old OpenCV 2.x call
    t = getTickCount() - t;
    printf("立体匹配耗时: %fms\n", t * 1000 / getTickFrequency());

    // convert the 16-bit signed disparity map to an 8-bit unsigned image
    //disp.convertTo(disp8, CV_8U, 255 / (numberOfDisparities*16.));
    disp.convertTo(disp8, CV_8U, 255 / (64 * 16.));
    // pseudo-color the disparity map
    Mat vdispRGB = disp8;
    F_Gray2Color(disp8, vdispRGB);
    // blend the left rectified image with the disparity map
    Mat merge_mat = F_mergeImg(img1, disp8);

    saveDisp("输出/视差数据.txt", disp);

    // display and save the results
    if (!no_display) {
        imshow("左侧矫正图像", img1);
        imwrite("输出/left_undistortRectify.jpg", img1);
        imshow("右侧矫正图像", img2);
        imwrite("输出/right_undistortRectify.jpg", img2);
        imshow("视差图", disp8);
        imwrite("输出/视差图.jpg", disp8);
        imshow("视差图_彩色.jpg", vdispRGB);
        imwrite("输出/视差图_彩色.jpg", vdispRGB);
        imshow("左矫正图像与视差图合并图像", merge_mat);
        imwrite("输出/左矫正图像与视差图合并图像.jpg", merge_mat);
        cv::waitKey();
        std::cout << endl;
    }
    cv::destroyAllWindows();

    // re-project the disparity map into 3D space
    cout << endl << "计算深度映射... " << endl;
    Mat xyz;
    reprojectImageTo3D(disp, xyz, Q, true);    // depth map from the disparity (disp: 720x1280)
    cv::destroyAllWindows();
    cout << endl << "保存点云坐标... " << endl;
    saveXYZ(point_cloud_filename, xyz);

    cout << endl << endl << "结束" << endl << "Press any key to end... ";

    getchar();
    return 0;
}




int main()
{
    string intrinsic_filename = "intrinsics.yml";
    string extrinsic_filename = "extrinsics.yml";
    string point_cloud_filename = "输出/point3D.txt";

    /* stereo calibration: only needs to be run once */
    /*initFileList("calib_pic", 1, 26);
    stereoCalibrate(intrinsic_filename, extrinsic_filename);*/

    /* stereo matching */
    initFileList("test_pic", 1, 2);
    stereoMatch(0, intrinsic_filename, extrinsic_filename, false, point_cloud_filename);

    return 0;
}

Original article: https://blog.csdn.net/u014413083/article/details/53205560#comments
