Monocular and stereo camera calibration is one of the key steps in visual measurement and visual localization. For the underlying theory, see:
https://blog.csdn.net/Kano365/article/details/90721424
https://www.cnblogs.com/zyly/p/9366080.html#_label3
These articles already explain the theory very well, so it is not repeated here. Below is the code I used during testing (the paths used to save results are hard-coded and need to be set by yourself; they have not yet been turned into parameters):
1. Monocular camera calibration
Header file of the monocular calibration class, monocular.h
#ifndef MONOCULAR_H
#define MONOCULAR_H
#include <iostream>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
class monocular
{
public:
    /* Constructor */
    monocular(int boardWidth, int boardHeight, int squareSize);
    /* Run the camera calibration */
    void calibrate();
    /* Destructor */
    ~monocular();
private:
    /* Compute the physical coordinates of the corners on the calibration board */
    void calRealPoint(vector<vector<Point3f> >& obj, int boardwidth, int boardheight, int imgNumber, int squaresize);
    /* Set initial guesses for the camera parameters (optional) */
    void guessCameraParam();
    /* Save the calibration parameters */
    void saveCameraParam();
    /* Compute the re-projection error */
    void reproject_error();
    /* Undistort the original images */
    void distort_correct();
    /* Read the camera intrinsics back and print them */
    void ReadCameraParam();
private:
    Mat mIntrinsic;                          // camera intrinsic matrix
    Mat mDistortion_coeff;                   // camera distortion coefficients
    vector<Mat> mvRvecs;                     // rotation vectors
    vector<Mat> mvTvecs;                     // translation vectors
    vector<vector<Point2f> > mvCorners;      // corners found in each image, one-to-one with mvObjRealPoint
    vector<vector<Point3f> > mvObjRealPoint; // physical coordinates of the corners for each image
    vector<Point2f> mvCorner;                // corners found in a single image
    vector<String> mFiles;                   // paths of all calibration images
    vector<Mat> mImages;                     // all calibration images
    int mBoardWidth;                         // number of inner corners in the horizontal direction
    int mBoardHeight;                        // number of inner corners in the vertical direction
    int mBoardCorner;                        // total number of inner corners
    int mSquareSize;                         // size of each chessboard square, in mm
    Size mBoardSize;                         // inner-corner grid size
    double mdRMS_error;                      // overall RMS re-projection error
    double mdtotal_err;                      // accumulated re-projection error
    double mdave_error;                      // mean re-projection error
};
#endif
Implementation file of the monocular calibration class, monocular.cc
#include "monocular.h"
monocular::monocular(int boardWidth, int boardHeight, int squareSize):
mBoardWidth(boardWidth), mBoardHeight(boardHeight), mSquareSize(squareSize)
{
mBoardCorner = boardWidth * boardHeight;
mBoardSize = Size(boardWidth, boardHeight);
}
monocular::~monocular()
{
}
void monocular::calRealPoint(vector >& obj, int boardwidth, int boardheight, int imgNumber, int squaresize)
{
vector imgpoint;
for (int rowIndex = 0; rowIndex < boardheight; rowIndex++)
{
for (int colIndex = 0; colIndex < boardwidth; colIndex++)
{
imgpoint.push_back(Point3f(rowIndex * squaresize, colIndex * squaresize, 0));
}
}
for (int imgIndex = 0; imgIndex < imgNumber; imgIndex++)
{
obj.push_back(imgpoint);
}
}
/* Set initial guesses for the camera parameters (optional) */
void monocular::guessCameraParam()
{
    /* Allocate memory */
    mIntrinsic.create(3, 3, CV_64FC1);        // camera intrinsic matrix
    mDistortion_coeff.create(5, 1, CV_64FC1); // distortion coefficients
    /*
    fx 0  cx
    0  fy cy
    0  0  1    intrinsic matrix
    */
    mIntrinsic.at<double>(0, 0) = 256.8093262; //fx
    mIntrinsic.at<double>(0, 2) = 160.2826538; //cx
    mIntrinsic.at<double>(1, 1) = 254.7511139; //fy
    mIntrinsic.at<double>(1, 2) = 127.6264572; //cy
    mIntrinsic.at<double>(0, 1) = 0;
    mIntrinsic.at<double>(1, 0) = 0;
    mIntrinsic.at<double>(2, 0) = 0;
    mIntrinsic.at<double>(2, 1) = 0;
    mIntrinsic.at<double>(2, 2) = 1;
    /*
    k1 k2 p1 p2 k3    distortion coefficients
    */
    mDistortion_coeff.at<double>(0, 0) = -0.193740; //k1
    mDistortion_coeff.at<double>(1, 0) = -0.378588; //k2
    mDistortion_coeff.at<double>(2, 0) = 0.028980;  //p1
    mDistortion_coeff.at<double>(3, 0) = 0.008136;  //p2
    mDistortion_coeff.at<double>(4, 0) = 0;         //k3
}
void monocular::saveCameraParam()
{
    FileStorage fs("./result/monocular.yaml", FileStorage::WRITE);
    if(!fs.isOpened())
    {
        cout << "open file error!" << endl;
        return;
    }
    fs << "cameraMatrix" << mIntrinsic;
    fs << "distCoeffs" << mDistortion_coeff;
    // FileStorage keys must not contain spaces, so use underscores
    fs << "overall_RMS_reprojection_error" << mdRMS_error;
    fs << "mean_pixel_reprojection_error" << mdave_error;
    fs.release();
}
void monocular::ReadCameraParam()
{
    FileStorage fs("./result/monocular.yaml", FileStorage::READ);
    if(!fs.isOpened())
    {
        cout << "open file error!" << endl;
        return;
    }
    fs["cameraMatrix"] >> mIntrinsic;
    fs["distCoeffs"] >> mDistortion_coeff;
    cout << "cameraMatrix is: " << mIntrinsic << endl;
    cout << "distCoeffs is: " << mDistortion_coeff << endl;
    fs.release();
}
void monocular::reproject_error()
{
    mdtotal_err = 0.0;
    mdave_error = 0.0;
    vector<Point2f> image_points2; // re-projected image points
    for(size_t i = 0; i < mvRvecs.size(); i++)
    {
        vector<Point3f> tempPointSet = mvObjRealPoint[i];
        // Re-project the 3D points using the estimated intrinsic and extrinsic parameters
        projectPoints(tempPointSet, mvRvecs[i], mvTvecs[i], mIntrinsic, mDistortion_coeff, image_points2);
        // Compare the re-projected points with the detected corners
        vector<Point2f> tempImagePoint = mvCorners[i];
        Mat tempImagePointMat = Mat(1, (int)tempImagePoint.size(), CV_32FC2); // cornerSubPix result
        Mat image_points2Mat = Mat(1, (int)image_points2.size(), CV_32FC2);   // projectPoints result
        for (int j = 0; j < (int)tempImagePoint.size(); j++)
        {
            // copy the x, y coordinates of both point sets into the two matrices
            image_points2Mat.at<Vec2f>(0, j) = Vec2f(image_points2[j].x, image_points2[j].y);
            tempImagePointMat.at<Vec2f>(0, j) = Vec2f(tempImagePoint[j].x, tempImagePoint[j].y);
        }
        // norm gives the L2 distance between the two point sets;
        // divide by the number of corners to get a per-image mean error
        double err = norm(image_points2Mat, tempImagePointMat, NORM_L2) / tempImagePoint.size();
        mdtotal_err += err;
        std::cout << "the " << i + 1 << " image mean error: " << err << " pixel" << endl;
    }
    mdave_error = mdtotal_err / mvRvecs.size();
    std::cout << "the overall mean error: " << mdave_error << " pixel" << endl;
}
void monocular::distort_correct()
{
    Mat mapx = Mat(mImages[0].size(), CV_32FC1);
    Mat mapy = Mat(mImages[0].size(), CV_32FC1);
    Mat R = Mat::eye(3, 3, CV_32F);
    // Compute the undistortion maps.
    // 3rd parameter R: optional rectification rotation between the original and rectified camera frames;
    // 4th parameter newCameraMatrix: 3x3 camera matrix of the corrected image;
    // 5th parameter size: size of the undistorted image;
    // 6th parameter m1type: type of map1, CV_32FC1 or CV_16SC2;
    // 7th/8th parameters map1, map2: the output x/y remapping tables.
    initUndistortRectifyMap(mIntrinsic, mDistortion_coeff, R, mIntrinsic, mImages[0].size(), CV_32FC1, mapx, mapy);
    for (size_t i = 0; i < mImages.size(); i++)
    {
        Mat imageSource = mImages[i];
        Mat newimage = mImages[i].clone();
        Mat newimage1 = mImages[i].clone();
        // Method 1: initUndistortRectifyMap computes the distortion maps and
        // remap applies them to the image.
        remap(imageSource, newimage, mapx, mapy, INTER_LINEAR);
        // Method 2: undistort does both steps in a single call.
        undistort(imageSource, newimage1, mIntrinsic, mDistortion_coeff);
        // Save the results
        string str = "./result/remap/" + to_string(i + 1) + ".jpg";
        string str1 = "./result/undistort/" + to_string(i + 1) + ".jpg";
        imwrite(str, newimage);
        imwrite(str1, newimage1);
    }
}
void monocular::calibrate()
{
    int Width = 0, Height = 0;
    string filespath = "./data/left/";
    glob(filespath, mFiles, false);
    for(size_t i = 0; i < mFiles.size(); i++)
        cout << "calibrate image path is: " << mFiles[i] << endl;
    for (size_t i = 0; i < mFiles.size(); i++)
        mImages.push_back(imread(mFiles[i]));
    if(mImages.size() < 5)
    {
        cout << "there are not enough calibration images!" << endl;
        return;
    }
    Width = mImages[0].cols;
    Height = mImages[0].rows;
    for(size_t i = 0; i < mImages.size(); i++)
    {
        cv::Mat SrcImage = mImages[i];
        if(SrcImage.channels() != 1)
            cvtColor(SrcImage, SrcImage, COLOR_BGR2GRAY);
        bool isFind = findChessboardCorners(SrcImage, mBoardSize, mvCorner, CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE);
        if (isFind == true)
        {
            // Refine the corner locations to sub-pixel accuracy
            cornerSubPix(SrcImage, mvCorner, Size(5, 5), Size(-1, -1), TermCriteria(TermCriteria::EPS | TermCriteria::MAX_ITER, 20, 0.1));
            // Draw the detected corners
            drawChessboardCorners(SrcImage, mBoardSize, mvCorner, isFind);
            mvCorners.push_back(mvCorner);
            imshow("chessboard_corner", SrcImage);
            waitKey(50);
            cout << "The image " << i << " is good" << endl;
        }
        else
            cout << "The image " << i << " is bad, try again" << endl;
    }
    /* Set initial guesses for the camera parameters; with flags = 0 this step can be skipped */
    guessCameraParam();
    cout << "guess successful" << endl;
    /* Compute the 3D coordinates of the board corners, one set per image in which corners were found */
    calRealPoint(mvObjRealPoint, mBoardWidth, mBoardHeight, mvCorners.size(), mSquareSize);
    cout << "calculate real successful" << endl;
    /* Calibrate the camera */
    // 1st parameter objectPoints: the 3D points in the world frame, vector<vector<Point3f> >;
    //   each inner corner's world coordinate is initialized from the size of the chessboard squares.
    // 2nd parameter imagePoints: the corresponding image points, vector<vector<Point2f> >.
    // 3rd parameter imageSize: the image size in pixels.
    // 4th parameter cameraMatrix: the 3x3 intrinsic matrix M = [fx 0 cx; 0 fy cy; 0 0 1].
    // 5th parameter distCoeffs: the 5 distortion coefficients k1, k2, p1, p2, k3.
    // 6th parameter rvecs: one rotation vector R per image, vector<Mat>.
    // 7th parameter tvecs: one translation vector T per image, also vector<Mat>.
    // 8th parameter flags: the calibration options; the 9th parameter criteria sets the termination criteria.
    // Return value: the overall RMS re-projection error.
    // In short: this yields the intrinsic matrix K, the 5 distortion coefficients,
    // and a rotation vector R and translation vector T for every image.
    mdRMS_error = calibrateCamera(mvObjRealPoint, mvCorners, Size(Width, Height), mIntrinsic, mDistortion_coeff, mvRvecs, mvTvecs, 0);
    cout << "the overall RMS re-projection error is: " << mdRMS_error << endl;
    cout << "calibration successful" << endl;
    /* Compute the re-projection error */
    reproject_error();
    cout << "re-project_error compute successful" << endl;
    /* Save the parameters */
    saveCameraParam();
    cout << "save camera param successful" << endl;
    /* Undistort the original images */
    distort_correct();
    cout << "distort_correct successful" << endl;
    /* Read the intrinsics back and print them */
    ReadCameraParam();
    cout << "read camera param finished!" << endl;
}
Main function file for monocular calibration, main.cc
#include "monocular.h"
int main(int argc, char** argv)
{
    // number of inner corners of the board (width, height) and the square size in mm
    monocular Mono_cali(6, 9, 25);
    Mono_cali.calibrate();
    return 0;
}
Finally, the full code can be downloaded from: https://github.com/zwl2017/monocular_calibrate
It includes the test images and instructions on how to run it. It can of course also be tested on Windows with VS; simply create a new project for it.
2. Stereo camera calibration
For convenience, the monocular calibration class above is used directly to obtain each camera's intrinsic parameters. The full code can be downloaded from: https://github.com/zwl2017/stereo_calibrate
The hardest part of the whole process to understand is the stereo rectification algorithm. OpenCV uses Bouguet's algorithm for rectification; for details, see https://blog.csdn.net/u011574296/article/details/73826420
As for how the rectification algorithm is actually derived, that is left for future study!
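As a rough illustration of that workflow (a sketch only, not the repository's exact code), the intrinsics saved by the monocular class can be loaded from two YAML files and held fixed while stereoCalibrate estimates only the rotation R and translation T between the cameras. The file names below are hypothetical, and the object-point/corner vectors are assumed to have been collected for the left and right images in the same way as in monocular::calibrate() (OpenCV 3/4 parameter order):
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;
// Sketch: estimate the stereo extrinsics R, T while keeping the monocular intrinsics fixed.
// objectPoints / leftCorners / rightCorners are assumed to be gathered exactly as in
// monocular::calibrate(); the YAML paths are placeholders for whatever the monocular
// step wrote for each camera.
void stereo_calibrate_sketch(const vector<vector<Point3f> >& objectPoints,
                             const vector<vector<Point2f> >& leftCorners,
                             const vector<vector<Point2f> >& rightCorners,
                             Size imageSize)
{
    Mat K1, D1, K2, D2;
    FileStorage fsL("./result/monocular_left.yaml", FileStorage::READ);  // hypothetical path
    fsL["cameraMatrix"] >> K1; fsL["distCoeffs"] >> D1; fsL.release();
    FileStorage fsR("./result/monocular_right.yaml", FileStorage::READ); // hypothetical path
    fsR["cameraMatrix"] >> K2; fsR["distCoeffs"] >> D2; fsR.release();
    Mat R, T, E, F;
    // CALIB_FIX_INTRINSIC keeps K1, D1, K2, D2 unchanged and only solves for R, T (plus E, F)
    double rms = stereoCalibrate(objectPoints, leftCorners, rightCorners,
                                 K1, D1, K2, D2, imageSize, R, T, E, F,
                                 CALIB_FIX_INTRINSIC,
                                 TermCriteria(TermCriteria::EPS | TermCriteria::MAX_ITER, 100, 1e-5));
    cout << "stereo RMS re-projection error: " << rms << endl;
    FileStorage fs("./result/stereo.yaml", FileStorage::WRITE);
    fs << "R" << R << "T" << T << "E" << E << "F" << F;
    fs.release();
}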
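For reference, a minimal sketch of the rectification step with the OpenCV API might look like the following. It assumes the intrinsics and the stereo extrinsics R, T from the previous step are already available; the function name and output paths are illustrative only:
#include <opencv2/opencv.hpp>
using namespace cv;
// Sketch: rectify one stereo pair using the Bouguet method implemented by stereoRectify.
// cameraMatrix1/2 and distCoeffs1/2 come from the monocular calibration, R and T from stereoCalibrate.
void rectify_sketch(const Mat& cameraMatrix1, const Mat& distCoeffs1,
                    const Mat& cameraMatrix2, const Mat& distCoeffs2,
                    const Mat& R, const Mat& T, Size imageSize,
                    const Mat& left, const Mat& right)
{
    Mat R1, R2, P1, P2, Q;
    // stereoRectify returns the rectification rotations R1/R2, the new projection
    // matrices P1/P2, and the disparity-to-depth mapping Q.
    stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
                  imageSize, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY);
    // Build the undistort + rectify maps for each camera and remap the two images.
    Mat map1x, map1y, map2x, map2y;
    initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, imageSize, CV_32FC1, map1x, map1y);
    initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, imageSize, CV_32FC1, map2x, map2y);
    Mat leftRect, rightRect;
    remap(left,  leftRect,  map1x, map1y, INTER_LINEAR);
    remap(right, rightRect, map2x, map2y, INTER_LINEAR);
    imwrite("./result/left_rect.jpg",  leftRect);   // output paths are placeholders
    imwrite("./result/right_rect.jpg", rightRect);
}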