双目视觉标定就是通过求解实际三维空间中坐标点和摄像机二维图像坐标点的对应关系,在双目视觉中,三维空间坐标系一般是以左相机坐标系作为基准坐标系。利用棋盘板获取到的用于计算的二维图像坐标和三维空间的物理坐标,再通过一定的算法,求解出变换矩阵,则解决了基础的双目视觉标定的过程。
从物体到相机坐标系统的转换:物体上的点P对应图像平面的点p,我们通过使用旋转矩阵R和平移向量T将点p和点P关联起来。
旋转矩阵R:是在乘以一个向量的时候有改变向量的方向但不改变大小的效果并保持了手性的矩阵。
平移向量T:用来表示如何将一个坐标系统移到另一个坐标系统,其原点也移动到另一个位置;换句话说,平移向量是第一个坐标系统的原点到第二个坐标系统的原点的偏移量;如:从以物体中心为原点的坐标系到以相机中心为原点的坐标系,相应的平移向量为T。
本征矩阵E:包含了关于物理空间中两个摄像头的平移和旋转信息,在坐标系中第二个摄像头相对于第一个摄像头的位置。
基本矩阵F:除了包含了E中的相同信息外,还包含了两个摄像头的内参数;将图像坐标系(像素)中一个摄像头的像平面上的点和另一个摄像头的像平面上的点相互关联。
Hartley校正算法只使用基本矩阵F来生成非标定立体视觉;Hartley算法可以通过单个摄像头记录的运动推导出立体结构,但是单个摄像头会比Bouguet标定算法产生更多的畸变图像。
Hartley校正算法旨在找到将对极点映射到无穷远处、并使两幅立体图像计算视差时的搜索范围最小化的单应矩阵,这可通过匹配两幅图像之间的对应点简单完成。因此,就可以绕过计算两个摄像头的内参。所以我们只需要计算基础矩阵,它可以通过cvFindFundamentalMat()函数获得两个场景视图上的7个以上的匹配点来获得,或者,通过立体标定cvStereoCalibrate()函数求解获得。
Hartley校正算法的优点是,在线立体标定可以简单地通过场景的观察点来完成;缺点是场景的图像比例未知。
Bouguet校正算法是使用立体图像间的旋转矩阵R和平移矩阵T来标定立体视觉。而旋转矩阵R和平移矩阵T可以通过立体标定cvStereoCalibrate()函数求解获得。
//查找棋盘角点
findChessboardCorners( InputArray image, Size patternSize, OutputArray corners,
int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE );
// 亚像素角点
void cornerSubPix( InputArray image, InputOutputArray corners,
Size winSize, Size zeroZone,
TermCriteria criteria );
//显示角点结果
void drawChessboardCorners( InputOutputArray image, Size patternSize,
InputArray corners, bool patternWasFound );
以上函数详情在上一节中
//双目标定
double stereoCalibrate( InputArrayOfArrays objectPoints, //物体目标点向量
InputArrayOfArrays imagePoints1, //第一个摄像头的图像的角点向量
InputArrayOfArrays imagePoints2, //第二个摄像头的图像的角点向量
InputOutputArray cameraMatrix1, //第一个摄像头的相机矩阵
InputOutputArray distCoeffs1, //第一个摄像头的畸变矩阵
InputOutputArray cameraMatrix2, //第二个摄像头的相机矩阵
InputOutputArray distCoeffs2, //第二个摄像头的畸变矩阵
Size imageSize, // 图像大小
OutputArray R, // 旋转矩阵
OutputArray T, // 平移矩阵
OutputArray E, // 本质矩阵
OutputArray F, // 基本矩阵
int flags = CALIB_FIX_INTRINSIC, //标志位
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6) //终止条件
);
//立体矫正
void stereoRectify( InputArray cameraMatrix1, //第一个摄像头的相机矩阵
InputArray distCoeffs1,//第一个摄像头的畸变矩阵
InputArray cameraMatrix2, //第二个摄像头的相机矩阵
InputArray distCoeffs2,//第二个摄像头的畸变矩阵
Size imageSize, // 图像大小
InputArray R, // 旋转矩阵
InputArray T,// 平移矩阵
OutputArray R1,//第一摄像机的旋转矩阵
OutputArray R2,//第二摄像机的旋转矩阵
OutputArray P1,//第一个相机坐标系中的投影矩阵
OutputArray P2,//第二个相机坐标系中的投影矩阵
OutputArray Q, //重投影矩阵(视差到深度的映射矩阵)
int flags = CALIB_ZERO_DISPARITY,
double alpha = -1, Size newImageSize = Size(),
CV_OUT Rect* validPixROI1 = 0, CV_OUT Rect* validPixROI2 = 0 );
为了提高精确度,可以先对左右相机分别进行单目标定,再用单目标定得到的参数进行双目标定
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <cstdio>
#include <iostream>
#include <vector>
using namespace std;
using namespace cv;
// File-scope frame-index counters used to build the image file names
// (left%d.jpg / right%d.jpg) while loading calibration images.
int n = 0;
int m = 0;
int i = 0;
int j = 0;
// Camera image resolution in pixels.
const int imagewidth = 640;
const int imagehight = 480;
// Number of inner chessboard corners along the board width and height (11 x 8).
const int boardwidth = 11;
const int boardhight = 8;
// Number of stereo image pairs used for calibration.
const int framenumber = 18;
// Physical side length of one chessboard square (world unit, presumably mm —
// TODO confirm against the printed board).
const int squaresize = 25;
const cv::Size imagesize = cv::Size(imagewidth,imagehight);
const cv::Size boardsize = cv::Size(boardwidth,boardhight);
// World coordinates of the board corners for one view, and replicated per view.
vector<cv::Point3f> objectpoint;
vector<vector<cv::Point3f>> objpoint;
// Detected (then sub-pixel refined) 2D corners of the current left/right image.
vector<cv::Point2f> cornerL;
vector<cv::Point2f> cornerR;
// Accumulated per-view corner lists for the left and right cameras.
vector<vector<cv::Point2f>> imagepointL;
vector<vector<cv::Point2f>> imagepointR;
// Intrinsic matrix of the right camera, taken from a previous monocular calibration.
Mat cameraMatrixR = (Mat_<double>(3, 3) << 8.0856621975776159e+02, 0., 3.3172675729233373e+02, 0.,
8.0855814161874650e+02, 2.3761987675788620e+02, 0., 0., 1.);
// Distortion coefficients from monocular calibration (OpenCV order: k1, k2, p1, p2, k3).
Mat distcoeffR = (Mat_<double>(5, 1) << 1.9786933338711710e-01, - 9.5319729169749201e-01,
1.5594891750389842e-03, - 9.3807184310007189e-05,
6.1835075544632612e-01);
// Intrinsic matrix of the left camera, taken from a previous monocular calibration.
Mat cameraMatrixL = (Mat_<double>(3, 3) << 7.8812414561053731e+02, 0., 2.8537738147375256e+02, 0.,
7.8860406351187100e+02, 2.2145209735502249e+02, 0., 0., 1.);
// Left-camera distortion coefficients (k1, k2, p1, p2, k3).
Mat distcoeffL = (Mat_<double>(5, 1) << 2.6555971873433654e-01, - 1.8747944751471195e+00,
- 1.0161666668941473e-03, - 3.5967374160257356e-03,
4.3538385537724684e+00);
// Stereo extrinsics: rotation R, translation T, essential E, fundamental F.
cv::Mat R,T,E,F;
// Rectification rotations R1/R2, projection matrices P1/P2, reprojection matrix Q.
cv::Mat R1, R2, P1, P2 , Q;
// Undistort-rectify maps for the left (maplx/maply) and right (maprx/mapry) cameras.
cv::Mat maplx, maply, maprx, mapry;
cv::Mat imageL, grayimageL;
cv::Mat imageR, grayimageR;
// Valid pixel regions of the rectified images, reported by stereoRectify.
cv::Rect validROIL, validROIR;
void worldpoint()
{
for (int i = 0; i < boardhight; i++)
{
for (int j = 0; j < boardwidth; j++)
{
objectpoint.push_back(cv::Point3f(i*squaresize,j*squaresize,0.0f));
}
}
for (int w = 1; w <= framenumber;w++)
{
objpoint.push_back(objectpoint);
}
}
void outputparam()
{
cv::FileStorage fs("intrinsics.yml", cv::FileStorage::WRITE);
fs << "cameraMatrixL" << cameraMatrixL << "cameraDistcoeffL" << distcoeffL << "cameraMatrixR" << cameraMatrixR << "cameraDistcoeffR" << distcoeffR;
fs.release();
std::cout << "cameraMatrixL=:" << cameraMatrixL << endl << "cameraDistcoeffL=:" << distcoeffL << endl << "cameraMatrixR=:" << cameraMatrixR << endl << "cameraDistcoeffR=:" << distcoeffR << endl;
fs.open("extrinsics.yml", cv::FileStorage::WRITE);
fs << "R" << R << "T" << T << "Rl" << R1 << "Rr" << R2 << "Pl" << P1 << "Pr" << P2 << "Q" << Q;
std::cout << "R=" << R << endl << "T=" << T << endl << "Rl=" << R1 << endl << "Rr=" << R2 << endl << "Pl=" << P1 << endl << "Pr=" << P2 << endl << "Q=" << Q << endl;
fs.release();
}
// Stereo calibration pipeline:
//  1. load each left/right image pair and detect chessboard corners,
//  2. run stereoCalibrate (seeded with the monocular intrinsics) and
//     stereoRectify, saving all parameters via outputparam(),
//  3. remap each pair with the rectification maps and display them side by
//     side with epipolar lines for visual verification.
int main()
{
    // ---- 1. Load image pairs and detect chessboard corners ----
    int current = 1;
    while (current <= framenumber)
    {
        char frameL[50];
        // snprintf is standard C++ (the original sprintf_s is MSVC-only).
        snprintf(frameL, 50, "left%d.jpg", n++);
        imageL = cv::imread(frameL);
        char frameR[50];
        snprintf(frameR, 50, "right%d.jpg", m++);
        imageR = cv::imread(frameR);
        // Guard: cv::cvtColor throws on an empty Mat, so skip unreadable files.
        if (imageL.empty() || imageR.empty())
        {
            std::cout << "The image " << current << " could not be read" << endl;
            current++;
            continue;
        }
        cv::cvtColor(imageL, grayimageL, cv::ColorConversionCodes::COLOR_BGR2GRAY);
        cv::cvtColor(imageR, grayimageR, cv::ColorConversionCodes::COLOR_BGR2GRAY);
        bool foundL = cv::findChessboardCorners(imageL, boardsize, cornerL);
        bool foundR = cv::findChessboardCorners(imageR, boardsize, cornerR);
        // Only keep pairs where the full board was found in BOTH views.
        if (foundL && foundR)
        {
            // Refine corner locations to sub-pixel accuracy on the gray images.
            cv::cornerSubPix(grayimageL, cornerL, cv::Size(11, 11), cv::Size(-1, -1), cv::TermCriteria(cv::TermCriteria::MAX_ITER | cv::TermCriteria::EPS, 30, 1e-6));
            cv::cornerSubPix(grayimageR, cornerR, cv::Size(11, 11), cv::Size(-1, -1), cv::TermCriteria(cv::TermCriteria::MAX_ITER | cv::TermCriteria::EPS, 30, 1e-6));
            cv::drawChessboardCorners(imageL, boardsize, cornerL, foundL);
            cv::imshow("L", imageL);
            cv::waitKey(10);
            cv::drawChessboardCorners(imageR, boardsize, cornerR, foundR);
            cv::imshow("R", imageR);
            cv::waitKey(10);
            imagepointL.push_back(cornerL);
            imagepointR.push_back(cornerR);
            cout << "The image " << current << " is good" << endl;
        }
        else
        {
            std::cout << "The image is bad please try again" << endl;
        }
        current++;
    }
    // Guard: stereoCalibrate would throw on empty point lists.
    if (imagepointL.empty())
    {
        std::cout << "No valid stereo pairs were found, calibration aborted" << endl;
        return -1;
    }
    worldpoint();
    // stereoCalibrate requires objectPoints and imagePoints to have the same
    // outer size; realign in case some frames failed corner detection.
    if (objpoint.size() != imagepointL.size())
        objpoint.resize(imagepointL.size(), objectpoint);
    // ---- 2. Stereo calibration + rectification ----
    cout << "开始标定" << endl;
    // CALIB_USE_INTRINSIC_GUESS: refine the monocular intrinsics supplied in
    // cameraMatrixL/R rather than estimating them from scratch.
    double err = cv::stereoCalibrate(objpoint, imagepointL, imagepointR, cameraMatrixL, distcoeffL, cameraMatrixR, distcoeffR, imagesize, R, T, E, F,
        CALIB_USE_INTRINSIC_GUESS,
        cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, 1e-6));
    // err is the RMS reprojection error over all views.
    cout << "The err = " << err << endl;
    cv::stereoRectify(cameraMatrixL, distcoeffL, cameraMatrixR, distcoeffR, imagesize, R, T, R1, R2, P1, P2, Q, cv::CALIB_ZERO_DISPARITY, -1, imagesize, &validROIL, &validROIR);
    cv::initUndistortRectifyMap(cameraMatrixL, distcoeffL, R1, P1, imagesize, CV_32FC1, maplx, maply);
    cv::initUndistortRectifyMap(cameraMatrixR, distcoeffR, R2, P2, imagesize, CV_32FC1, maprx, mapry);
    outputparam();
    // ---- 3. Visual check: rectified pairs side by side with scan lines ----
    cv::Mat canvas;
    double sf;
    int w, h;
    // Scale the pair so the larger image dimension becomes 600 px.
    sf = 600. / MAX(imagesize.width, imagesize.height);
    w = cvRound(imagesize.width * sf);
    h = cvRound(imagesize.height * sf);
    canvas.create(h, w * 2, CV_8UC3);
    int currents = 1;
    while (currents <= framenumber)
    {
        char frameL[50];
        snprintf(frameL, 50, "left%d.jpg", i++);
        imageL = cv::imread(frameL);
        char frameR[50];
        snprintf(frameR, 50, "right%d.jpg", j++);
        imageR = cv::imread(frameR);
        // Skip unreadable pairs; cv::remap on an empty Mat would throw.
        if (imageL.empty() || imageR.empty())
        {
            currents++;
            continue;
        }
        cv::Mat rectifyImageL2, rectifyImageR2;
        cv::remap(imageL, rectifyImageL2, maplx, maply, cv::InterpolationFlags::INTER_LINEAR);
        cv::remap(imageR, rectifyImageR2, maprx, mapry, cv::InterpolationFlags::INTER_LINEAR);
        // Left half of the canvas: rectified left image + its valid ROI in red.
        cv::Mat canvasPart = canvas(cv::Rect(w * 0, 0, w, h));
        resize(rectifyImageL2, canvasPart, canvasPart.size(), 0, 0, cv::INTER_AREA);
        cv::Rect vroiL(cvRound(validROIL.x*sf), cvRound(validROIL.y*sf),
            cvRound(validROIL.width*sf), cvRound(validROIL.height*sf));
        cv::rectangle(canvasPart, vroiL, cv::Scalar(0, 0, 255), 3, 8);
        // Right half of the canvas: rectified right image + its valid ROI in green.
        canvasPart = canvas(cv::Rect(w, 0, w, h));
        resize(rectifyImageR2, canvasPart, canvasPart.size(), 0, 0, cv::INTER_LINEAR);
        cv::Rect vroiR(cvRound(validROIR.x * sf), cvRound(validROIR.y*sf),
            cvRound(validROIR.width * sf), cvRound(validROIR.height * sf));
        cv::rectangle(canvasPart, vroiR, cv::Scalar(0, 255, 0), 3, 8);
        // Horizontal scan lines: after rectification, corresponding points
        // should lie on the same line in both halves.
        for (int row = 0; row < canvas.rows; row += 16)
            line(canvas, cv::Point(0, row), cv::Point(canvas.cols, row), cv::Scalar(0, 255, 0), 1, 8);
        cv::imshow("rectified", canvas);
        // Advance to the next pair on any key press.
        if (cv::waitKey() > 0)
        {
            currents++;
        }
    }
    return 0;
}
结果:
在使用摄像头时,建议使用一般的定焦摄像头,不建议使用自动对焦的智能摄像头
如有错误,请批评指正