Notes on monocular and stereo (multi-camera) calibration with OpenCV
References:
https://blog.csdn.net/zhiyuan2021/article/details/118556816
https://www.cnblogs.com/polly333/p/5013505.html
/*************************************************************************************
*
* Description: Camera calibration with Zhang's method -- monocular calibration
* Author     : JNU
* Date       : 2018.7.22
*
************************************************************************************/
//headers required by the code below
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
using namespace cv;
using namespace std;
int main()
{
//file names of the calibration images
std::vector<std::string> filenames;
//parameters that need to be adapted
//left-camera calibration: path of the left-camera image list and the file that stores the calibration results
string infilename = "sample/left/filename.txt";    //for the right camera, replace "left" with "right"
string outfilename = "sample/left/caliberation_result.txt";
//list file of the calibration images, one image path per line (ifstream reads from disk into memory)
ifstream fin(infilename);
//file for the calibration results (ofstream writes from memory to disk)
ofstream fout(outfilename);
/*
1. Read every image, extract the chessboard corners, refine them to sub-pixel accuracy
   and obtain the coordinates of every corner in the pixel coordinate system.
   The origin of the pixel coordinate system is the top-left corner of the image.
*/
std::cout << "Start extracting corners......" << std::endl;
//number of images
int imageCount = 0;
//image size
cv::Size imageSize;
//number of inner corners per row and per column of the calibration board
cv::Size boardSize = cv::Size(9, 6);
//buffer for the corners detected in the current image
std::vector<Point2f> imagePointsBuf;
//corners detected in all images
std::vector<std::vector<Point2f>> imagePointsSeq;
char filename[100];
if (fin.is_open())
{
//keep reading until the end of the list file
while (!fin.eof())
{
//read one line (one image path) at a time
fin.getline(filename, sizeof(filename) / sizeof(char));
//save the file name
filenames.push_back(filename);
//read the image
Mat imageInput = cv::imread(filename);
//get the image width and height from the first image
if (imageCount == 0)
{
imageSize.width = imageInput.cols;
imageSize.height = imageInput.rows;
std::cout << "imageSize.width = " << imageSize.width << std::endl;
std::cout << "imageSize.height = " << imageSize.height << std::endl;
}
std::cout << "imageCount = " << imageCount << std::endl;
imageCount++;
//extract the corners of the current image
if (cv::findChessboardCorners(imageInput, boardSize, imagePointsBuf) == 0)
{
//no corners found
std::cout << "Can not find chessboard corners!" << std::endl;
exit(1);
}
else
{
Mat viewGray;
//convert to grayscale
cv::cvtColor(imageInput, viewGray, cv::COLOR_BGR2GRAY);
//sub-pixel refinement of the coarsely detected corners
cv::find4QuadCornerSubpix(viewGray, imagePointsBuf, cv::Size(5, 5));
//save the refined corners
imagePointsSeq.push_back(imagePointsBuf);
//draw the corner positions on the image
cv::drawChessboardCorners(viewGray, boardSize, imagePointsBuf, true);
//show the image
//cv::imshow("Camera Calibration", viewGray);
cv::imwrite("test.jpg", viewGray);
//wait 0.5 s
//waitKey(500);
}
}
//number of corners per image: 9 * 6 = 54
int cornerNum = boardSize.width * boardSize.height;
//total number of corners
int total = imagePointsSeq.size()*cornerNum;
std::cout << "total = " << total << std::endl;
for (int i = 0; i < total; i++)
{
int num = i / cornerNum;
int p = i%cornerNum;
//cornerNum is the number of corners per image; this condition only marks the start of a new image in the output, for debugging
if (p == 0)
{
std::cout << "\nData of image " << num+1 << " -->: " << std::endl;
}
//print all corners
std::cout<<p+1<<":("<< imagePointsSeq[num][p].x;
std::cout << "," << imagePointsSeq[num][p].y<<")\t";
if ((p+1) % 3 == 0)
{
std::cout << std::endl;
}
}
std::cout << "Corner extraction finished!" << std::endl;
/*
2. Camera calibration. The origin of the world coordinate system is the top-left corner of the
   calibration board (the top-left corner of the first square).
*/
std::cout << "Start calibration" << std::endl;
//3D information of the chessboard: coordinates of the board corners in the world coordinate system
//size of one square of the calibration board, obtained by actual measurement (here 26 mm x 26 mm)
cv::Size squareSize = cv::Size(26, 26);
//number of corners in each image
std::vector<int> pointCounts;
//3D coordinates of the corners on the calibration board
std::vector<std::vector<cv::Point3f>> objectPoints;
//camera intrinsic matrix M = [fx γ u0; 0 fy v0; 0 0 1]
cv::Mat cameraMatrix = cv::Mat(3, 3, CV_64F, Scalar::all(0));
//the 5 distortion coefficients of the camera: k1, k2, p1, p2, k3
cv::Mat distCoeffs = cv::Mat(1, 5, CV_64F, Scalar::all(0));
//translation vector of each image (filled by calibrateCamera below)
std::vector<cv::Mat> tvecsMat;
//rotation vector of each image (filled by calibrateCamera below)
std::vector<cv::Mat> rvecsMat;
//initialize the 3D coordinates of the corners on the calibration board
int i, j, t;
for (t = 0; t < imageCount; t++)
{
std::vector<cv::Point3f> tempPointSet;
//rows
for (i = 0; i < boardSize.height; i++)
{
//columns
for (j = 0; j < boardSize.width; j++)
{
cv::Point3f realPoint;
//assume the calibration board lies in the z = 0 plane of the world coordinate system
realPoint.x = i*squareSize.width;
realPoint.y = j*squareSize.height;
realPoint.z = 0;
tempPointSet.push_back(realPoint);
}
}
objectPoints.push_back(tempPointSet);
}
//initialize the number of corners per image, assuming the complete board is visible in every image
for (i = 0; i < imageCount; i++)
{
pointCounts.push_back(boardSize.width*boardSize.height);
}
//run the calibration
cv::calibrateCamera(objectPoints, imagePointsSeq, imageSize, cameraMatrix, distCoeffs, rvecsMat, tvecsMat);
std::cout << "Calibration finished" << std::endl;
//evaluate the calibration result
std::cout << "Start evaluating the calibration result......" << std::endl;
//sum of the mean errors of all images
double totalErr = 0.0;
//mean error of a single image
double err = 0.0;
//re-projected image points
std::vector<cv::Point2f> imagePoints2;
std::cout << "Calibration error of each image:" << std::endl;
fout << "Calibration error of each image:" << std::endl;
for (i = 0; i < imageCount; i++)
{
std::vector<cv::Point3f> tempPointSet = objectPoints[i];
//re-project the 3D points with the estimated intrinsic and extrinsic parameters to obtain the new projections imagePoints2 (in pixel coordinates)
cv::projectPoints(tempPointSet, rvecsMat[i], tvecsMat[i], cameraMatrix, distCoeffs, imagePoints2);
//compute the error between the re-projected points and the detected points
std::vector<cv::Point2f> tempImagePoint = imagePointsSeq[i];
cv::Mat tempImagePointMat = cv::Mat(1, tempImagePoint.size(), CV_32FC2);
cv::Mat imagePoints2Mat = cv::Mat(1, imagePoints2.size(), CV_32FC2);
for (int j = 0; j < tempImagePoint.size(); j++)
{
imagePoints2Mat.at<cv::Vec2f>(0, j) = cv::Vec2f(imagePoints2[j].x, imagePoints2[j].y);
tempImagePointMat.at<cv::Vec2f>(0, j) = cv::Vec2f(tempImagePoint[j].x, tempImagePoint[j].y);
}
//Calculates an absolute difference norm or a relative difference norm.
err = cv::norm(imagePoints2Mat, tempImagePointMat, NORM_L2);
totalErr += err /= pointCounts[i];
std::cout << "Mean error of image " << i + 1 << ": " << err << " pixels" << endl;
fout << "Mean error of image " << i + 1 << ": " << err << " pixels" << endl;
}
//overall mean error over all images
std::cout << "Overall mean error: " << totalErr / imageCount << " pixels" << std::endl;
fout << "Overall mean error: " << totalErr / imageCount << " pixels" << std::endl;
std::cout << "Evaluation finished!" << std::endl;
//save the calibration results
std::cout << "Start saving the calibration results....." << std::endl;
//rotation matrix of the current image
cv::Mat rotationMatrix = cv::Mat(3, 3, CV_32FC1, Scalar::all(0));
fout << "Camera intrinsic matrix:" << std::endl;
fout << cameraMatrix << std::endl << std::endl;
fout << "Distortion coefficients:" << std::endl;
fout << distCoeffs << std::endl << std::endl;
for (int i = 0; i < imageCount; i++)
{
fout << "Rotation vector of image " << i + 1 << ":" << std::endl;
fout << rvecsMat[i] << std::endl;
//convert the rotation vector into the corresponding rotation matrix
cv::Rodrigues(rvecsMat[i], rotationMatrix);
fout << "Rotation matrix of image " << i + 1 << ":" << std::endl;
fout << rotationMatrix << std::endl;
fout << "Translation vector of image " << i + 1 << ":" << std::endl;
fout << tvecsMat[i] << std::endl;
}
std::cout << "Saving finished" << std::endl;
/************************************************************************
Display the undistorted images
*************************************************************************/
cv::Mat mapx = cv::Mat(imageSize, CV_32FC1);
cv::Mat mapy = cv::Mat(imageSize, CV_32FC1);
cv::Mat R = cv::Mat::eye(3, 3, CV_32F);
std::cout << "Show the undistorted images" << endl;
for (int i = 0; i != imageCount; i++)
{
std::cout << "Frame #" << i + 1 << "..." << endl;
//compute the undistortion maps mapx and mapy (no stereo rectification here; rectification needs two cameras)
initUndistortRectifyMap(cameraMatrix, distCoeffs, R, cameraMatrix, imageSize, CV_32FC1, mapx, mapy);
//read one image
Mat imageSource = imread(filenames[i]);
Mat newimage = imageSource.clone();
//an alternative that does not need the maps:
//undistort(imageSource, newimage, cameraMatrix, distCoeffs);
//undistort the image
remap(imageSource, newimage, mapx, mapy, INTER_LINEAR);
imshow("original image", imageSource);
imshow("undistorted image", newimage);
waitKey();
}
//release resources
fin.close();
fout.close();
system("pause");
}
}
Some things to pay attention to during calibration:
1. The gray-level distribution of the calibration board should be close to pure black and white; sometimes the image has to be inverted.
2. Understand exactly what each function parameter does, even though it is a built-in OpenCV flag. Note that with CV_CALIB_FIX_INTRINSIC the intrinsic matrices and distortion coefficients are not re-estimated (they are assumed to be known); stereoCalibrate then only estimates the relation between the two cameras (R, T, E, F), while P1, P2 and Q are obtained later from stereoRectify.
3. Small tuning details matter.
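To make point 2 concrete, the sketch below shows the workflow that the stereoCalibrate documentation further down recommends: calibrate each camera separately first, then pass CALIB_FIX_INTRINSIC so that only R, T, E and F are estimated. This is a minimal illustration, not part of the original code; the input vectors (objectPoints, imagePointsL, imagePointsR) are assumed to have been filled by chessboard detection as in the samples in this post.

#include <opencv2/calib3d.hpp>
#include <vector>
using namespace cv;

// Assumed inputs: per-view object points and the matching corners seen by the
// left and right cameras, plus the common image size.
void stereoCalibWithFixedIntrinsics(const std::vector<std::vector<Point3f>>& objectPoints,
                                    const std::vector<std::vector<Point2f>>& imagePointsL,
                                    const std::vector<std::vector<Point2f>>& imagePointsR,
                                    Size imageSize)
{
    Mat ML, DL, MR, DR;
    std::vector<Mat> rvecs, tvecs;
    // 1. Calibrate each camera on its own so the intrinsics are reliable.
    calibrateCamera(objectPoints, imagePointsL, imageSize, ML, DL, rvecs, tvecs);
    calibrateCamera(objectPoints, imagePointsR, imageSize, MR, DR, rvecs, tvecs);

    // 2. Keep the intrinsics fixed; stereoCalibrate now only estimates R, T, E and F.
    Mat R, T, E, F;
    stereoCalibrate(objectPoints, imagePointsL, imagePointsR,
                    ML, DL, MR, DR, imageSize, R, T, E, F,
                    CALIB_FIX_INTRINSIC,
                    TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, 1e-5));

    // 3. P1, P2 and Q come from stereoRectify, not from stereoCalibrate.
    Mat R1, R2, P1, P2, Q;
    stereoRectify(ML, DL, MR, DR, imageSize, R, T, R1, R2, P1, P2, Q);
}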
/* This is sample from the OpenCV book. The copyright notice is below */
/* *************** License:**************************
Oct. 3, 2008
Right to use this code in any way you want without warranty, support or any guarantee of it working.
BOOK: It would be nice if you cited it:
Learning OpenCV: Computer Vision with the OpenCV Library
by Gary Bradski and Adrian Kaehler
Published by O'Reilly Media, October 3, 2008
AVAILABLE AT:
http://www.amazon.com/Learning-OpenCV-Computer-Vision-Library/dp/0596516134
Or: http://oreilly.com/catalog/9780596516130/
ISBN-10: 0596516134 or: ISBN-13: 978-0596516130
OPENCV WEBSITES:
Homepage: http://opencv.org
Online docs: http://docs.opencv.org
GitHub: https://github.com/opencv/opencv/
************************************************** */
#include "opencv2/calib3d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <vector>
#include <string>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
using namespace cv;
using namespace std;
static int print_help(char** argv)
{
cout <<
" Given a list of chessboard images, the number of corners (nx, ny)\n"
" on the chessboards, and a flag: useCalibrated for \n"
" calibrated (0) or\n"
" uncalibrated \n"
" (1: use stereoCalibrate(), 2: compute fundamental\n"
" matrix separately) stereo. \n"
" Calibrate the cameras and display the\n"
" rectified results along with the computed disparity images. \n" << endl;
cout << "Usage:\n " << argv[0] << " -w= -h= -s= \n" << endl;
return 0;
}
static void
StereoCalib(const vector<string>& imagelist, Size boardSize, float squareSize, bool displayCorners = false, bool useCalibrated=true, bool showRectified=true)
{
if( imagelist.size() % 2 != 0 )
{
cout << "Error: the image list contains odd (non-even) number of elements\n";
return;
}
const int maxScale = 2;
// ARRAY AND VECTOR STORAGE:
vector<vector<Point2f> > imagePoints[2];
vector<vector<Point3f> > objectPoints;
Size imageSize;
int i, j, k, nimages = (int)imagelist.size()/2;
imagePoints[0].resize(nimages);
imagePoints[1].resize(nimages);
vector<string> goodImageList;
for( i = j = 0; i < nimages; i++ )
{
for( k = 0; k < 2; k++ )
{
const string& filename = imagelist[i*2+k];
Mat img = imread(filename, 0);
if(img.empty())
break;
if( imageSize == Size() )
imageSize = img.size();
else if( img.size() != imageSize )
{
cout << "The image " << filename << " has the size different from the first image size. Skipping the pair\n";
break;
}
bool found = false;
vector<Point2f>& corners = imagePoints[k][j];
for( int scale = 1; scale <= maxScale; scale++ )
{
Mat timg;
if( scale == 1 )
timg = img;
else
resize(img, timg, Size(), scale, scale, INTER_LINEAR_EXACT);
found = findChessboardCorners(timg, boardSize, corners,
CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
if( found )
{
if( scale > 1 )
{
Mat cornersMat(corners);
cornersMat *= 1./scale;
}
break;
}
}
if( displayCorners )
{
cout << filename << endl;
Mat cimg, cimg1;
cvtColor(img, cimg, COLOR_GRAY2BGR);
drawChessboardCorners(cimg, boardSize, corners, found);
double sf = 640./MAX(img.rows, img.cols);
resize(cimg, cimg1, Size(), sf, sf, INTER_LINEAR_EXACT);
imshow("corners", cimg1);
char c = (char)waitKey(500);
if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
exit(-1);
}
else
putchar('.');
if( !found )
break;
cornerSubPix(img, corners, Size(11,11), Size(-1,-1),
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
30, 0.01));
}
if( k == 2 )
{
goodImageList.push_back(imagelist[i*2]);
goodImageList.push_back(imagelist[i*2+1]);
j++;
}
}
cout << j << " pairs have been successfully detected.\n";
nimages = j;
if( nimages < 2 )
{
cout << "Error: too little pairs to run the calibration\n";
return;
}
imagePoints[0].resize(nimages);
imagePoints[1].resize(nimages);
objectPoints.resize(nimages);
for( i = 0; i < nimages; i++ )
{
for( j = 0; j < boardSize.height; j++ )
for( k = 0; k < boardSize.width; k++ )
objectPoints[i].push_back(Point3f(k*squareSize, j*squareSize, 0));
}
cout << "Running stereo calibration ...\n";
Mat cameraMatrix[2], distCoeffs[2];
cameraMatrix[0] = initCameraMatrix2D(objectPoints,imagePoints[0],imageSize,0);
cameraMatrix[1] = initCameraMatrix2D(objectPoints,imagePoints[1],imageSize,0);
Mat R, T, E, F;
double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, E, F,
CALIB_FIX_ASPECT_RATIO +
CALIB_ZERO_TANGENT_DIST +
CALIB_USE_INTRINSIC_GUESS +
CALIB_SAME_FOCAL_LENGTH +
CALIB_RATIONAL_MODEL +
CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 100, 1e-5) );
cout << "done with RMS error=" << rms << endl;
// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
double err = 0;
int npoints = 0;
vector<Vec3f> lines[2];
for( i = 0; i < nimages; i++ )
{
int npt = (int)imagePoints[0][i].size();
Mat imgpt[2];
for( k = 0; k < 2; k++ )
{
imgpt[k] = Mat(imagePoints[k][i]);
undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]);
computeCorrespondEpilines(imgpt[k], k+1, F, lines[k]);
}
for( j = 0; j < npt; j++ )
{
double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
fabs(imagePoints[1][i][j].x*lines[0][j][0] +
imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
err += errij;
}
npoints += npt;
}
cout << "average epipolar err = " << err/npoints << endl;
// save intrinsic parameters
FileStorage fs("intrinsics.yml", FileStorage::WRITE);
if( fs.isOpened() )
{
fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
"M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
fs.release();
}
else
cout << "Error: can not save the intrinsic parameters\n";
Mat R1, R2, P1, P2, Q;
Rect validRoi[2];
stereoRectify(cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, R1, R2, P1, P2, Q,
CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
fs.open("extrinsics.yml", FileStorage::WRITE);
if( fs.isOpened() )
{
fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
fs.release();
}
else
cout << "Error: can not save the extrinsic parameters\n";
// OpenCV can handle left-right
// or up-down camera arrangements
bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));
// COMPUTE AND DISPLAY RECTIFICATION
if( !showRectified )
return;
Mat rmap[2][2];
// IF BY CALIBRATED (BOUGUET'S METHOD)
if( useCalibrated )
{
// we already computed everything
}
// OR ELSE HARTLEY'S METHOD
else
// use intrinsic parameters of each camera, but
// compute the rectification transformation directly
// from the fundamental matrix
{
vector<Point2f> allimgpt[2];
for( k = 0; k < 2; k++ )
{
for( i = 0; i < nimages; i++ )
std::copy(imagePoints[k][i].begin(), imagePoints[k][i].end(), back_inserter(allimgpt[k]));
}
F = findFundamentalMat(Mat(allimgpt[0]), Mat(allimgpt[1]), FM_8POINT, 0, 0);
Mat H1, H2;
stereoRectifyUncalibrated(Mat(allimgpt[0]), Mat(allimgpt[1]), F, imageSize, H1, H2, 3);
R1 = cameraMatrix[0].inv()*H1*cameraMatrix[0];
R2 = cameraMatrix[1].inv()*H2*cameraMatrix[1];
P1 = cameraMatrix[0];
P2 = cameraMatrix[1];
}
//Precompute maps for cv::remap()
initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);
Mat canvas;
double sf;
int w, h;
if( !isVerticalStereo )
{
sf = 600./MAX(imageSize.width, imageSize.height);
w = cvRound(imageSize.width*sf);
h = cvRound(imageSize.height*sf);
canvas.create(h, w*2, CV_8UC3);
}
else
{
sf = 300./MAX(imageSize.width, imageSize.height);
w = cvRound(imageSize.width*sf);
h = cvRound(imageSize.height*sf);
canvas.create(h*2, w, CV_8UC3);
}
for( i = 0; i < nimages; i++ )
{
for( k = 0; k < 2; k++ )
{
Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
remap(img, rimg, rmap[k][0], rmap[k][1], INTER_LINEAR);
cvtColor(rimg, cimg, COLOR_GRAY2BGR);
Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
resize(cimg, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);
if( useCalibrated )
{
Rect vroi(cvRound(validRoi[k].x*sf), cvRound(validRoi[k].y*sf),
cvRound(validRoi[k].width*sf), cvRound(validRoi[k].height*sf));
rectangle(canvasPart, vroi, Scalar(0,0,255), 3, 8);
}
}
if( !isVerticalStereo )
for( j = 0; j < canvas.rows; j += 16 )
line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
else
for( j = 0; j < canvas.cols; j += 16 )
line(canvas, Point(j, 0), Point(j, canvas.rows), Scalar(0, 255, 0), 1, 8);
imshow("rectified", canvas);
char c = (char)waitKey();
if( c == 27 || c == 'q' || c == 'Q' )
break;
}
}
static bool readStringList( const string& filename, vector<string>& l )
{
l.resize(0);
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
FileNode n = fs.getFirstTopLevelNode();
if( n.type() != FileNode::SEQ )
return false;
FileNodeIterator it = n.begin(), it_end = n.end();
for( ; it != it_end; ++it )
l.push_back((string)*it);
return true;
}
int main(int argc, char** argv)
{
Size boardSize;
string imagelistfn;
bool showRectified;
cv::CommandLineParser parser(argc, argv, "{w|9|}{h|6|}{s|1.0|}{nr||}{help||}{@input|stereo_calib.xml|}");
if (parser.has("help"))
return print_help(argv);
showRectified = !parser.has("nr");
imagelistfn = samples::findFile(parser.get<string>("@input"));
boardSize.width = parser.get<int>("w");
boardSize.height = parser.get<int>("h");
float squareSize = parser.get<float>("s");
if (!parser.check())
{
parser.printErrors();
return 1;
}
vector<string> imagelist;
bool ok = readStringList(imagelistfn, imagelist);
if(!ok || imagelist.empty())
{
cout << "can not open " << imagelistfn << " or the string list is empty" << endl;
return print_help(argv);
}
StereoCalib(imagelist, boardSize, squareSize, false, true, showRectified);
return 0;
}
The calibration-related functions used above are summarized below (following the OpenCV reference documentation).

Function:
Find the corners of a chessboard in a chessboard image.

Declaration:
int cvFindChessboardCorners( const void* image, CvSize pattern_size, CvPoint2D32f* corners, int* corner_count = NULL, int flags = CV_CALIB_CB_ADAPTIVE_THRESH );

Parameters:
in      image - the input chessboard image; it must be an 8-bit grayscale or color image.
in      pattern_size - the number of inner corners per chessboard row and column (for the board used in the code above this is (9, 6)).
out     corners - pointer to the array that receives the detected corner positions; the array must be allocated in advance and must hold at least as many elements as there are corners on the board.
out     corner_count - optional; if not NULL, it points to an integer that receives the number of corners actually found.
in      flags - additional filtering steps that help to find the chessboard corners; the flags can be used individually or combined with a bitwise OR:
        CV_CALIB_CB_ADAPTIVE_THRESH - by default cvFindChessboardCorners() binarizes the image with a threshold derived from the average brightness; with this flag an adaptive threshold is used instead.
        CV_CALIB_CB_NORMALIZE_IMAGE - normalize the image with cvEqualizeHist() before binarization.
        CV_CALIB_CB_FILTER_QUADS - after binarization the algorithm tries to locate the quadrangles that are the projections of the black chessboard squares. This is an approximation, because each side of a quadrangle is assumed to be straight, which does not hold exactly under radial distortion. With this flag, additional constraints are applied to these quadrangles to reject false ones.
The function returns a non-zero value if all corners were found, and 0 otherwise.
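For the flags described above, a minimal sketch using the current C++ API (cv::findChessboardCorners instead of the legacy cvFindChessboardCorners; the file name "left01.jpg" and the 9 x 6 board size are placeholder assumptions):

#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    cv::Mat gray = cv::imread("left01.jpg", cv::IMREAD_GRAYSCALE);   // placeholder image
    std::vector<cv::Point2f> corners;
    // Adaptive thresholding plus histogram equalization before the corner search,
    // as described in the flag list above.
    bool found = cv::findChessboardCorners(gray, cv::Size(9, 6), corners,
                                           cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE);
    return found ? 0 : 1;
}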
Function:
Draw the corners found by cvFindChessboardCorners() onto the supplied image.

Declaration:
void cvDrawChessboardCorners( CvArr* image, CvSize pattern_size, CvPoint2D32f* corners, int count, int pattern_was_found );

Parameters:
in/out  image - the image to draw on; an 8-bit color image.
in      pattern_size - the number of inner corners per chessboard row and column.
in      corners - the array of detected corners.
in      count - the number of corners.
in      pattern_was_found - indicates whether the complete chessboard pattern was found; it can be set to the return value of cvFindChessboardCorners().

Remarks:
If not all of the corners were found, they are drawn as red circles. If the complete pattern was found, the corners are drawn in different colors (one color per row) and connected with lines in the detection order.
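A minimal sketch of the matching C++ call, cv::drawChessboardCorners (again "left01.jpg" and the 9 x 6 board are placeholders); passing the return value of findChessboardCorners as the last argument selects between the red-circle and the colored-rows drawing styles described above:

#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

int main()
{
    cv::Mat gray = cv::imread("left01.jpg", cv::IMREAD_GRAYSCALE);
    std::vector<cv::Point2f> corners;
    bool found = cv::findChessboardCorners(gray, cv::Size(9, 6), corners);

    cv::Mat color;
    cv::cvtColor(gray, color, cv::COLOR_GRAY2BGR);   // draw on a color copy
    cv::drawChessboardCorners(color, cv::Size(9, 6), cv::Mat(corners), found);
    cv::imwrite("corners.jpg", color);
    return 0;
}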
Function:
Refine the detected corner locations to sub-pixel accuracy.

Declaration:
void cornerSubPix( InputArray image, InputOutputArray corners, Size winSize, Size zeroZone, TermCriteria criteria );

Parameters:
in      image - the input image.
in/out  corners - the initial corner coordinates on input, the refined coordinates on output.
in      winSize - half of the side length of the search window; for example with winSize = Size(5, 5) a search window of (5*2+1) x (5*2+1) = 11 x 11 pixels is used.
in      zeroZone - half of the size of the dead region in the middle of the search window over which the summation is not performed, used to avoid possible singularities of the autocorrelation matrix; (-1, -1) means there is no such region.
in      criteria - termination criteria of the iterative corner refinement (maximum number of iterations and/or required accuracy).
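A minimal sketch of the refinement step (placeholder image and board size as before); with winSize = Size(5, 5) the search window is 11 x 11 pixels, and the iteration stops after 30 iterations or once the corner moves by less than 0.01 pixels:

#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

int main()
{
    cv::Mat gray = cv::imread("left01.jpg", cv::IMREAD_GRAYSCALE);
    std::vector<cv::Point2f> corners;
    if (!cv::findChessboardCorners(gray, cv::Size(9, 6), corners))
        return 1;
    // Refine the coarse corner locations in place to sub-pixel accuracy.
    cv::cornerSubPix(gray, corners, cv::Size(5, 5), cv::Size(-1, -1),
                     cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, 0.01));
    return 0;
}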
Function:
Stereo camera calibration: computes the transformation between the two cameras of a stereo pair. From the parameters of the left and right cameras it produces the rotation and translation between the two cameras as well as the essential and fundamental matrices.

Declaration:
double stereoCalibrate( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1, InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2, Size imageSize, OutputArray R, OutputArray T, OutputArray E, OutputArray F, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6), int flags = CALIB_FIX_INTRINSIC );

Main parameters:
in      objectPoints - vectors of the calibration pattern points.
in      imagePoints1 - vectors of the pattern points observed by the first camera.
in      imagePoints2 - vectors of the pattern points observed by the second camera.
in/out  cameraMatrix1 - input/output intrinsic matrix of the first camera. If CV_CALIB_USE_INTRINSIC_GUESS, CV_CALIB_FIX_ASPECT_RATIO, CV_CALIB_FIX_INTRINSIC or CV_CALIB_FIX_FOCAL_LENGTH is specified, some or all of the matrix elements must be initialized; see the description of the flags.
in/out  distCoeffs1 - input/output distortion coefficients of the first camera (4, 5 or 8 elements); the length of the output vector depends on the flags.
in/out  cameraMatrix2 - input/output intrinsic matrix of the second camera.
in/out  distCoeffs2 - input/output distortion coefficients of the second camera.
in      imageSize - size of the images, used only to initialize the camera intrinsic matrices.
out     R - output rotation matrix between the coordinate systems of the first and the second camera.
out     T - output translation vector between the coordinate systems of the first and the second camera.
out     E - output essential matrix.
out     F - output fundamental matrix.
in      criteria - termination criteria of the iterative optimization algorithm.
in      flags - zero or a combination of the following values:
        CV_CALIB_FIX_INTRINSIC - keep cameraMatrix? and distCoeffs? fixed, so that only the R, T, E and F matrices are estimated.
        CV_CALIB_USE_INTRINSIC_GUESS - optimize some or all of the intrinsic parameters according to the other flags; the initial values are supplied by the caller.
        CV_CALIB_FIX_PRINCIPAL_POINT - keep the principal points fixed during the optimization.
        CV_CALIB_FIX_FOCAL_LENGTH - keep fx and fy fixed.
        CV_CALIB_FIX_ASPECT_RATIO - optimize fy while keeping the ratio fx/fy fixed.
        CV_CALIB_SAME_FOCAL_LENGTH - enforce the same focal length for both cameras.
        CV_CALIB_ZERO_TANGENT_DIST - set the tangential distortion coefficients of each camera to zero and keep them fixed.
        CV_CALIB_FIX_K1,...,CV_CALIB_FIX_K6 - do not change the corresponding radial distortion coefficient during the optimization; if CV_CALIB_USE_INTRINSIC_GUESS is set, the coefficients from the distCoeffs matrices are used, otherwise they are set to zero.
        CV_CALIB_RATIONAL_MODEL - enable the coefficients k4, k5 and k6. To keep backward compatibility this flag must be specified explicitly to make the function use the rational model and return 8 coefficients; if it is not set, the function computes and returns only 5 distortion coefficients.

Remarks:
If you have a stereo rig in which the relative position and orientation of the two cameras is fixed, and if you computed the pose of an object relative to the first and to the second camera, (R1, T1) and (R2, T2) respectively (for example with solvePnP()), these poses are related: given (R1, T1) you can compute (R2, T2), because all you need is the position and orientation of the second camera relative to the first one, which is exactly what stereoCalibrate() estimates.
Besides the stereo-related information, the function can also perform a full calibration of each of the two cameras. However, because of the high dimensionality of the parameter space and the noise in the input data, the result may drift away from the correct values. If the intrinsic parameters of each camera can be estimated accurately on their own (for example with calibrateCamera()), it is recommended to do so and then pass CV_CALIB_FIX_INTRINSIC to this function. Otherwise, if all of the parameters are estimated at once, it makes sense to restrict some of them, for example by passing CV_CALIB_SAME_FOCAL_LENGTH and CV_CALIB_ZERO_TANGENT_DIST, which is usually a reasonable assumption.
Similarly to calibrateCamera(), the function minimizes the total re-projection error over all points in all available views of both cameras, and returns the final value of that error.
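The first paragraph of the remarks can be made concrete with a small sketch: if (R1, T1) and (R2, T2) are the poses of the same board with respect to the first and second camera (for example from solvePnP()), the fixed relative pose that stereoCalibrate() estimates satisfies R = R2 * R1^T and T = T2 - R * T1. The helper below is only an illustration with placeholder names:

#include <opencv2/calib3d.hpp>

// rvec1/tvec1 and rvec2/tvec2: pose of the same board in camera 1 and camera 2.
// Outputs R and T: rotation and translation of camera 2 relative to camera 1.
void relativePose(const cv::Mat& rvec1, const cv::Mat& tvec1,
                  const cv::Mat& rvec2, const cv::Mat& tvec2,
                  cv::Mat& R, cv::Mat& T)
{
    cv::Mat R1, R2;
    cv::Rodrigues(rvec1, R1);   // rotation vector -> rotation matrix
    cv::Rodrigues(rvec2, R2);
    R = R2 * R1.t();            // x_cam2 = R * x_cam1 + T
    T = tvec2 - R * tvec1;
}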
Function:
Camera calibration: computes the intrinsic and extrinsic camera parameters.

Declaration:
void cvCalibrateCamera2( CvMat* object_points, CvMat* image_points, int* point_counts, CvSize image_size, CvMat* intrinsic_matrix, CvMat* distortion_coeffs, CvMat* rotation_vectors = NULL, CvMat* translation_vectors = NULL, int flags = 0 );

Parameters:
in      object_points - an N x 3 matrix. If each chessboard has k corners and the board is observed in M views, then N = k x M. When a chessboard is used, the z coordinate is set to 0 and the x, y coordinates are expressed in whatever physical unit is convenient (inches, millimetres, ...); all computed extrinsic results are then expressed in the same unit. The simplest convention is to define the side of one chessboard square as one unit.
in      image_points - an N x 2 matrix with the pixel coordinates of the corners found in the images, matching the points supplied in object_points.
in      point_counts - the number of corners in each image, given as an M x 1 matrix, where M is the number of views.
in      image_size - the image size in pixels.
in/out  intrinsic_matrix - the 3 x 3 camera intrinsic matrix. It can be used as input (in which case it influences the result) and as output (the usual case, since it is one of the quantities being estimated); the intrinsic matrix completely defines the behaviour of the ideal pinhole camera model.
in/out  distortion_coeffs - the distortion coefficients, a 5 x 1 vector, which can likewise act as input or output; the coefficients are stored in the order k1, k2, p1, p2, k3.
out     rotation_vectors - an M x 3 matrix, one rotation vector per view; a rotation vector can be converted to a rotation matrix with cvRodrigues2().
out     translation_vectors - an M x 3 matrix, one translation vector per view.

Flags:
1. CV_CALIB_USE_INTRINSIC_GUESS - normally cvCalibrateCamera2() needs no extra information to compute the intrinsic matrix; in particular the initial values of cx and cy (the image centre) are taken directly from image_size. If this flag is set, intrinsic_matrix is assumed to contain valid values that are used as the initial guess for the optimization.
2. CV_CALIB_FIX_PRINCIPAL_POINT - can be combined with CV_CALIB_USE_INTRINSIC_GUESS. If only this flag is set, the principal point is fixed at the image centre; if both flags are set, the principal point is fixed at the value supplied in intrinsic_matrix (another reason why intrinsic_matrix can act as input as well as output).
3. CV_CALIB_FIX_ASPECT_RATIO - during the optimization fx and fy may only change together, keeping their ratio fixed, while the other entries of intrinsic_matrix stay fixed. If CV_CALIB_USE_INTRINSIC_GUESS is not set, the initial fx and fy in intrinsic_matrix may be arbitrary, since only their ratio matters.
4. CV_CALIB_FIX_FOCAL_LENGTH - use the fx and fy passed in intrinsic_matrix directly during the optimization.
5. CV_CALIB_FIX_K1, CV_CALIB_FIX_K2, CV_CALIB_FIX_K3 - fix the radial distortion coefficients k1, k2, k3. The radial coefficients can be fixed in any combination of these flags; the last coefficient is normally fixed to 0 unless a fisheye lens is used.
6. CV_CALIB_ZERO_TANGENT_DIST - this flag is important when calibrating high-quality cameras, which are manufactured precisely enough to have negligible tangential distortion. Trying to fit parameters that are close to 0 can lead to noise and numerical instability; this flag turns off the fitting of the tangential distortion coefficients p1 and p2, i.e. both are set to 0.
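cvCalibrateCamera2 is the legacy C interface; in the current C++ API the same flags are passed to cv::calibrateCamera. A minimal sketch (objectPoints, imagePoints and imageSize are assumed to be filled exactly as in the monocular code above):

#include <opencv2/calib3d.hpp>
#include <vector>

double calibrateWithFlags(const std::vector<std::vector<cv::Point3f>>& objectPoints,
                          const std::vector<std::vector<cv::Point2f>>& imagePoints,
                          cv::Size imageSize, cv::Mat& cameraMatrix, cv::Mat& distCoeffs)
{
    // The initial guess only supplies the fx/fy ratio (here 1) used by CALIB_FIX_ASPECT_RATIO.
    cameraMatrix = cv::Mat::eye(3, 3, CV_64F);
    std::vector<cv::Mat> rvecs, tvecs;
    // Fix the fx/fy ratio and assume negligible tangential distortion,
    // mirroring CV_CALIB_FIX_ASPECT_RATIO and CV_CALIB_ZERO_TANGENT_DIST above.
    return cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                               cameraMatrix, distCoeffs, rvecs, tvecs,
                               cv::CALIB_FIX_ASPECT_RATIO | cv::CALIB_ZERO_TANGENT_DIST);
}

The returned value is the overall RMS re-projection error, the same quantity the monocular code computes by hand in its evaluation step.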
Function:
Compute the extrinsic camera parameters.

Declaration:
void cvFindExtrinsicCameraParams2( const CvMat* object_points, const CvMat* image_points, const CvMat* intrinsic_matrix, const CvMat* distortion_coeffs, CvMat* rotation_vector, CvMat* translation_vector );

Parameters:
in      object_points - matrix of the point coordinates in the world coordinate system.
in      image_points - matrix of the corresponding point coordinates in the image coordinate system.
in      intrinsic_matrix - the camera intrinsic matrix.
in      distortion_coeffs - the distortion coefficients.
out     rotation_vector - the output rotation vector.
out     translation_vector - the output translation vector.

Remarks:
Computes the extrinsic parameters from the image points, the world points, the intrinsic matrix and the distortion coefficients.
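cvFindExtrinsicCameraParams2 is the legacy C name; the C++ counterpart is cv::solvePnP. A minimal sketch with placeholder inputs (the intrinsics and distortion coefficients are assumed to come from a previous calibration):

#include <opencv2/calib3d.hpp>
#include <vector>

// objectPts: board corners in the world frame; imagePts: the matching pixel corners.
// Outputs rvec/tvec: pose of the board in the camera coordinate system.
bool boardPose(const std::vector<cv::Point3f>& objectPts,
               const std::vector<cv::Point2f>& imagePts,
               const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
               cv::Mat& rvec, cv::Mat& tvec)
{
    return cv::solvePnP(objectPts, imagePts, cameraMatrix, distCoeffs, rvec, tvec);
}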
Function:
Project 3D points onto the image plane.

Declaration:
void cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector, const CvMat* translation_vector, const CvMat* intrinsic_matrix, const CvMat* distortion_coeffs, CvMat* image_points, CvMat* dpdrot = NULL, CvMat* dpdt = NULL, CvMat* dpdf = NULL, CvMat* dpdc = NULL, CvMat* dpddist = NULL );

Parameters:
in      object_points - coordinates of the object points, a 3 x N or N x 3 matrix.
in      rotation_vector - the rotation vector, 1 x 3 or 3 x 1.
in      translation_vector - the translation vector, 1 x 3 or 3 x 1.
in      intrinsic_matrix - the camera intrinsic matrix.
in      distortion_coeffs - the distortion coefficient vector, 4 x 1 or 1 x 4, as [k1, k2, p1, p2]; if NULL, all distortion coefficients are treated as 0.
out     image_points - output array of the projected image point coordinates, 2 x N or N x 2, where N is the number of points in the view.
out     dpdrot - optional: derivatives of the image points with respect to the components of the rotation vector.
out     dpdt - optional: derivatives of the image points with respect to the components of the translation vector.
out     dpdf - optional: derivatives of the image points with respect to fx and fy.
out     dpdc - optional: derivatives of the image points with respect to cx and cy.
out     dpddist - optional: derivatives of the image points with respect to the distortion coefficients.

Remarks:
cvProjectPoints2 computes the projections of 3D points onto the image plane given the intrinsic and extrinsic parameters. In addition, the function can compute the Jacobian of the image points with respect to the projection parameters; this Jacobian is used during the global optimization inside cvCalibrateCamera2 and cvFindExtrinsicCameraParams2. The function can also be used to compute the re-projection error for a given set of intrinsic and extrinsic parameters.
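In the C++ API the counterpart is cv::projectPoints, which returns the derivatives mentioned above as one combined Jacobian matrix instead of the separate dpd* blocks. A minimal sketch with placeholder inputs:

#include <opencv2/calib3d.hpp>
#include <vector>

// Projects the 3D points with the given pose and intrinsics. 'jacobian' receives the
// derivatives of the projected points with respect to the rotation vector, translation
// vector, focal lengths, principal point and distortion coefficients (two rows per point).
void projectWithJacobian(const std::vector<cv::Point3f>& objectPts,
                         const cv::Mat& rvec, const cv::Mat& tvec,
                         const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
                         std::vector<cv::Point2f>& imagePts, cv::Mat& jacobian)
{
    cv::projectPoints(objectPts, rvec, tvec, cameraMatrix, distCoeffs, imagePts, jacobian);
}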