After detecting objects I need to measure their distance, but cameras that provide both depth and RGB images are expensive, so I picked up a cheap binocular (stereo) camera. It looks like this:
Bought on Taobao for only 35 RMB… It doesn't even have a case; I just taped it onto my laptop (please ignore the messy background).
The seller added me to a group chat where the way to open the camera is clearly documented, including calibration and distance measurement, so getting it working was not a big problem.
After the camera is opened, it looks like this:
The code is also short. (This camera comes with a .sh script used to enable it, presumably something like a driver.)
#include <opencv2/opencv.hpp>
#include <iostream>
#include <cstdlib>
using namespace cv;
using namespace std;
int main()
{
VideoCapture Camera(1);
if (!Camera.isOpened())
{
cout << "Could not open the Camera " << endl;
return -1;
}
Mat Frame;
Camera >> Frame;
imshow("ALL", Frame);
Mat DoubleImage;
system("/home/cxm-irene/文档/Two-eye/Camera/camera.sh"); // change this to the absolute path of your own script
//imshow("StereoView", Frame);
cout << "here" << endl;
while (true)
{
Camera >> Frame;
if (Frame.empty()) break;
resize(Frame, DoubleImage, Size(640, 240), 0, 0, INTER_AREA);
imwrite("stereo_view.jpg", Frame);
imshow("ALL-Double", DoubleImage);
Mat LeftImage = DoubleImage(Rect(0, 0, 320, 240));
Mat RightImage = DoubleImage(Rect(320, 0, 320, 240));
imshow("LEFT", LeftImage);
imshow("RIGHT", RightImage);
char c = (char)waitKey(30);
if (c == 27) // exit on Esc
{
break;
}
}
return 0;
}
Below I will walk through the calibration and distance-measurement process. I won't cover the theory here (I may write that up later when I have time); this is mainly code and result screenshots.
Notes:
1. Take 30 to 40 calibration photos. At first I only took 10, and the distance results were consistently wrong.
2. Fix the camera firmly and keep it level, and shoot the calibration board (the chessboard) several times from different angles and distances.
3. If, after calibrating, the computed depth map barely lines up with the color image, re-calibrate. Also, do not hold the printed chessboard in your hand; it is best to glue it onto a rigid board.
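The calibration program further below does not include the capture step, so here is a minimal capture sketch. It assumes the same side-by-side 640x240 stream as the viewer program above, splits it into the two 320x240 halves, and saves them under names like left_0.jpg / right_0.jpg (the names used later in this post); the 's' hotkey and the output location are my own choices:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
int main()
{
VideoCapture Camera(1);
if (!Camera.isOpened())
{
cout << "Could not open the Camera" << endl;
return -1;
}
Mat Frame, DoubleImage;
int index = 0;
while (true)
{
Camera >> Frame;
if (Frame.empty()) break;
resize(Frame, DoubleImage, Size(640, 240), 0, 0, INTER_AREA);
imshow("ALL-Double", DoubleImage);
char c = (char)waitKey(30);
if (c == 's') // press 's' to save one left/right pair
{
imwrite("left_" + to_string(index) + ".jpg", DoubleImage(Rect(0, 0, 320, 240)));
imwrite("right_" + to_string(index) + ".jpg", DoubleImage(Rect(320, 0, 320, 240)));
index++;
}
else if (c == 27) // Esc to quit
break;
}
return 0;
}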
After taking photos with the left and right cameras, write the absolute paths of the images into an xml file, as shown below.
If you write this xml file by hand, don't forget the two closing lines at the end:
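A minimal sketch of such a list file, assuming the standard OpenCV FileStorage XML layout (the paths and the node name imagelist are placeholders), saved as stereo_calibration.xml next to the program, since that is the name the calibration code opens. The images must alternate left, right, left, right, because the code reads pairs as imageList[i * 2 + cameraId]; the last two lines are the closing tags you must not forget:
<?xml version="1.0"?>
<opencv_storage>
<imagelist>
"/home/.../left_0.jpg"
"/home/.../right_0.jpg"
"/home/.../left_1.jpg"
"/home/.../right_1.jpg"
</imagelist>
</opencv_storage>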
Here is the calibration code (it does not include the camera-opening and photo-taking part; see the capture sketch above):
#if 1
#include <opencv2/opencv.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <string>
#include <vector>
// These parameters must be changed to match your chessboard.
// For example, if the board is 10 squares wide, w = 10 - 1 = 9.
#define w 9 // number of inner corners along the board's width
#define h 6 // number of inner corners along the board's height
const float chessboardSquareSize = 24.6f; // side length of one chessboard square, in mm
using namespace std;
using namespace cv;
// read the image paths from the xml file
static bool readStringList(const string& filename, vector<string>& list)
{
list.resize(0);
FileStorage fs(filename, FileStorage::READ);
if (!fs.isOpened())
return false;
FileNode n = fs.getFirstTopLevelNode();
if (n.type() != FileNode::SEQ)
return false;
FileNodeIterator it = n.begin(), it_end = n.end();
for (; it != it_end; ++it)
list.push_back((string)*it);
return true;
}
// generate the 3D object points of the chessboard corners
static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners)
{
corners.resize(0);
for (int i = 0; i < boardSize.height; i++) // do not swap height and width here
for (int j = 0; j < boardSize.width; j++)
{
corners.push_back(Point3f(j*squareSize, i*squareSize, 0));
}
}
bool calibrate(Mat& intrMat, Mat& distCoeffs, vector<vector<Point2f>>& imagePoints,
vector<vector<Point3f>>& ObjectPoints, Size& imageSize, const int cameraId,
vector<string> imageList)
{
double rms = 0; // reprojection error
Size boardSize;
boardSize.width = w;
boardSize.height = h;
vector<Point2f> pointBuf;
float squareSize = chessboardSquareSize;
vector<Mat> rvecs, tvecs; // per-image rotation and translation vectors
bool ok = false;
int nImages = (int)imageList.size() / 2;
cout <<"图片张数"<< nImages;
namedWindow("View", 1);
int nums = 0; // number of images with a valid chessboard
for (int i = 0; i< nImages; i++)
{
Mat view, viewGray;
cout<<"Now: "<<imageList[i * 2 + cameraId]<<endl;
view = imread(imageList[i * 2 + cameraId], 1); // read the image
imageSize = view.size();
cvtColor(view, viewGray, COLOR_BGR2GRAY); // convert to grayscale
bool found = findChessboardCorners(view, boardSize, pointBuf,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE); // find chessboard corners
if (found)
{
nums++;
cornerSubPix(viewGray, pointBuf, Size(11, 11),
Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
drawChessboardCorners(view, boardSize, Mat(pointBuf), found);
bitwise_not(view, view);
imagePoints.push_back(pointBuf);
cout << '.';
}
else{
cout<<"Wrong"<<endl;
}
imshow("View", view);
waitKey(100);
}
cout << "有效棋盘格张数" << nums << endl;
//calculate chessboardCorners
calcChessboardCorners(boardSize, squareSize, ObjectPoints[0]);
ObjectPoints.resize(imagePoints.size(), ObjectPoints[0]);
rms = calibrateCamera(ObjectPoints, imagePoints, imageSize, intrMat, distCoeffs,
rvecs, tvecs);
ok = checkRange(intrMat) && checkRange(distCoeffs);
if (ok)
{
cout << "done with RMS error=" << rms << endl;
return true;
}
else
return false;
}
int main()
{
//initialize some parameters
bool okcalib = false;
Mat intrMatFirst, intrMatSec, distCoeffsFirst, distCoffesSec;
Mat R, T, E, F, RFirst, RSec, PFirst, PSec, Q;
vector<vector<Point2f>> imagePointsFirst, imagePointsSec;
vector<vector<Point3f>> ObjectPoints(1);
Rect validRoi[2];
Size imageSize;
int cameraIdFirst = 0, cameraIdSec = 1;
double rms = 0;
//get pictures and calibrate
vector<string> imageList;
string filename = "stereo_calibration.xml";
bool okread = readStringList(filename, imageList);
if (!okread || imageList.empty())
{
cout << "can not open " << filename << " or the string list is empty" << endl;
return false;
}
if (imageList.size() % 2 != 0)
{
cout << "Error: the image list contains odd (non-even) number of elements\n";
return false;
}
FileStorage fs("intrinsics.yml", FileStorage::WRITE);
//calibrate
cout << "calibrate left camera..." << endl;
okcalib = calibrate(intrMatFirst, distCoeffsFirst, imagePointsFirst, ObjectPoints,
imageSize, cameraIdFirst, imageList);
if (!okcalib)
{
cout << "fail to calibrate left camera" << endl;
return -1;
}
else
{
cout << "calibrate the right camera..." << endl;
}
okcalib = calibrate(intrMatSec, distCoffesSec, imagePointsSec, ObjectPoints,
imageSize, cameraIdSec, imageList);
fs << "M1" << intrMatFirst << "D1" << distCoeffsFirst <<
"M2" << intrMatSec << "D2" << distCoffesSec;
if (!okcalib)
{
cout << "fail to calibrate the right camera" << endl;
return -1;
}
destroyAllWindows();
//estimate position and orientation
cout << "estimate position and orientation of the second camera" << endl
<< "relative to the first camera..." << endl;
cout << "intrMatFirst:";
cout << intrMatFirst << endl;
cout << "distCoeffsFirst:";
cout << distCoeffsFirst << endl;
cout << "intrMatSec:";
cout << intrMatSec << endl;
cout << "distCoffesSec:";
cout << distCoffesSec << endl;
rms = stereoCalibrate(ObjectPoints, imagePointsFirst, imagePointsSec,
intrMatFirst, distCoeffsFirst, intrMatSec, distCoffesSec,
imageSize, R, T, E, F, CALIB_USE_INTRINSIC_GUESS,//CV_CALIB_FIX_INTRINSIC,
TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, 1e-6)); // stereo calibration; rms is the reprojection error
cout << "done with RMS error=" << rms << endl;
//stereo rectify
cout << "stereo rectify..." << endl;
stereoRectify(intrMatFirst, distCoeffsFirst, intrMatSec, distCoffesSec, imageSize, R, T, RFirst,
RSec, PFirst, PSec, Q, CALIB_ZERO_DISPARITY, -1, imageSize, &validRoi[0], &validRoi[1]);
cout << "Q" << Q << endl;
cout << "P1" << PFirst << endl;
cout << "P2" << PSec << endl;
//read pictures for 3d-reconstruction
if (fs.isOpened())
{
cout << "in";
fs << "R" << R << "T" << T << "R1" << RFirst << "R2" << RSec << "P1" << PFirst << "P2" << PSec << "Q" << Q;
fs.release();
}
namedWindow("canvas", 1);
cout << "read the picture for 3d-reconstruction..."<<endl;;
Mat canvas(imageSize.height, imageSize.width * 2, CV_8UC3), viewLeft, viewRight;
Mat canLeft = canvas(Rect(0, 0, imageSize.width, imageSize.height));
Mat canRight = canvas(Rect(imageSize.width, 0, imageSize.width, imageSize.height));
viewLeft = imread(imageList[6], 1);//cameraIdFirst
viewRight = imread(imageList[7], 1); //cameraIdSec
cout<<"Choose: "<<imageList[6]<<" "<<imageList[7]<<endl;
viewLeft.copyTo(canLeft);
viewRight.copyTo(canRight);
cout << "done" << endl;
imshow("canvas", canvas);
waitKey(1500); // waitKey is required here, otherwise the image may not be displayed
//stereoRectify
Mat rmapFirst[2], rmapSec[2], rviewFirst, rviewSec;
initUndistortRectifyMap(intrMatFirst, distCoeffsFirst, RFirst, PFirst,
imageSize, CV_16SC2, rmapFirst[0], rmapFirst[1]);//CV_16SC2
initUndistortRectifyMap(intrMatSec, distCoffesSec, RSec, PSec,//CV_16SC2
imageSize, CV_16SC2, rmapSec[0], rmapSec[1]);
remap(viewLeft, rviewFirst, rmapFirst[0], rmapFirst[1], INTER_LINEAR);
imshow("remap", rviewFirst);
waitKey(2000);
remap(viewRight, rviewSec, rmapSec[0], rmapSec[1], INTER_LINEAR);
rviewFirst.copyTo(canLeft);
rviewSec.copyTo(canRight);
//rectangle(canLeft, validRoi[0], Scalar(255, 0, 0), 3, 8);
//rectangle(canRight, validRoi[1], Scalar(255, 0, 0), 3, 8);
Mat before_rectify = imread("/home/cxm-irene/文档/Two-eye/Image-Collect/Picture/thumbnail_3.jpg");
for (int j = 0; j <= canvas.rows; j += 16) // draw green horizontal lines
line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
for (int j = 0; j <= canvas.rows; j += 16) // draw green horizontal lines
line(before_rectify, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
cout << "stereo rectify done" << endl;
imshow("Before", before_rectify); // show the un-rectified image with green lines
imshow("After", canvas); // show the rectified image with green lines
waitKey(400000); // waitKey is required here, otherwise the image may not be displayed
/* Explanation (http://masikkk.com/article/OpenCV-imshow-waitkey/):
A common mistake for OpenCV newcomers is to call cv::imshow() in a loop through video frames,
without following up each draw with cv::waitKey(30).In this case, nothing appears on screen,
because highgui is never given time to process the draw requests from cv::imshow().
*/
return 0;
}
#endif
The only things you need to change are a few paths and the chessboard parameters:
#define w 9 // number of inner corners along the board's width
#define h 6 // number of inner corners along the board's height
const float chessboardSquareSize = 24.6f; // side length of one chessboard square, in mm
Then just run it.
While it runs, images like this are displayed:
This means the photo is valid. When the program finishes, it generates an intrinsics.yml file holding the camera parameters: the left and right intrinsic matrices, the left and right distortion coefficients, and the translation vector and rotation matrix between the cameras, as shown below:
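Roughly, it is a plain OpenCV FileStorage YAML file with the keys the calibration program writes; the sketch below only shows the structure, with placeholder values rather than real calibration results:
%YAML:1.0
M1: !!opencv-matrix      # left intrinsic matrix (3x3)
   rows: 3
   cols: 3
   dt: d
   data: [ ... ]
D1: !!opencv-matrix      # left distortion coefficients
   ...
M2: !!opencv-matrix      # right intrinsic matrix
   ...
D2: !!opencv-matrix      # right distortion coefficients
   ...
R: !!opencv-matrix       # rotation between the two cameras
   ...
T: !!opencv-matrix       # translation between the two cameras
   ...
R1: ...                  # rectification rotations
R2: ...
P1: ...                  # projection matrices
P2: ...
Q: !!opencv-matrix       # reprojection matrix, used later for distance
   ...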
With these parameters we can measure distance.
I tried both the BM and SGBM algorithms; in both cases all parameters are read directly from the intrinsics.yml file.
Code:
#include <opencv2/opencv.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <iostream>
using namespace std;
using namespace cv;
const int imageWidth = 320; // resolution of each single-camera image
const int imageHeight = 240;
Vec3f point3;
float d;
Size imageSize = Size(imageWidth, imageHeight);
Mat rgbImageL, grayImageL;
Mat rgbImageR, grayImageR;
Mat rectifyImageL, rectifyImageR;
Rect validROIL; // after rectification the image is cropped; validROI is the remaining valid region
Rect validROIR;
Mat cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, R, T;
Mat mapLx, mapLy, mapRx, mapRy; // rectification maps
Mat Rl, Rr, Pl, Pr, Q; // rectification rotations R, projection matrices P, reprojection matrix Q
Mat xyz; // 3D coordinates
Point origin; // point where the mouse button was pressed
Rect selection; // rectangular selection
bool selectObject = false; // whether an object is being selected
int blockSize = 0, uniquenessRatio = 0, numDisparities = 0;
Ptr<StereoBM> bm = StereoBM::create(16, 9);
/***** stereo matching *****/
void stereo_match(int, void*)
{
bm->setBlockSize(2 * blockSize + 5); // SAD window size; 5 to 21 works well
bm->setROI1(validROIL);
bm->setROI2(validROIR);
bm->setPreFilterCap(31);
bm->setMinDisparity(0); // minimum disparity; default 0, may be negative
bm->setNumDisparities(numDisparities * 16 + 16); // disparity range (max minus min disparity); must be a multiple of 16
bm->setTextureThreshold(10);
bm->setUniquenessRatio(uniquenessRatio); // uniquenessRatio helps reject false matches
bm->setSpeckleWindowSize(100);
bm->setSpeckleRange(32);
bm->setDisp12MaxDiff(-1);
Mat disp, disp8;
bm->compute(rectifyImageL, rectifyImageR, disp); // input images must be grayscale
disp.convertTo(disp8, CV_8U, 255 / ((numDisparities * 16 + 16) * 16.)); // the computed disparity is CV_16S
reprojectImageTo3D(disp, xyz, Q, true); // the X/W, Y/W, Z/W returned here must be multiplied by 16 to get correct 3D coordinates
xyz = xyz * 16;
imshow("disparity", disp8);
}
/***** mouse callback *****/
static void onMouse(int event, int x, int y, int, void*)
{
if (selectObject)
{
selection.x = MIN(x, origin.x);
selection.y = MIN(y, origin.y);
selection.width = std::abs(x - origin.x);
selection.height = std::abs(y - origin.y);
}
switch (event)
{
case EVENT_LBUTTONDOWN: // left button pressed
origin = Point(x, y);
selection = Rect(x, y, 0, 0);
selectObject = true;
//cout << origin << "in world coordinate is: " << xyz.at(origin) << endl;
point3 = xyz.at<Vec3f>(origin);
cout << "world coordinates:" << endl;
cout << "x: " << point3[0] << " y: " << point3[1] << " z: " << point3[2] << endl;
d = point3[0] * point3[0] + point3[1] * point3[1] + point3[2] * point3[2];
d = sqrt(d); // mm
// cout << "distance: " << d << " mm" << endl;
d = d / 10.0; // cm
cout << "distance: " << d << " cm" << endl;
// d = d / 1000.0; // m
// cout << "distance: " << d << " m" << endl;
break;
case EVENT_LBUTTONUP: // left button released
selectObject = false;
if (selection.width > 0 && selection.height > 0)
break;
}
}
/***** main *****/
int main()
{
FileStorage fs("/home/cxm-irene/文档/Two-eye/Check/build/intrinsics.yml", FileStorage::READ);
if (fs.isOpened())
{
cout << "read";
fs["M1"] >> cameraMatrixL;fs["D1"] >> distCoeffL;
fs["M2"] >> cameraMatrixR;fs["D2"] >> distCoeffR;
fs["R"] >> R;
fs["T"] >> T;
fs["Q"] >> Q;
cout << "M1-D1" << cameraMatrixL << endl << distCoeffL << endl;
cout << "M2-D2" << cameraMatrixL << endl << distCoeffL << endl;
cout << "R" << R <<endl;
cout << "T" << T <<endl;
cout << "Q" << Q <<endl;
fs.release();
}
/*
stereo rectification
*/
// Rodrigues(rec, R); // Rodrigues transform
stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T, Rl, Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY,
0, imageSize, &validROIL, &validROIR);
initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, imageSize, CV_32FC1, mapLx, mapLy);
initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, imageSize, CV_32FC1, mapRx, mapRy);
/*
read the images
*/
rgbImageL = imread("/home/cxm-irene/文档/Two-eye/Image-Collect/Picture/left_0.jpg", CV_LOAD_IMAGE_COLOR);
cvtColor(rgbImageL, grayImageL, CV_BGR2GRAY);
rgbImageR = imread("/home/cxm-irene/文档/Two-eye/Image-Collect/Picture/right_0.jpg", CV_LOAD_IMAGE_COLOR);
cvtColor(rgbImageR, grayImageR, CV_BGR2GRAY);
imshow("ImageL Before Rectify", grayImageL);
imshow("ImageR Before Rectify", grayImageR);
/*
after remap, the left and right images are coplanar and row-aligned
*/
remap(grayImageL, rectifyImageL, mapLx, mapLy, INTER_LINEAR);
remap(grayImageR, rectifyImageR, mapRx, mapRy, INTER_LINEAR);
/*
display the rectification results
*/
Mat rgbRectifyImageL, rgbRectifyImageR;
cvtColor(rectifyImageL, rgbRectifyImageL, CV_GRAY2BGR); // back to 3 channels so colored lines can be drawn
cvtColor(rectifyImageR, rgbRectifyImageR, CV_GRAY2BGR);
// show each image individually
//rectangle(rgbRectifyImageL, validROIL, Scalar(0, 0, 255), 3, 8);
//rectangle(rgbRectifyImageR, validROIR, Scalar(0, 0, 255), 3, 8);
imshow("ImageL After Rectify", rgbRectifyImageL);
imshow("ImageR After Rectify", rgbRectifyImageR);
// show both images on one canvas
Mat canvas;
double sf;
int w, h;
sf = 600. / MAX(imageSize.width, imageSize.height);
w = cvRound(imageSize.width * sf);
h = cvRound(imageSize.height * sf);
canvas.create(h, w * 2, CV_8UC3); // note: 3 channels
// draw the left image onto the canvas
Mat canvasPart = canvas(Rect(w * 0, 0, w, h)); // take part of the canvas
resize(rgbRectifyImageL, canvasPart, canvasPart.size(), 0, 0, INTER_AREA); // scale the image to the size of canvasPart
Rect vroiL(cvRound(validROIL.x*sf), cvRound(validROIL.y*sf), // valid ROI scaled to canvas coordinates
cvRound(validROIL.width*sf), cvRound(validROIL.height*sf));
//rectangle(canvasPart, vroiL, Scalar(0, 0, 255), 3, 8); // draw a rectangle
cout << "Painted ImageL" << endl;
// draw the right image onto the canvas
canvasPart = canvas(Rect(w, 0, w, h)); // take the other part of the canvas
resize(rgbRectifyImageR, canvasPart, canvasPart.size(), 0, 0, INTER_LINEAR);
Rect vroiR(cvRound(validROIR.x * sf), cvRound(validROIR.y*sf),
cvRound(validROIR.width * sf), cvRound(validROIR.height * sf));
//rectangle(canvasPart, vroiR, Scalar(0, 0, 255), 3, 8);
cout << "Painted ImageR" << endl;
// draw the corresponding epipolar lines
for (int i = 0; i < canvas.rows; i += 16)
line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(0, 255, 0), 1, 8);
imshow("rectified", canvas);
/*
stereo matching
*/
namedWindow("disparity", CV_WINDOW_AUTOSIZE);
// trackbar for the SAD window size
createTrackbar("BlockSize:\n", "disparity", &blockSize, 16, stereo_match);
// trackbar for the uniqueness ratio
createTrackbar("UniquenessRatio:\n", "disparity", &uniquenessRatio, 50, stereo_match);
// trackbar for the number of disparities
createTrackbar("NumDisparities:\n", "disparity", &numDisparities, 16, stereo_match);
// setMouseCallback(window name, mouse callback, user data - usually 0)
setMouseCallback("disparity", onMouse, 0);
stereo_match(0, 0);
waitKey(0);
return 0;
}
The results look like this. First, with BlockSize, UniquenessRatio, and NumDisparities all set to 0:
Since the objects I want to detect are people, I simply had a person stand in front of the camera for testing. The result is quite blurry, but you can still roughly make out a person; after tuning the parameters I got a reasonably good-looking result:
Click anywhere on the person and the terminal prints the distance, i.e. the straight-line distance from the camera:
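For example (with made-up values): if clicking a point prints x = 100, y = 50, z = 2000 (all in mm), the straight-line distance is sqrt(100*100 + 50*50 + 2000*2000) ≈ 2003 mm, which the program prints as about 200 cm.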
The code for the SGBM version:
#if 1
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
Mat rectifyImageL, rectifyImageR;
Rect validROIL; // after rectification the image is cropped; validROI is the remaining valid region
Rect validROIR;
Mat Q;
Mat xyz; // 3D coordinates
Ptr<StereoBM> bm = StereoBM::create(16, 9);
int blockSize = 0, uniquenessRatio = 0, numDisparities = 0;
void stereo_match_sgbm(int, void*) // SGBM matching (note: this listing still uses the StereoBM matcher created above)
{
bm->setBlockSize(2 * blockSize + 5); // SAD window size; 5 to 21 works well
bm->setROI1(validROIL);
bm->setROI2(validROIR);
bm->setPreFilterCap(31);
bm->setMinDisparity(0); // minimum disparity; default 0, may be negative
bm->setNumDisparities(numDisparities * 16 + 16); // disparity range (max minus min disparity); must be a multiple of 16
bm->setTextureThreshold(10);
bm->setUniquenessRatio(uniquenessRatio); // uniquenessRatio helps reject false matches
bm->setSpeckleWindowSize(100);
bm->setSpeckleRange(32);
bm->setDisp12MaxDiff(-1);
Mat disp, disp8;
bm->compute(rectifyImageL, rectifyImageR, disp); // input images must be grayscale
disp.convertTo(disp8, CV_8U, 255 / ((numDisparities * 16 + 16) * 16.)); // the computed disparity is CV_16S
reprojectImageTo3D(disp, xyz, Q, true); // the X/W, Y/W, Z/W returned here must be multiplied by 16 to get correct 3D coordinates
xyz = xyz * 16;
imshow("disparity", disp8);
}
Point origin; // point where the mouse button was pressed
Rect selection; // rectangular selection
bool selectObject = false; // whether an object is being selected
//-- mouse callback -----------------------------------------------------
static void onMouse(int event, int x, int y, int, void*)
{
if (selectObject)
{
selection.x = MIN(x, origin.x);
selection.y = MIN(y, origin.y);
selection.width = std::abs(x - origin.x);
selection.height = std::abs(y - origin.y);
}
switch (event)
{
case EVENT_LBUTTONDOWN: // left button pressed
origin = Point(x, y);
selection = Rect(x, y, 0, 0);
selectObject = true;
cout << origin << " in world coordinates is: " << xyz.at<Vec3f>(origin) << endl;
break;
case EVENT_LBUTTONUP: // left button released
selectObject = false;
if (selection.width > 0 && selection.height > 0)
break;
}
}
int main()
{
Mat intrMatFirst, intrMatSec, distCoeffsFirst, distCoffesSec;
Mat R, T, E, F, RFirst, RSec, PFirst, PSec ;
Rect validRoi[2];
Mat viewLeft, viewRight;
viewLeft = imread("/home/cxm-irene/文档/Two-eye/Image-Collect/Picture/left_0.jpg", 1); // image from the left camera; change to your own path
viewRight = imread("/home/cxm-irene/文档/Two-eye/Image-Collect/Picture/right_0.jpg", 1); // image from the right camera
imshow("viewLeft", viewLeft);
waitKey(40);
cout << "done" << endl;
Size imageSize = viewLeft.size();
FileStorage fs("/home/cxm-irene/文档/Two-eye/Check/build/intrinsics.yml", FileStorage::READ);
if (fs.isOpened())
{
cout << "read";
fs["M1"] >> intrMatFirst;fs["D1"] >> distCoeffsFirst;
fs["M2"] >> intrMatSec;fs["D2"] >> distCoffesSec;
fs["R"] >> R;
fs["T"] >> T;
fs["Q"] >> Q;
cout << "M1" << intrMatFirst << endl << distCoeffsFirst;
fs.release();
}
cout << "stereo rectify..." << endl;
stereoRectify(intrMatFirst, distCoeffsFirst, intrMatSec, distCoffesSec, imageSize, R, T, RFirst,
RSec, PFirst, PSec, Q, CALIB_ZERO_DISPARITY, -1, imageSize, &validROIL, &validROIR);
//stereoRectify
Mat rmapFirst[2], rmapSec[2], rviewFirst, rviewSec;
initUndistortRectifyMap(intrMatFirst, distCoeffsFirst, RFirst, PFirst,
imageSize, CV_16SC2, rmapFirst[0], rmapFirst[1]);//CV_16SC2
initUndistortRectifyMap(intrMatSec, distCoffesSec, RSec, PSec,//CV_16SC2
imageSize, CV_16SC2, rmapSec[0], rmapSec[1]);
remap(viewLeft, rectifyImageL, rmapFirst[0], rmapFirst[1], INTER_LINEAR);
remap(viewRight, rectifyImageR, rmapSec[0], rmapSec[1], INTER_LINEAR);
cvtColor(rectifyImageL, rectifyImageL, CV_BGR2GRAY);
cvtColor(rectifyImageR, rectifyImageR, CV_BGR2GRAY);
imshow("remap_left", rectifyImageL);
imshow("remap_right", rectifyImageR);
//-- display results ----------------------------------------------------------------------------
namedWindow("disparity", WINDOW_NORMAL);
//-- trackbar for the SAD window size -----------------------------------------------------------
createTrackbar("BlockSize:\n", "disparity", &blockSize, 16, stereo_match_sgbm);
//-- trackbar for the uniqueness ratio ----------------------------------------------------------
createTrackbar("UniquenessRatio:\n", "disparity", &uniquenessRatio, 50, stereo_match_sgbm);
//-- trackbar for the number of disparities -----------------------------------------------------
createTrackbar("NumDisparities:\n", "disparity", &numDisparities, 30, stereo_match_sgbm);
//-- setMouseCallback(window name, mouse callback, user data - usually 0) -----------------------
setMouseCallback("disparity", onMouse, 0);
stereo_match_sgbm(0, 0); // run the matcher once with the initial parameters
waitKey(400000); // waitKey is required here, otherwise the image may not be displayed
return 0;
}
#endif
Here are the result images for this version as well; the parameters end up being about the same:
Same as before: click on a point and the result is printed in the terminal:
Unlike the BM version, though, what is printed here are the x, y, z values, in mm.
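If you want the straight-line distance here as well, the same calculation used in the BM version can be dropped into onMouse. A minimal sketch (assuming xyz has already been multiplied by 16 as in stereo_match_sgbm, so the values are in mm):
// inside the EVENT_LBUTTONDOWN case of onMouse:
Vec3f p = xyz.at<Vec3f>(origin); // 3D point in mm
float dist = sqrt(p[0] * p[0] + p[1] * p[1] + p[2] * p[2]); // straight-line distance in mm
cout << "distance: " << dist / 10.0 << " cm" << endl;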
The distances measured with BM and SGBM come out about the same; to be honest I also forgot exactly where I was standing…
In short, calibration matters a lot: the parameters you get directly determine how good the distances computed from the disparity map will be. When I have time later I will write up the theory behind computing distance from the disparity map and behind stereo matching.