Binocular stereo distance measurement with OpenCV

There is a QQ group, 193369905 (you can also contact the group owner about related graduation projects). I have been working on binocular stereo distance measurement recently. There is plenty of material online in Matlab, Python and C++, but personally I found none of it detailed enough, and it is hard for a beginner to get started. So here is a step-by-step tutorial: calibration is done in Matlab through its graphical toolbox, with no code at all, and the distance measurement and depth map are then implemented in C++. I will not go into the theory; beginners can simply follow along.
1. Preparation
Hardware
https://item.taobao.com/item.htm?spm=a1z10.1-c-s.w4004-17093912817.2.6af681c0jaZTur&id=562773790704
One binocular camera module (shown in the figure below); the Taobao link is above.
[Figure 1: the binocular camera module]
Software
Visual Studio + OpenCV 3.1
Matlab + the camera calibration toolbox
C++ code
For setting up Visual Studio with OpenCV, see this post: https://www.cnblogs.com/linshuhe/p/5764394.html; it explains the process in enough detail. We will use VS + OpenCV 3.1 to implement real-time distance measurement.

2. Matlab calibration

Matlab is used to calibrate the individual cameras and the stereo pair. I have already covered this; see this post: http://blog.csdn.net/hyacinthkiss/article/details/41317087
Once you have all the tools above ready, we can start.
Matlab calibration gives us the following data:

[Figure 2: calibration results from the Matlab toolbox]
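To make the mapping explicit, here is a minimal sketch of how the toolbox output, using the Bouguet naming convention (fc, cc and kc for each camera, om and T for the stereo pair), is copied into the OpenCV matrices used later; the numbers are the example values from section 3, so substitute your own results.

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;

int main()
{
	// Left intrinsics: fc = [fx fy], cc = [cx cy] from the toolbox
	Mat cameraMatrixL = (Mat_<double>(3, 3) << 682.55880, 0, 384.13666,   // fx  0  cx
		0, 682.24569, 311.19558,                                          //  0 fy  cy
		0, 0, 1);
	// Left distortion: kc = [k1 k2 p1 p2 k3]
	Mat distCoeffL = (Mat_<double>(5, 1) << -0.51614, 0.36098, 0.00523, -0.00225, 0.00000);
	// Stereo extrinsics: om (rotation vector) and T (translation, same unit as the square size)
	Mat rec = (Mat_<double>(3, 1) << -0.00306, -0.03207, 0.00206);
	Mat T = (Mat_<double>(3, 1) << -61.34485, 2.89570, -4.76870);

	Mat R;
	Rodrigues(rec, R);   // om -> 3x3 rotation matrix, needed later by stereoRectify
	std::cout << "R = " << R << std::endl;
	return 0;
}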

Alternatively, the single-camera and stereo calibration can be done in C++.

Single-camera calibration

#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;
using namespace std;
#define calibration

int main()
{
#ifdef calibration

	ifstream fin("right_img.txt");                   /* path list of the calibration images */
	ofstream fout("caliberation_result_right.txt");  /* file that stores the calibration results */

	// Read every image, extract the chessboard corners, then refine them to sub-pixel accuracy
	int image_count = 0;  /* number of images */
	Size image_size;      /* image size */
	Size board_size = Size(11, 8);            /* number of inner corners per row / column of the board */
	vector<Point2f> image_points_buf;         /* corners detected in the current image */
	vector<vector<Point2f>> image_points_seq; /* all detected corners */
	string filename;      // image file name
	vector<string> filenames;

	while (getline(fin, filename))
	{
		++image_count;
		Mat imageInput = imread(filename);
		filenames.push_back(filename);

		// Take the image size from the first image
		if (image_count == 1)
		{
			image_size.width = imageInput.cols;
			image_size.height = imageInput.rows;
		}

		/* Extract the chessboard corners */
		if (0 == findChessboardCorners(imageInput, board_size, image_points_buf))
		{
			cout << "**" << filename << "** can not find chessboard corners!\n";
			exit(1);
		}
		else
		{
			Mat view_gray;
			cvtColor(imageInput, view_gray, CV_BGR2GRAY);  // imread returns BGR, so convert BGR -> gray

			/* Sub-pixel refinement */
			// image_points_buf: initial corner coordinates, also receives the refined sub-pixel positions
			// Size(5,5): search window size
			// Size(-1,-1): no dead zone
			// TermCriteria: termination criterion, here a combination of iteration count and accuracy
			cornerSubPix(view_gray, image_points_buf, Size(5, 5), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));

			image_points_seq.push_back(image_points_buf);  // keep the refined corners

			/* Draw the detected corners on the image */
			drawChessboardCorners(view_gray, board_size, image_points_buf, false);

			imshow("Camera Calibration", view_gray);       // show the image

			waitKey(500); // pause for 0.5 s
		}
	}
	int CornerNum = board_size.width * board_size.height;  // total number of corners per image

	//-------------------- Camera calibration --------------------

	/* Board geometry */
	Size square_size = Size(60, 60);         /* measured size of one chessboard square, in mm */
	vector<vector<Point3f>> object_points;   /* 3D coordinates of the board corners */

	/* Intrinsic and extrinsic parameters */
	Mat cameraMatrix = Mat(3, 3, CV_32FC1, Scalar::all(0));  /* camera intrinsic matrix */
	vector<int> point_counts;                                /* number of corners in each image */
	Mat distCoeffs = Mat(1, 5, CV_32FC1, Scalar::all(0));    /* the 5 distortion coefficients: k1, k2, p1, p2, k3 */
	vector<Mat> tvecsMat;      /* translation vector of each image */
	vector<Mat> rvecsMat;      /* rotation vector of each image */

	/* Initialise the 3D coordinates of the board corners */
	int i, j, t;
	for (t = 0; t<image_count; t++)
	{
		vector<Point3f> tempPointSet;
		for (i = 0; i<board_size.height; i++)
		{
			for (j = 0; j<board_size.width; j++)
			{
				Point3f realPoint;

				/* Assume the board lies in the z = 0 plane of the world coordinate system */
				realPoint.x = i * square_size.width;
				realPoint.y = j * square_size.height;
				realPoint.z = 0;
				tempPointSet.push_back(realPoint);
			}
		}
		object_points.push_back(tempPointSet);
	}

	/* Initialise the number of corners per image, assuming the full board is visible in every image */
	for (i = 0; i<image_count; i++)
	{
		point_counts.push_back(board_size.width * board_size.height);
	}

	/* Run the calibration */
	// object_points     3D corner coordinates in the world coordinate system
	// image_points_seq  image coordinates of each inner corner
	// image_size        image size in pixels
	// cameraMatrix      output intrinsic matrix
	// distCoeffs        output distortion coefficients
	// rvecsMat          output rotation vectors
	// tvecsMat          output translation vectors
	// 0                 calibration flags
	calibrateCamera(object_points, image_points_seq, image_size, cameraMatrix, distCoeffs, rvecsMat, tvecsMat, 0);

	//-------------------- Calibration done --------------------

	//-------------------- Evaluate the calibration result --------------------

	double total_err = 0.0;         /* sum of the mean errors of all images */
	double err = 0.0;               /* mean error of one image */
	vector<Point2f> image_points2;  /* reprojected points */
	fout << "Calibration error of each image:\n";

	for (i = 0; i<image_count; i++)
	{
		vector<Point3f> tempPointSet = object_points[i];

		/* Reproject the 3D points with the estimated intrinsic and extrinsic parameters */
		projectPoints(tempPointSet, rvecsMat[i], tvecsMat[i], cameraMatrix, distCoeffs, image_points2);

		/* Compute the error between the reprojected points and the detected points */
		vector<Point2f> tempImagePoint = image_points_seq[i];
		Mat tempImagePointMat = Mat(1, tempImagePoint.size(), CV_32FC2);
		Mat image_points2Mat = Mat(1, image_points2.size(), CV_32FC2);

		for (int j = 0; j < tempImagePoint.size(); j++)
		{
			image_points2Mat.at<Vec2f>(0, j) = Vec2f(image_points2[j].x, image_points2[j].y);
			tempImagePointMat.at<Vec2f>(0, j) = Vec2f(tempImagePoint[j].x, tempImagePoint[j].y);
		}
		err = norm(image_points2Mat, tempImagePointMat, NORM_L2);
		total_err += err /= point_counts[i];
		fout << "Mean error of image " << i + 1 << ": " << err << " pixels" << endl;
	}
	fout << "Overall mean error: " << total_err / image_count << " pixels" << endl << endl;

	//-------------------- Evaluation done --------------------

	//-------------------- Save the calibration results --------------------
	Mat rotation_matrix = Mat(3, 3, CV_32FC1, Scalar::all(0));  /* rotation matrix of each image */
	fout << "Camera intrinsic matrix:" << endl;
	fout << cameraMatrix << endl << endl;
	fout << "Distortion coefficients:\n";
	fout << distCoeffs << endl << endl << endl;
	for (int i = 0; i<image_count; i++)
	{
		fout << "Rotation vector of image " << i + 1 << ":" << endl;
		fout << rvecsMat[i] << endl;

		/* Convert the rotation vector to the corresponding rotation matrix */
		Rodrigues(rvecsMat[i], rotation_matrix);
		fout << "Rotation matrix of image " << i + 1 << ":" << endl;
		fout << rotation_matrix << endl;
		fout << "Translation vector of image " << i + 1 << ":" << endl;
		fout << tvecsMat[i] << endl << endl;
	}
	fout << endl;

	//-------------------- Saving done --------------------

	//-------------------- Show the undistorted images --------------------

	Mat mapx = Mat(image_size, CV_32FC1);
	Mat mapy = Mat(image_size, CV_32FC1);
	Mat R = Mat::eye(3, 3, CV_32F);
	string imageFileName;
	std::stringstream StrStm;
	for (int i = 0; i != image_count; i++)
	{
		initUndistortRectifyMap(cameraMatrix, distCoeffs, R, cameraMatrix, image_size, CV_32FC1, mapx, mapy);
		Mat imageSource = imread(filenames[i]);
		Mat newimage = imageSource.clone();
		remap(imageSource, newimage, mapx, mapy, INTER_LINEAR);
		StrStm.clear();
		imageFileName.clear();
		StrStm << i + 1;
		StrStm >> imageFileName;
		imageFileName += "_d.jpg";
		imwrite(imageFileName, newimage);
	}

	fin.close();
	fout.close();

#else 
		/// Read a test image and undistort it with previously saved intrinsic parameters
		Mat src = imread("F:\\lane_line_detection\\left_img\\1.jpg");
		Mat distortion = src.clone();
		Mat camera_matrix = Mat(3, 3, CV_32FC1);
		Mat distortion_coefficients;


		// Load the camera intrinsic matrix and distortion coefficients
		FileStorage file_storage("F:\\lane_line_detection\\left_img\\Intrinsic.xml", FileStorage::READ);
		file_storage["CameraMatrix"] >> camera_matrix;
		file_storage["Dist"] >> distortion_coefficients;
		file_storage.release();

		// Undistort
		cv::undistort(src, distortion, camera_matrix, distortion_coefficients);

		cv::imshow("img", src);
		cv::imshow("undistort", distortion);
		cv::imwrite("undistort.jpg", distortion);

		cv::waitKey(0);
#endif // calibration
	return 0;
}
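The program above expects right_img.txt to contain one image path per line; a hypothetical list (the file names are only an illustration, point them at your own calibration images) could look like this:

right1.jpg
right2.jpg
right3.jpg
right4.jpg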


Stereo calibration

// Stereo camera calibration
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <cstdio>

#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace std;
using namespace cv;
// Camera resolution
const int imageWidth = 640;
const int imageHeight = 480;
// Number of inner corners per row
const int boardWidth = 11;
// Number of inner corners per column
const int boardHeight = 8;
// Total number of corners
const int boardCorner = boardWidth * boardHeight;
// Number of image pairs used for the calibration
const int frameNumber = 8;
// Size of one chessboard square, in mm
const int squareSize = 60;
// Inner-corner layout of the board
const Size boardSize = Size(boardWidth, boardHeight);
Size imageSize = Size(imageWidth, imageHeight);

Mat R, T, E, F;
// R rotation matrix, T translation vector, E essential matrix, F fundamental matrix
vector<Mat> rvecs; // rotation vectors
vector<Mat> tvecs; // translation vectors
// Corner coordinates of all images from the left camera
vector<vector<Point2f>> imagePointL;
// Corner coordinates of all images from the right camera
vector<vector<Point2f>> imagePointR;
// Physical (world) coordinates of the corners for each image
vector<vector<Point3f>> objRealPoint;
// Corner coordinates of one image from the left camera
vector<Point2f> cornerL;
// Corner coordinates of one image from the right camera
vector<Point2f> cornerR;

Mat rgbImageL, grayImageL;
Mat rgbImageR, grayImageR;

Mat intrinsic;
Mat distortion_coeff;
// Rectification rotation matrices R, projection matrices P, reprojection matrix Q
Mat Rl, Rr, Pl, Pr, Q;
// Rectification maps
Mat mapLx, mapLy, mapRx, mapRy;
Rect validROIL, validROIR;
// After rectification the images are cropped; validROI is the cropped region
/* Pre-calibrated intrinsic matrix of the left camera:
fx 0 cx
0 fy cy
0  0  1
*/
Mat cameraMatrixL = (Mat_<double>(3,3) << 271.7792785637638, 0, 313.4559554347688,
	0, 271.9513066781816, 232.7561625477742,
	0, 0, 1);
// Distortion coefficients of the left camera
Mat distCoeffL = (Mat_<double>(5,1) << -0.3271838086967946, 0.1326861805365006, -0.0008527407221595511, -0.0003398213328658643, -0.02847446149341753);
/* Pre-calibrated intrinsic matrix of the right camera:
fx 0 cx
0 fy cy
0  0  1
*/
Mat cameraMatrixR = (Mat_<double>(3,3) << 268.4990780091891, 0, 325.75156647688,
	0, 269.7906504513069, 212.5928387210573,
	0, 0, 1);
Mat distCoeffR = (Mat_<double>(5,1) << -0.321298212260166, 0.1215100334221875, -0.0007504391036193558, -1.732473939234179e-05, -0.02234659175488724);

/* Compute the physical coordinates of the board corners */
void calRealPoint(vector<vector<Point3f>>& obj, int boardWidth, int boardHeight, int imgNumber, int squareSize)
{
    vector<Point3f> imgpoint;
    for (int rowIndex = 0; rowIndex < boardHeight; rowIndex++)
    {
        for (int colIndex = 0; colIndex < boardWidth; colIndex++)
        {
            imgpoint.push_back(Point3f(rowIndex * squareSize, colIndex * squareSize, 0));
        }
    }
    for (int imgIndex = 0; imgIndex < imgNumber; imgIndex++)
    {
        obj.push_back(imgpoint);
    }
}



void outputCameraParam(void)
{
	/* Save and print the camera parameters */
	FileStorage fs("intrisics.yml", FileStorage::WRITE);
	if (fs.isOpened())
	{
		fs << "cameraMatrixL" << cameraMatrixL << "cameraDistcoeffL" << distCoeffL << "cameraMatrixR" << cameraMatrixR << "cameraDistcoeffR" << distCoeffR;
		fs.release();
		cout << "cameraMatrixL=:" << cameraMatrixL << endl << "cameraDistcoeffL=:" << distCoeffL << endl << "cameraMatrixR=:" << cameraMatrixR << endl << "cameraDistcoeffR=:" << distCoeffR << endl;
	}
	else
	{
		cout << "Error: can not save the intrinsics!!!!" << endl;
	}

	fs.open("extrinsics.yml", FileStorage::WRITE);
	if (fs.isOpened())
	{
		fs << "R" << R << "T" << T << "Rl" << Rl << "Rr" << Rr << "Pl" << Pl << "Pr" << Pr << "Q" << Q;
		cout << "R=" << R << endl << "T=" << T << endl << "Rl=" << Rl << endl << "Rr" << Rr << endl << "Pl" << Pl << endl << "Pr" << Pr << endl << "Q" << Q << endl;
		fs.release();
	}
	else
	{
		cout << "Error: can not save the extrinsic parameters\n";
	}

}


int main(int argc, char* argv[])
{
    Mat img;
    int goodFrameCount = 0;
    while (goodFrameCount < frameNumber)
    {
        char filename[100];
        /* Read the left image */
        sprintf(filename, "/home/crj/calibration/left_img/left%d.jpg", goodFrameCount + 1);

        rgbImageL = imread(filename, CV_LOAD_IMAGE_COLOR);
        imshow("chessboardL", rgbImageL);
        cvtColor(rgbImageL, grayImageL, CV_BGR2GRAY);
        /* Read the right image */
        sprintf(filename, "/home/crj/calibration/right_img/right%d.jpg", goodFrameCount + 1);
        rgbImageR = imread(filename, CV_LOAD_IMAGE_COLOR);
        cvtColor(rgbImageR, grayImageR, CV_BGR2GRAY);

        bool isFindL, isFindR;
        isFindL = findChessboardCorners(rgbImageL, boardSize, cornerL);
        isFindR = findChessboardCorners(rgbImageR, boardSize, cornerR);
        if (isFindL == true && isFindR == true)
        {
            cornerSubPix(grayImageL, cornerL, Size(5,5), Size(-1,-1), TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 0.1));
            drawChessboardCorners(rgbImageL, boardSize, cornerL, isFindL);
            imshow("chessboardL", rgbImageL);
            imagePointL.push_back(cornerL);

            cornerSubPix(grayImageR, cornerR, Size(5,5), Size(-1,-1), TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 0.1));
            drawChessboardCorners(rgbImageR, boardSize, cornerR, isFindR);
            imshow("chessboardR", rgbImageR);
            imagePointR.push_back(cornerR);

            goodFrameCount++;
            cout << "the image" << goodFrameCount << " is good" << endl;
        }
        else
        {
            cout << "the image is bad please try again" << endl;
        }
        if (waitKey(10) == 'q')
        {
            break;
        }
    }

    // Compute the 3D world coordinates of the corners, based on the measured square size
    calRealPoint(objRealPoint, boardWidth, boardHeight, frameNumber, squareSize);
    cout << "cal real successful" << endl;

    // Stereo calibration
    double rms = stereoCalibrate(objRealPoint, imagePointL, imagePointR,
        cameraMatrixL, distCoeffL,
        cameraMatrixR, distCoeffR,
        Size(imageWidth, imageHeight), R, T, E, F, CALIB_USE_INTRINSIC_GUESS,
        TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, 1e-5));

    cout << "Stereo Calibration done with RMS error = " << rms << endl;

    stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T, Rl, 
        Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY, -1, imageSize, &validROIL,&validROIR);
    

    // Rectification maps for the two cameras
    initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, imageSize, CV_32FC1, mapLx, mapLy);
    initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, imageSize, CV_32FC1, mapRx, mapRy);

    Mat rectifyImageL, rectifyImageR;
    cvtColor(grayImageL, rectifyImageL, CV_GRAY2BGR);
    cvtColor(grayImageR, rectifyImageR, CV_GRAY2BGR);

    imshow("Rectify Before", rectifyImageL);
    cout << "Press Q to exit..." << endl;
    // After remap the left and right images are coplanar and row-aligned
    Mat rectifyImageL2, rectifyImageR2;
    remap(rectifyImageL, rectifyImageL2, mapLx, mapLy, INTER_LINEAR);
    remap(rectifyImageR, rectifyImageR2, mapRx, mapRy, INTER_LINEAR);
    cout << "Press Q to exit..." << endl;

    imshow("rectifyImageL", rectifyImageL2);
    imshow("rectifyImageR", rectifyImageR2);

    outputCameraParam();

    // Show the rectification result side by side
    Mat canvas;
    double sf;
    int w,h;
    sf = 600. / MAX(imageSize.width, imageSize.height);
    w = cvRound(imageSize.width * sf);
    h = cvRound(imageSize.height * sf);
    canvas.create(h, w*2, CV_8UC3);

    // Draw the left image onto the canvas
    Mat canvasPart = canvas(Rect(0, 0, w, h));
    resize(rectifyImageL2, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);
    Rect vroiL(cvRound(validROIL.x*sf), cvRound(validROIL.y*sf),
        cvRound(validROIL.width*sf), cvRound(validROIL.height*sf));
    rectangle(canvasPart, vroiL, Scalar(0, 0, 255), 3, 8);

    cout << "Painted ImageL" << endl;

    // Draw the right image onto the canvas
    canvasPart = canvas(Rect(w, 0, w, h));
    resize(rectifyImageR2, canvasPart, canvasPart.size(), 0, 0, INTER_LINEAR);
    Rect vroiR(cvRound(validROIR.x*sf), cvRound(validROIR.y*sf),
        cvRound(validROIR.width*sf), cvRound(validROIR.height*sf));
    rectangle(canvasPart, vroiR, Scalar(0, 255, 0), 3, 8);

    cout << "Painted ImageR" << endl;

    // Draw horizontal lines to check the row alignment
    for (int i = 0; i < canvas.rows; i += 16)
        line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(0, 255, 0), 1, 8);
    
    imshow("rectified", canvas);
    
    cout << "wait key" << endl;
    waitKey(0);
    return 0;
}
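After the stereo calibration runs, outputCameraParam() writes intrisics.yml and extrinsics.yml. A minimal sketch of reading those files back with cv::FileStorage (the key names match what the code above writes; the printout is just a sanity check):

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
    Mat cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR;
    Mat R, T, Rl, Rr, Pl, Pr, Q;

    FileStorage fsIn("intrisics.yml", FileStorage::READ);      // written by outputCameraParam()
    fsIn["cameraMatrixL"] >> cameraMatrixL;
    fsIn["cameraDistcoeffL"] >> distCoeffL;
    fsIn["cameraMatrixR"] >> cameraMatrixR;
    fsIn["cameraDistcoeffR"] >> distCoeffR;
    fsIn.release();

    FileStorage fsEx("extrinsics.yml", FileStorage::READ);
    fsEx["R"] >> R;   fsEx["T"] >> T;
    fsEx["Rl"] >> Rl; fsEx["Rr"] >> Rr;
    fsEx["Pl"] >> Pl; fsEx["Pr"] >> Pr;
    fsEx["Q"] >> Q;
    fsEx.release();

    cout << "Q = " << Q << endl;   // Q is the reprojection matrix used later by reprojectImageTo3D
    return 0;
}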

3. Distance measurement with C++ and OpenCV
Fill the Matlab calibration data into the code below. Take care not to enter anything incorrectly, or you will get bizarre results; the comments make it clear where every value goes.

/*
Pre-calibrated camera parameters:
fx 0 cx
0 fy cy
0 0  1
*/
Mat cameraMatrixL = (Mat_<double>(3, 3) << 682.55880, 0, 384.13666,
    0, 682.24569, 311.19558,
    0, 0, 1);
// Left camera intrinsic matrix from Matlab
Mat distCoeffL = (Mat_<double>(5, 1) << -0.51614, 0.36098, 0.00523, -0.00225, 0.00000);
// Left camera distortion coefficients from Matlab

Mat cameraMatrixR = (Mat_<double>(3, 3) << 685.03817, 0, 397.39092,
    0, 682.54282, 272.04875,
    0, 0, 1);
// Right camera intrinsic matrix from Matlab

Mat distCoeffR = (Mat_<double>(5, 1) << -0.46640, 0.22148, 0.00947, -0.00242, 0.00000);
// Right camera distortion coefficients from Matlab

Mat T = (Mat_<double>(3, 1) << -61.34485, 2.89570, -4.76870); // translation vector T from Matlab
Mat rec = (Mat_<double>(3, 1) << -0.00306, -0.03207, 0.00206); // rotation vector, the Matlab "om" parameter
Mat R; // rotation matrix

4. Complete code (here I use two still images taken with the camera above for the distance measurement; the same code can be adapted for real-time measurement on video, as sketched below. Be sure to copy the images into your project directory.)
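As a rough sketch of the real-time variant: assuming the binocular camera shows up as a single device that delivers one side-by-side frame (the device index and the 1600x600 combined resolution are assumptions, adjust them to your hardware), the two halves of each captured frame would replace the imread calls in the complete program below.

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
	VideoCapture cap(0);                      // assumed device index
	cap.set(CAP_PROP_FRAME_WIDTH, 1600);      // assumed side-by-side output: two 800x600 views
	cap.set(CAP_PROP_FRAME_HEIGHT, 600);

	Mat frame, grayImageL, grayImageR;
	while (cap.read(frame))
	{
		// Split the side-by-side frame into the left and right views
		Mat left = frame(Rect(0, 0, frame.cols / 2, frame.rows));
		Mat right = frame(Rect(frame.cols / 2, 0, frame.cols / 2, frame.rows));
		cvtColor(left, grayImageL, COLOR_BGR2GRAY);
		cvtColor(right, grayImageR, COLOR_BGR2GRAY);

		// ...remap + stereo matching exactly as in the complete program below...

		imshow("left", grayImageL);
		imshow("right", grayImageR);
		if (waitKey(1) == 'q') break;
	}
	return 0;
}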


/**********************************************/
/*  Stereo matching and distance measurement  */
/**********************************************/

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

const int imageWidth = 800;                             // camera resolution
const int imageHeight = 600;
Size imageSize = Size(imageWidth, imageHeight);

Mat rgbImageL, grayImageL;
Mat rgbImageR, grayImageR;
Mat rectifyImageL, rectifyImageR;

Rect validROIL;   // after rectification the images are cropped; validROI is the cropped region
Rect validROIR;

Mat mapLx, mapLy, mapRx, mapRy;     // rectification maps
Mat Rl, Rr, Pl, Pr, Q;              // rectification rotation matrices R, projection matrices P, reprojection matrix Q
Mat xyz;              // 3D coordinates

Point origin;         // starting point of the mouse press
Rect selection;       // rectangular selection
bool selectObject = false;    // whether an object is being selected

int blockSize = 0, uniquenessRatio = 0, numDisparities = 0;
Ptr<StereoBM> bm = StereoBM::create(16, 9);

/*
Pre-calibrated camera parameters:
fx 0 cx
0 fy cy
0 0  1
*/
Mat cameraMatrixL = (Mat_<double>(3, 3) << 682.55880, 0, 384.13666,
	0, 682.24569, 311.19558,
	0, 0, 1);
// Left camera intrinsic matrix from Matlab
Mat distCoeffL = (Mat_<double>(5, 1) << -0.51614, 0.36098, 0.00523, -0.00225, 0.00000);
// Left camera distortion coefficients from Matlab

Mat cameraMatrixR = (Mat_<double>(3, 3) << 685.03817, 0, 397.39092,
	0, 682.54282, 272.04875,
	0, 0, 1);
// Right camera intrinsic matrix from Matlab

Mat distCoeffR = (Mat_<double>(5, 1) << -0.46640, 0.22148, 0.00947, -0.00242, 0.00000);
// Right camera distortion coefficients from Matlab

Mat T = (Mat_<double>(3, 1) << -61.34485, 2.89570, -4.76870); // translation vector T from Matlab
Mat rec = (Mat_<double>(3, 1) << -0.00306, -0.03207, 0.00206); // rotation vector, the Matlab "om" parameter
Mat R; // rotation matrix


	  /***** Stereo matching *****/
void stereo_match(int, void*)
{
	bm->setBlockSize(2 * blockSize + 5);     // SAD window size, best kept between 5 and 21
	bm->setROI1(validROIL);
	bm->setROI2(validROIR);
	bm->setPreFilterCap(31);
	bm->setMinDisparity(0);  // minimum disparity, default 0, may be negative
	bm->setNumDisparities(numDisparities * 16 + 16); // disparity range (max minus min disparity), must be a multiple of 16
	bm->setTextureThreshold(10);
	bm->setUniquenessRatio(uniquenessRatio); // uniquenessRatio helps reject false matches
	bm->setSpeckleWindowSize(100);
	bm->setSpeckleRange(32);
	bm->setDisp12MaxDiff(-1);
	Mat disp, disp8;
	bm->compute(rectifyImageL, rectifyImageR, disp); // the input images must be grayscale
	disp.convertTo(disp8, CV_8U, 255 / ((numDisparities * 16 + 16)*16.)); // the computed disparity is CV_16S (fixed point, scaled by 16)
	reprojectImageTo3D(disp, xyz, Q, true); // the X/W, Y/W, Z/W returned by reprojectImageTo3D must be multiplied by 16 to get the correct 3D coordinates
	xyz = xyz * 16;
	imshow("disparity", disp8);
}

/***** Mouse callback *****/
static void onMouse(int event, int x, int y, int, void*)
{
	if (selectObject)
	{
		selection.x = MIN(x, origin.x);
		selection.y = MIN(y, origin.y);
		selection.width = std::abs(x - origin.x);
		selection.height = std::abs(y - origin.y);
	}

	switch (event)
	{
	case EVENT_LBUTTONDOWN:   // left mouse button pressed
		origin = Point(x, y);
		selection = Rect(x, y, 0, 0);
		selectObject = true;
		cout << origin << " in world coordinate is: " << xyz.at<Vec3f>(origin) << endl;
		break;
	case EVENT_LBUTTONUP:    // left mouse button released
		selectObject = false;
		if (selection.width > 0 && selection.height > 0)
			break;
	}
}


/***** Main *****/
int main()
{
	/*
	Stereo rectification
	*/
	Rodrigues(rec, R); // Rodrigues transform: rotation vector -> rotation matrix
	stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T, Rl, Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY,
		0, imageSize, &validROIL, &validROIR);
	initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, imageSize, CV_32FC1, mapLx, mapLy);
	initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, imageSize, CV_32FC1, mapRx, mapRy);

	/*
	Read the images
	*/
	rgbImageL = imread("left.bmp", CV_LOAD_IMAGE_COLOR);
	cvtColor(rgbImageL, grayImageL, CV_BGR2GRAY);
	rgbImageR = imread("right.bmp", CV_LOAD_IMAGE_COLOR);
	cvtColor(rgbImageR, grayImageR, CV_BGR2GRAY);

	imshow("ImageL Before Rectify", grayImageL);
	imshow("ImageR Before Rectify", grayImageR);

	/*
	After remap the left and right images are coplanar and row-aligned
	*/
	remap(grayImageL, rectifyImageL, mapLx, mapLy, INTER_LINEAR);
	remap(grayImageR, rectifyImageR, mapRx, mapRy, INTER_LINEAR);

	/*
	Show the rectification result
	*/
	Mat rgbRectifyImageL, rgbRectifyImageR;
	cvtColor(rectifyImageL, rgbRectifyImageL, CV_GRAY2BGR);  // back to 3 channels so colored rectangles and lines can be drawn
	cvtColor(rectifyImageR, rgbRectifyImageR, CV_GRAY2BGR);

	// Show them individually
	//rectangle(rgbRectifyImageL, validROIL, Scalar(0, 0, 255), 3, 8);
	//rectangle(rgbRectifyImageR, validROIR, Scalar(0, 0, 255), 3, 8);
	imshow("ImageL After Rectify", rgbRectifyImageL);
	imshow("ImageR After Rectify", rgbRectifyImageR);

	// Show both on one canvas
	Mat canvas;
	double sf;
	int w, h;
	sf = 600. / MAX(imageSize.width, imageSize.height);
	w = cvRound(imageSize.width * sf);
	h = cvRound(imageSize.height * sf);
	canvas.create(h, w * 2, CV_8UC3);   // note: 3 channels

	// Draw the left image onto the canvas
	Mat canvasPart = canvas(Rect(w * 0, 0, w, h));                              // take the left half of the canvas
	resize(rgbRectifyImageL, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);  // resize the image to the size of canvasPart
	Rect vroiL(cvRound(validROIL.x*sf), cvRound(validROIL.y*sf),                // the valid region, scaled to the canvas
		cvRound(validROIL.width*sf), cvRound(validROIL.height*sf));
	//rectangle(canvasPart, vroiL, Scalar(0, 0, 255), 3, 8);                    // draw a rectangle around it
	cout << "Painted ImageL" << endl;

	// Draw the right image onto the canvas
	canvasPart = canvas(Rect(w, 0, w, h));                                      // take the right half of the canvas
	resize(rgbRectifyImageR, canvasPart, canvasPart.size(), 0, 0, INTER_LINEAR);
	Rect vroiR(cvRound(validROIR.x * sf), cvRound(validROIR.y*sf),
		cvRound(validROIR.width * sf), cvRound(validROIR.height * sf));
	//rectangle(canvasPart, vroiR, Scalar(0, 0, 255), 3, 8);
	cout << "Painted ImageR" << endl;

	// Draw horizontal lines to check the row alignment
	for (int i = 0; i < canvas.rows; i += 16)
		line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(0, 255, 0), 1, 8);
	imshow("rectified", canvas);

	/*
	Stereo matching
	*/
	namedWindow("disparity", CV_WINDOW_AUTOSIZE);
	// Trackbar for the SAD window size
	createTrackbar("BlockSize:\n", "disparity", &blockSize, 8, stereo_match);
	// Trackbar for the uniqueness ratio
	createTrackbar("UniquenessRatio:\n", "disparity", &uniquenessRatio, 50, stereo_match);
	// Trackbar for the disparity range
	createTrackbar("NumDisparities:\n", "disparity", &numDisparities, 16, stereo_match);
	// Mouse callback: setMouseCallback(window name, callback, user data)
	setMouseCallback("disparity", onMouse, 0);
	stereo_match(0, 0);

	waitKey(0);
	return 0;
}
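For reference, onMouse above only prints the raw 3D coordinate of the clicked pixel. A small sketch of turning that coordinate into a single distance value (distanceAt is a hypothetical helper, not part of the original code; the units follow the calibration input, millimetres here, so divide by 1000 for metres):

#include <opencv2/opencv.hpp>
#include <cmath>

// Distance of a clicked pixel from the left camera, given the xyz map
// produced by reprojectImageTo3D (already multiplied by 16 in stereo_match).
static float distanceAt(const cv::Mat& xyz, cv::Point p)
{
	cv::Vec3f pt = xyz.at<cv::Vec3f>(p);   // (X, Y, Z) in the left camera frame
	return std::sqrt(pt[0] * pt[0] + pt[1] * pt[1] + pt[2] * pt[2]);   // pt[2] alone is the depth Z
}
// Possible use inside onMouse: cout << distanceAt(xyz, origin) / 1000.0 << " m" << endl;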

If you have questions about this code or the calibration process, or run into any difficulty, you can email [email protected] or join QQ group 193369905 to discuss. Thanks everyone; original work is not easy, and donations are welcome.
