Code for the paper "Refractive three-dimensional reconstruction for underwater stereo digital image correlation"

In this paper, the authors address the distortion that arises when reconstructing 3D shapes underwater with a stereo camera. The most critical step of their method is reconstructing the equation of the refractive interface, yet the source code released by the authors does not include this part.
My approach: paste circular markers on the refractive plane, obtain the 3D coordinates of the marker centers with the stereo camera, and fit the plane equation with the RANSAC algorithm.
The interface-fitting code can fit any plane, so if you need to recover a plane equation from stereo-camera measurements, this part of the code is equally useful.
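For reference, the interface is modeled as the plane Ax + By + Cz + D = 0, and the RANSAC step below scores a candidate plane by the point-to-plane distance |Ax0 + By0 + Cz0 + D| / sqrt(A^2 + B^2 + C^2); a point counts as an inlier when this distance falls below a threshold.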
Below I give the code for constructing the refractive interface equation, followed by the code for refractive 3D reconstruction.
The refractive 3D reconstruction code is also available in the authors' source release: https://github.com/ZL-Su/Matrice
The source code and related test files from this post are available on my GitHub: https://github.com/duanying98/Refractive-three-dimensional-reconstruction-for-underwater-stereo-digital-image-correlation
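Both programs below are standalone C++ files: the first depends only on OpenCV, the second on nothing but the standard library. The image and calibration paths are hard-coded Windows paths and should be adapted to your own setup.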

Plane equation fitting code

#include <iostream>
#include <string>
#include <vector>
#include <ctime>
#include <cmath>
#include <cstdlib>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
#define PI acos(-1)


Size imageSize; //resolution of the calibration images
Mat cameraMatrix1; //left camera intrinsic matrix
Mat distCoeffs1; //left camera distortion coefficients
Mat cameraMatrix2; //right camera intrinsic matrix
Mat distCoeffs2; //right camera distortion coefficients
Mat R, T; //rotation R and translation vector T between the cameras
Mat R1, R2, P1, P2, Q; //rectification rotations R1, R2; projection matrices P1, P2; reprojection matrix Q
Mat mapx1, mapy1, mapx2, mapy2; //rectification maps
Rect validROI1, validROI2; //the images are cropped after rectification; validROI is the cropped region
Mat imgLeft; //rectified left image
Mat imgRight; //rectified right image
#define cannythreshold 80

//where the Canny edge images produced during marker detection are saved
const string savePath = "E://bandzipDocuments//Circle//data_1_Circle//imgL_Circle.jpg";
const string savePath2 = "E://bandzipDocuments//Circle//data_1_Circle//imgR_Circle.jpg";

//camera calibration file
const string filename = "D://Projects//c++Projects//circleCoding//Calibration_Result.xml";

//left and right images used for plane reconstruction
const string path1 = "E://bandzipDocuments//Circle//data_1//imgL.jpg";
const string path2 = "E://bandzipDocuments//Circle//data_1//imgR.jpg";

//whether to display the rectified image pair
bool is_show_rectify_performance = false;


//matching tolerances: a left/right marker pair matches when the y difference is below y and the x difference is below imgLeft.cols / x
int y = 10;
int x = 23;

//whether to display the marker-matching result
bool is_show_stereo_match = false;


//RANSAC plane-fitting parameters
int max_iter = 10000; //number of iterations
double ransac_threshold = 0.000005; //inlier distance threshold


//plane equation (or line equation) parameters
struct vec4d {
	double A;
	double B;
	double C;
	double D;
};


//read the camera parameters
bool readFile(string filename)
{
	FileStorage fs(filename, FileStorage::READ);
	if (fs.isOpened())
	{
		fs["width"] >> imageSize.width;
		fs["height"] >> imageSize.height;
		fs["cameraMatrix1"] >> cameraMatrix1;
		fs["distCoeffs1"] >> distCoeffs1;
		fs["cameraMatrix2"] >> cameraMatrix2;
		fs["distCoeffs2"] >> distCoeffs2;
		fs["R"] >> R;
		fs["T"] >> T;
		fs.release();


		/*cout << "Succeed to read the Calibration result!!!" << endl;
		cout << "Image size:   " << imageSize.width << "  " << imageSize.height << endl;
		cout << "Left camera intrinsic matrix:" << endl << cameraMatrix1 << endl;
		cout << "Left camera distortion coefficients:" << endl << distCoeffs1 << endl;
		cout << "Right camera intrinsic matrix:" << endl << cameraMatrix2 << endl;
		cout << "Right camera distortion coefficients:" << endl << distCoeffs2 << endl;
		cout << "R:" << endl << R << endl;
		cout << "T:" << endl << T << endl;*/

		return true;
	}
	else
	{
		cerr << "Error: can not open the Calibration result file!!!!!" << endl;
		return false;
	}
}

//read the camera parameters (separate intrinsics/extrinsics files)
bool readFile(string intrinsics, string extrinsics)
{
	FileStorage is(intrinsics, FileStorage::READ);
	FileStorage es(extrinsics, FileStorage::READ);
	if (is.isOpened() && es.isOpened())
	{
		is["M1"] >> cameraMatrix1;
		is["D1"] >> distCoeffs1;
		is["M2"] >> cameraMatrix2;
		is["D2"] >> distCoeffs2;
		es["R"] >> R;
		es["T"] >> T;
		is.release();
		es.release();

		cout << "Succeed to read the Calibration result!!!" << endl;
		cout << "左相机内参:" << endl << cameraMatrix1 << endl;
		cout << "左相机外参:" << endl << distCoeffs1 << endl;
		cout << "右相机内参:" << endl << cameraMatrix2 << endl;
		cout << "右相机外参:" << endl << distCoeffs2 << endl;
		cout << "R:" << endl << R << endl;
		cout << "T:" << endl << T << endl;
		return true;
	}
	else
	{
		cerr << "Error: can not open the Calibration result file!!!!!" << endl;
		return false;
	}
}

//rectify the input left/right images, producing imgLeft and imgRight
void stereo_rectify(Mat grayimgLeft, Mat grayimgRight)
{
	/**************************************************************************************/
	/*
	Stereo rectification makes the left and right images coplanar and row-aligned.
	It requires the following parameters:
	cameraMatrix1, distCoeffs1, R1, P1
	cameraMatrix2, distCoeffs2, R2, P2
	Bouguet's method
	This is the "calibrated stereo rectification" approach: it takes the intrinsic
	matrices, distortion coefficients, R and T from stereo calibration as input and
	uses stereoRectify() to compute R1, P1, R2, P2.
	*/
	/**************************************************************************************/
		//the alpha parameter strongly affects the result
		//alpha: image cropping factor, either -1 or in the range 0~1.
		//alpha = 0: OpenCV scales and shifts the rectified images so that remap shows only valid pixels (irregular border regions removed); suitable for e.g. robot obstacle avoidance;
		//alpha = 1: remap keeps every pixel of the original image; suitable for high-end cameras with very little distortion;
		//0 < alpha < 1: OpenCV keeps a corresponding fraction of the border pixels;
		//alpha = -1 (used here): OpenCV scales and shifts automatically.
	stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, R1, R2, P1, P2, Q,
		0, -1, imageSize, &validROI1, &validROI2);

	/*Compute the remap tables mapx, mapy from the R and P produced by stereoRectify.
	These tables are then passed to remap() to rectify the images so that the two
	views are coplanar and row-aligned.
	The newCameraMatrix parameter of initUndistortRectifyMap() is the rectified camera
	matrix. In OpenCV the rectified camera matrix Mrect is returned inside the
	projection matrix P, so we pass P here and the function reads Mrect from it.
	*/
	initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, imageSize, CV_16SC2, mapx1, mapy1);
	initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, imageSize, CV_16SC2, mapx2, mapy2);

	Mat medianBlurLeft, medianBlurRight;
	medianBlur(grayimgLeft, medianBlurLeft, 5);
	medianBlur(grayimgRight, medianBlurRight, 5);

	Mat gaussianBlurLeft, gaussianBlurRight;
	GaussianBlur(medianBlurLeft, gaussianBlurLeft, Size(5, 5), 0, 0);
	GaussianBlur(medianBlurRight, gaussianBlurRight, Size(5, 5), 0, 0);

	//after remap, the left and right images are coplanar and row-aligned
	remap(gaussianBlurLeft, imgLeft, mapx1, mapy1, INTER_LINEAR);
	remap(gaussianBlurRight, imgRight, mapx2, mapy2, INTER_LINEAR);
}

//display the rectification result
void show_rectify_performance()
{
	/**********************************************************************************/
	/*********** draw the rectified left and right images side by side ***************/
	Mat canvas;
	double sf = 0.7;
	int w, h;
	w = cvRound(imageSize.width * sf);
	h = cvRound(imageSize.height * sf);
	canvas.create(h, w * 2, CV_8UC1);
	//draw the left image onto the canvas
	//take the left half of the canvas
	Mat canvasPart = canvas(Rect(w * 0, 0, w, h));
	//resize the image to the size of canvasPart and copy it into the canvas ROI
	resize(imgLeft, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);
	//draw the right image onto the canvas
	canvasPart = canvas(Rect(w, 0, w, h));
	resize(imgRight, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);
	//draw the horizontal alignment lines (white, since the canvas is single-channel)
	for (int i = 0; i < canvas.rows; i += 16)
		line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(255), 1, 8);
	imshow("rectified", canvas);
	cv::waitKey(0);
	/**********************************************************************************/
}

//input: a rectified grayscale image and a save path for the Canny edge image; returns the ellipses (RotatedRect) fitted to the detected markers
vector<RotatedRect> findellipse(Mat LLL,string savePath)
{
	Mat src_img, dst_img;
	vector<RotatedRect> box2;
	src_img = LLL;
	//mean (box) filter
	blur(src_img, src_img, Size(3, 3));
	Canny(src_img, dst_img, cannythreshold, cannythreshold * 3);

	imwrite(savePath, dst_img.clone());

	vector<vector<Point>> contours;
	vector<Vec4i> hireachy;
	findContours(dst_img, contours, hireachy, RETR_EXTERNAL, CHAIN_APPROX_TC89_L1);
	for (unsigned int i = 0; i < contours.size(); i++)
	{

		double S1 = contourArea(contours[i]);
		if (S1>20)
		{
			Mat pointsf;
			Mat(contours[i]).convertTo(pointsf, CV_32F);
			RotatedRect box = fitEllipse(pointsf);
			//reject extremely elongated fits (these two conditions must be OR-ed, not AND-ed)
			if (box.size.width / box.size.height > 10 || box.size.width / box.size.height < 0.1)
				continue;
			double S2 = box.size.width * box.size.height * PI / 4;
			if (S1 / S2 >0.95 && S1 / S2<1.1)
			{
				box2.push_back(box);
				circle(LLL, box.center, 2, Scalar(255));
				ellipse(LLL, box, Scalar(255), 1, 8);			
			}
		}
	}
	return  box2;
}

//Match_boxL, Match_boxR: matched marker coordinates in the left/right images
//boxL, boxR: marker coordinates detected in the left/right images
//y, x: matching tolerance along y, and along x as imgLeft.cols / x
//isShow: whether to display the matching result
void stereo_match(vector<RotatedRect> &Match_boxL, vector<RotatedRect> &Match_boxR, vector<RotatedRect> boxL, vector<RotatedRect> boxR, int y,int x,bool isShow)
{
	int K = 0;
	for (int i = 0; i < boxL.size(); i++) {
		//cout << "current left-image centroid " << boxL[i].center << endl;
		for (int j = 0; j < boxR.size(); j++) {
			if (abs(boxL[i].center.y - boxR[j].center.y) < y && abs(boxL[i].center.x - boxR[j].center.x) < imgLeft.cols / x) {
				//cout << "right-image centroid with similar y " << boxR[j].center << endl;
				K++;
				Match_boxL.push_back(boxL[i]);
				Match_boxR.push_back(boxR[j]);
				break;
			}
		}
		//cout << "---------" << endl;
	}
	cout << "Number of matched points: " << K << endl;
	if (isShow) {
		Mat left_img = imread(savePath);
		Mat right_img = imread(savePath2);
		for (int i = 0; i < Match_boxL.size(); i++) {
			putText(left_img, to_string(i), Match_boxL[i].center, FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 0, 255), 2, 8);
			putText(right_img, to_string(i), Match_boxR[i].center, FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 0, 255), 2, 8);
		}
		imshow("left", left_img);
		imshow("right", right_img);
		cv::waitKey(0);
	}
}

//triangulate the matched left/right marker coordinates into 3D coordinates
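//Reprojection uses the 4x4 matrix Q returned by stereoRectify:
//  [X Y Z W]^T = Q * [u v d 1]^T, world point = (X/W, Y/W, Z/W)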
vector<Point3d> getWord3d(vector<RotatedRect> Match_boxL, vector<RotatedRect> Match_boxR) {
	//compute the disparity (left minus right); if it is not positive, set it to -1 and drop the point later
	vector<double> disparity(Match_boxL.size());
	for (int i = 0; i < Match_boxL.size(); i++) {
		if ((Match_boxL[i].center.x - Match_boxR[i].center.x) > 0) {
			disparity[i] = Match_boxL[i].center.x - Match_boxR[i].center.x;
		}
		else {
			disparity[i] = -1;
		}
	}
	//pix4d[u,v,d,1]  
	vector<Mat> pix4d(Match_boxL.size());
	//word4d[Xw,Yw,Zw,W]
	vector<Mat> word4d(Match_boxL.size());
	//world coordinates
	vector<Point3d> word3d2(Match_boxL.size());
	int useNum = 0;
	for (int i = 0; i < Match_boxL.size(); i++) {
		pix4d[i].create(4, 1, CV_64FC1);
		word4d[i].create(4, 1, CV_64FC1);
		if (disparity[i] != -1) {
			pix4d[i].at<double>(0, 0) = Match_boxL[i].center.x;
			pix4d[i].at<double>(1, 0) = Match_boxL[i].center.y;
			pix4d[i].at<double>(2, 0) = disparity[i];
			pix4d[i].at<double>(3, 0) = 1;
			word4d[i] = Q * pix4d[i];
			word3d2[i].x = word4d[i].at<double>(0, 0) / word4d[i].at<double>(3, 0);
			word3d2[i].y = word4d[i].at<double>(1, 0) / word4d[i].at<double>(3, 0);
			word3d2[i].z = word4d[i].at<double>(2, 0) / word4d[i].at<double>(3, 0);
			useNum++;
		}
		else {
			word3d2[i].x = 0;
			word3d2[i].y = 0;
			word3d2[i].z = 0;
		}
	}
	cout << "实际可用点数" << useNum << endl;
	vector<Point3d> word3d(useNum);
	for (int i = 0, j = 0; j < Match_boxL.size();j++ ) {
		if (disparity[j] != -1) {
			word3d[i] = word3d2[j];
			i++;
		}
	}
	return word3d;
}



//fit a plane with RANSAC
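//Classic three-point RANSAC: repeatedly sample three points, build the plane
//through them, count the points lying within 'threshold' of that plane, and
//keep the coefficients of the candidate with the most inliers.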
vec4d ransac(vector<Point3d> &pts_3d, int max_iter, double threshold)
{
	vec4d plane;
	srand(time(0)); //seed the random generator
	int size_old = 3;
	double a_final, b_final, c_final, d_final; //coefficients of the best plane so far
	while (--max_iter) //fixed number of iterations
	{
		vector<int> index;
		for (int k = 0; k < 3; ++k)
		{
			index.push_back(rand() % pts_3d.size()); //randomly pick three point indices (not guaranteed distinct)
		}
		auto idx = index.begin();
		double x1 = pts_3d.at(*idx).x, y1 = pts_3d.at(*idx).y, z1 = pts_3d.at(*idx).z;
		++idx;
		double x2 = pts_3d.at(*idx).x, y2 = pts_3d.at(*idx).y, z2 = pts_3d.at(*idx).z;
		++idx;
		double x3 = pts_3d.at(*idx).x, y3 = pts_3d.at(*idx).y, z3 = pts_3d.at(*idx).z;

		//plane normal n = (P2 - P1) x (P3 - P1)
		double a = (y2 - y1)*(z3 - z1) - (z2 - z1)*(y3 - y1);
		double b = (z2 - z1)*(x3 - x1) - (x2 - x1)*(z3 - z1);
		double c = (x2 - x1)*(y3 - y1) - (y2 - y1)*(x3 - x1);
		double d = -(a * x1 + b * y1 + c * z1);

		for (auto iter = pts_3d.begin(); iter != pts_3d.end(); ++iter)
		{
			double dis = fabs(a*iter->x + b * iter->y + c * iter->z + d) / sqrt(a*a + b * b + c * c); //point-to-plane distance
			if (dis < threshold) {
				index.push_back(iter - pts_3d.begin());
			}
		}
		//keep the model with the most inliers
		if ((int)index.size() > size_old)
		{
			size_old = index.size();
			a_final = a; b_final = b; c_final = c; d_final = d;
		}
		//index.clear();
	}
	//cout << a_final << " " << b_final << " " << c_final << " " << d_final << endl;
	plane.A = a_final;
	plane.B = b_final;
	plane.C = c_final;
	plane.D = d_final;
	return plane;
}




//reconstruct the interface plane
vec4d plane_reconstruction() {
	//-------------------------------------- read the camera parameters
	readFile(filename);


	//-------------------------------------- stereo rectification
	Mat grayimgLeft = imread(path1, IMREAD_GRAYSCALE);
	Mat grayimgRight = imread(path2, IMREAD_GRAYSCALE);


	//obtain the rectified imgLeft and imgRight
	stereo_rectify(grayimgLeft, grayimgRight);


	//display
	if (is_show_rectify_performance)
		show_rectify_performance();



	//-------------------------------------- detect the marker coordinates
	vector<RotatedRect> boxL, boxR;
	boxL = findellipse(imgLeft, savePath);
	boxR = findellipse(imgRight, savePath2);
	cout << "找到的左图像标记点:" << boxL.size() << endl;
	cout << "找到的右图像标记点:" << boxR.size() << endl;


	//-------------------------------------- marker matching
	vector<RotatedRect> Match_boxL, Match_boxR;

	//match the markers
	stereo_match(Match_boxL, Match_boxR, boxL, boxR, y, x, is_show_stereo_match);


	//-------------------------------------- 3D reconstruction of the markers
	vector<Point3d> word3d;
	word3d = getWord3d(Match_boxL, Match_boxR);
	cout << word3d << endl;


	//-------------------------------------- refractive interface reconstruction
	cout << "-------------------" << endl;
	vec4d plane;
	plane = ransac(word3d, max_iter, ransac_threshold);

	return plane;
}



int main()
{
	vec4d plane;
	plane = plane_reconstruction();
	cout << plane.A << " " << plane.B << " " << plane.C << " " << plane.D << endl;
	system("pause");
	return 0;
}

Refractive 3D reconstruction code

#include <iostream>
#include <cmath>
#include <cstdlib>
#define PI acos(-1)
using namespace std;


//a 3D vector or a 3D point
struct vec3d {
	double x;
	double y;
	double z;
};


//plane equation (or line equation) parameters
struct vec4d{
	double A;
	double B;
	double C;
	double D;
};


//cross product of two 3D vectors:
//n = u(x1, y1, z1) x v(x2, y2, z2)
//  = (y1z2 - y2z1, x2z1 - z2x1, x1y2 - x2y1)
vec3d _Cross(vec3d left, vec3d right) {
	return {
		left.y*right.z - right.y*left.z,
		right.x*left.z - right.z*left.x,
		left.x*right.y - right.x*left.y
	};
}


//normalize a 3D vector
vec3d _Normalize(vec3d T) {
	double a = sqrt(pow(T.x, 2) + pow(T.y, 2) + pow(T.z, 2));
	return { T.x / a,T.y / a,T.z / a };
}


//intersection of a line and a plane
//'plane' is the plane equation in the form Ax + By + Cz + D = 0, with normal (A, B, C)
//'point' is a point on the line
//'normal' is the unit direction vector of the line
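//Substituting P = point + s * normal into the plane equation gives
//  s = -(A*point.x + B*point.y + C*point.z + D) / (A*normal.x + B*normal.y + C*normal.z);
//the closed-form expressions below are this solution expanded component-wise.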
vec3d _Intersection(vec3d point, vec3d normal, vec4d plane) {
	double x = -((plane.D * normal.x - plane.B * normal.y * point.x - plane.C * normal.z * point.x + plane.B * normal.x * point.y + plane.C * normal.x * point.z) / (plane.A * normal.x + plane.B * normal.y + plane.C * normal.z));
	double y = -((plane.D * normal.y + plane.A * normal.y * point.x - plane.A * normal.x * point.y - plane.C * normal.z * point.y + plane.C * normal.y * point.z) / (plane.A * normal.x + plane.B * normal.y + plane.C * normal.z));
	double z = -((plane.D * normal.z + plane.A * normal.z * point.x + plane.B * normal.z * point.y - plane.A * normal.x * point.z - plane.B * normal.y * point.z) / (plane.A * normal.x + plane.B * normal.y + plane.C * normal.z));
	//normalize negative zero for cleaner output
	if (x == -0) {
		x = 0;
	}
	if (y == -0) {
		y = 0;
	}
	if (z == -0) {
		z = 0;
	}
	return { x,y,z };
}


//angle between two 3D vectors
double _Vector_angle(vec3d vec1, vec3d vec2){
	vec3d vec11 = _Normalize(vec1);
	vec3d vec22 = _Normalize(vec2);

	return acos(
		(vec11.x*vec22.x + vec11.y*vec22.y + vec11.z*vec22.z) 
		/
		(sqrt(pow(vec11.x, 2) + pow(vec11.y, 2) + pow(vec11.z, 2))*sqrt(pow(vec22.x, 2) + pow(vec22.y, 2) + pow(vec22.z, 2)))
	);
}


//Compute the unit direction of the refracted ray.
// vec1:   unit direction vector of the incident ray
// N:      unit normal of the refracting interface
// n1:     refractive index of the incident medium
// n2:     refractive index of the emergent medium
// theta1: incident angle
// theta2: refracted angle
// returns the unit direction vector of the refracted ray
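//Vector form of Snell's law implemented below:
//  t = (n1/n2) * i - ((n1/n2) * cos(theta1) - cos(theta2)) * n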
vec3d _Get_refracted_vector(vec3d vec1, vec3d N, double n1, double n2, double theta1, double theta2){
	vec3d _Prev = { (n1 / n2) * vec1.x, 
		            (n1 / n2) * vec1.y, 
		            (n1 / n2) * vec1.z };

	vec3d _Tail = { (n1 / n2 * cos(theta1) - cos(theta2)) * N.x, 
		            (n1 / n2 * cos(theta1) - cos(theta2)) * N.y, 
		            (n1 / n2 * cos(theta1) - cos(theta2)) * N.z };

	return { _Prev.x - _Tail.x, _Prev.y - _Tail.y, _Prev.z - _Tail.z };
}


//Compute the intersection of two 3D lines.
// point1, vec1: a point on line 1 and its direction vector
// point2, vec2: a point on line 2 and its direction vector
// returns the coordinates of the intersection
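//The closed-form expressions below find, on each line, the point closest to the
//other line (via the common perpendicular direction vec3 = vec1 x vec2) and
//return the midpoint of the two, which also handles slightly skew rays.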
vec3d _Intersection(vec3d point1, vec3d vec1, vec3d point2, vec3d vec2) {
	vec3d vec3 = _Cross(vec1, vec2);
	double x1 = -((point1.z * vec1.x * vec2.y * vec3.x - point2.z * vec1.x * vec2.y * vec3.x -
		point1.x * vec1.z * vec2.y * vec3.x - point1.y * vec1.x * vec2.z * vec3.x +
		point2.y * vec1.x * vec2.z * vec3.x + point1.x * vec1.y * vec2.z * vec3.x -
		point1.z * vec1.x * vec2.x * vec3.y + point2.z * vec1.x * vec2.x * vec3.y +
		point1.x * vec1.z * vec2.x * vec3.y - point2.x * vec1.x * vec2.z * vec3.y +
		point1.y * vec1.x * vec2.x * vec3.z - point2.y * vec1.x * vec2.x * vec3.z -
		point1.x * vec1.y * vec2.x * vec3.z + point2.x * vec1.x * vec2.y * vec3.z) /
		(vec1.z * vec2.y * vec3.x - vec1.y * vec2.z * vec3.x - vec1.z * vec2.x * vec3.y +
			vec1.x * vec2.z * vec3.y + vec1.y * vec2.x * vec3.z - vec1.x * vec2.y * vec3.z));
	double y1 = -((-point1.z * vec1.y * vec2.y * vec3.x + point2.z * vec1.y * vec2.y * vec3.x +
		point1.y * vec1.z * vec2.y * vec3.x - point2.y * vec1.y * vec2.z * vec3.x +
		point1.z * vec1.y * vec2.x * vec3.y - point2.z * vec1.y * vec2.x * vec3.y -
		point1.y * vec1.z * vec2.x * vec3.y + point1.y * vec1.x * vec2.z * vec3.y -
		point1.x * vec1.y * vec2.z * vec3.y + point2.x * vec1.y * vec2.z * vec3.y +
		point2.y * vec1.y * vec2.x * vec3.z - point1.y * vec1.x * vec2.y * vec3.z +
		point1.x * vec1.y * vec2.y * vec3.z - point2.x * vec1.y * vec2.y * vec3.z) /
		(-vec1.z * vec2.y * vec3.x + vec1.y * vec2.z * vec3.x + vec1.z * vec2.x * vec3.y -
			vec1.x * vec2.z * vec3.y - vec1.y * vec2.x * vec3.z + vec1.x * vec2.y * vec3.z));
	double z1 = -((point2.z * vec1.z * vec2.y * vec3.x - point1.z * vec1.y * vec2.z * vec3.x +
		point1.y * vec1.z * vec2.z * vec3.x - point2.y * vec1.z * vec2.z * vec3.x -
		point2.z * vec1.z * vec2.x * vec3.y + point1.z * vec1.x * vec2.z * vec3.y -
		point1.x * vec1.z * vec2.z * vec3.y + point2.x * vec1.z * vec2.z * vec3.y +
		point1.z * vec1.y * vec2.x * vec3.z - point1.y * vec1.z * vec2.x * vec3.z +
		point2.y * vec1.z * vec2.x * vec3.z - point1.z * vec1.x * vec2.y * vec3.z +
		point1.x * vec1.z * vec2.y * vec3.z - point2.x * vec1.z * vec2.y * vec3.z) /
		(-vec1.z * vec2.y * vec3.x + vec1.y * vec2.z * vec3.x + vec1.z * vec2.x * vec3.y -
			vec1.x * vec2.z * vec3.y - vec1.y * vec2.x * vec3.z + vec1.x * vec2.y * vec3.z));

	double x2 = -((-point1.z * vec1.y * vec2.x * vec3.x + point2.z * vec1.y * vec2.x * vec3.x +
		point1.y * vec1.z * vec2.x * vec3.x - point2.y * vec1.z * vec2.x * vec3.x +
		point2.x * vec1.z * vec2.y * vec3.x - point2.x * vec1.y * vec2.z * vec3.x +
		point1.z * vec1.x * vec2.x * vec3.y - point2.z * vec1.x * vec2.x * vec3.y -
		point1.x * vec1.z * vec2.x * vec3.y + point2.x * vec1.x * vec2.z * vec3.y -
		point1.y * vec1.x * vec2.x * vec3.z + point2.y * vec1.x * vec2.x * vec3.z +
		point1.x * vec1.y * vec2.x * vec3.z - point2.x * vec1.x * vec2.y * vec3.z) /
		(-vec1.z * vec2.y * vec3.x + vec1.y * vec2.z * vec3.x + vec1.z * vec2.x * vec3.y -
			vec1.x * vec2.z * vec3.y - vec1.y * vec2.x * vec3.z + vec1.x * vec2.y * vec3.z));
	double y2 = -((-point1.z * vec1.y * vec2.y * vec3.x + point2.z * vec1.y * vec2.y * vec3.x +
		point1.y * vec1.z * vec2.y * vec3.x - point2.y * vec1.y * vec2.z * vec3.x -
		point2.y * vec1.z * vec2.x * vec3.y + point1.z * vec1.x * vec2.y * vec3.y -
		point2.z * vec1.x * vec2.y * vec3.y - point1.x * vec1.z * vec2.y * vec3.y +
		point2.x * vec1.z * vec2.y * vec3.y + point2.y * vec1.x * vec2.z * vec3.y +
		point2.y * vec1.y * vec2.x * vec3.z - point1.y * vec1.x * vec2.y * vec3.z +
		point1.x * vec1.y * vec2.y * vec3.z - point2.x * vec1.y * vec2.y * vec3.z) /
		(-vec1.z * vec2.y * vec3.x + vec1.y * vec2.z * vec3.x +
			vec1.z * vec2.x * vec3.y - vec1.x * vec2.z * vec3.y -
			vec1.y * vec2.x * vec3.z + vec1.x * vec2.y * vec3.z));
	double z2 = -((point2.z * vec1.z * vec2.y * vec3.x - point1.z * vec1.y * vec2.z * vec3.x +
		point1.y * vec1.z * vec2.z * vec3.x - point2.y * vec1.z * vec2.z * vec3.x -
		point2.z * vec1.z * vec2.x * vec3.y + point1.z * vec1.x * vec2.z * vec3.y -
		point1.x * vec1.z * vec2.z * vec3.y + point2.x * vec1.z * vec2.z * vec3.y +
		point2.z * vec1.y * vec2.x * vec3.z - point2.z * vec1.x * vec2.y * vec3.z -
		point1.y * vec1.x * vec2.z * vec3.z + point2.y * vec1.x * vec2.z * vec3.z +
		point1.x * vec1.y * vec2.z * vec3.z - point2.x * vec1.y * vec2.z * vec3.z) /
		(-vec1.z * vec2.y * vec3.x + vec1.y * vec2.z * vec3.x +
			vec1.z * vec2.x * vec3.y - vec1.x * vec2.z * vec3.y -
			vec1.y * vec2.x * vec3.z + vec1.x * vec2.y * vec3.z));


	double x = (x1 + x2) / 2;
	double y = (y1 + y2) / 2;
	double z = (z1 + z2) / 2;
	//normalize negative zero for cleaner output
	if (x == -0) {
		x = 0;
	}
	if (y == -0) {
		y = 0;
	}
	if (z == -0) {
		z = 0;
	}


	return { x,	y, z };
}


/********************************************************************************************
 * \brief Driver for refractive 3D reconstruction.
 * \param 'raw_point' distorted object point P.
 * \param 'translation' translation vector of the right camera relative to the frame O-XYZ.
 * \param 'interf1' refracting interface Ax + By + Cz + D = 0, with N = (A, B, C).
 * \param n1, n2, n3, thickness: refractive indices and the glass thickness.
 * \returns The true 3D coordinates of the object point.
 ********************************************************************************************/
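//The left camera center is taken as the origin of O-XYZ, so the right camera
//center sits at -translation (see the two _Intersection calls below).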
vec3d refractive_3d_reconstruction(vec3d raw_point, vec3d translation, vec4d interf1, double n1, double n2, double n3, double thickness) {

	double delta = thickness;
	vec3d t = translation;
	double near_d = interf1.D;

	// Build the interface 2
	double far_d = near_d - delta * sqrt(pow(interf1.A, 2) + pow(interf1.B, 2) + pow(interf1.C, 2));

	vec4d interf2 = { interf1.A, interf1.B, interf1.C, far_d };

	// Compute the normalized normal of the interfaces 1 and 2
	vec3d N1 = { interf1.A, interf1.B, interf1.C };
	vec3d N2 = { interf2.A, interf2.B, interf2.C };
	N1 = _Normalize(N1);
	N2 = _Normalize(N2);

	// Direction vectors of the ray L1 and L1'
	vec3d nlineL1 = _Normalize(raw_point);
	vec3d nlineR1 = _Normalize({ raw_point.x + t.x,raw_point.y + t.y,raw_point.z + t.z });

	// Incident points P1 and P1' of L1 and L1' at the interface 1
	vec3d pointL1 = _Intersection({ 0, 0, 0 }, nlineL1, interf1);
	vec3d pointR1 = _Intersection({ -t.x, -t.y, -t.z }, nlineR1, interf1);

	// Incident angles of rays L1 and L1'
	double angleL1 = _Vector_angle(nlineL1, N1);
	double angleR1 = _Vector_angle(nlineR1, N1);

	// Refracted angles of rays L1 and L1' 
	double angleL2 = asin(n1 * sin(angleL1) / n2);
	double angleR2 = asin(n1 * sin(angleR1) / n2);

	// Direction vectors of the ray L2 and L2'
	vec3d nlineL2 = _Get_refracted_vector(nlineL1, N1, n1, n2, angleL1, angleL2);
	vec3d nlineR2 = _Get_refracted_vector(nlineR1, N1, n1, n2, angleR1, angleR2);

	// Incident points P2 and P2' of L2 and L2' at the interface 2
	vec3d pointL2 = _Intersection(pointL1, nlineL2, interf2);
	vec3d pointR2 = _Intersection(pointR1, nlineR2, interf2);

	// Incident angles of rays L2 and L2'
	double angleL3 = angleL2;
	double angleR3 = angleR2;

	// Refracted angles of rays L2 and L2'
	double angleL4 = asin(n2 * sin(angleL3) / n3);
	double angleR4 = asin(n2 * sin(angleR3) / n3);

	// Direction vectors of the ray L3 and L3'
	vec3d nlineL3 = _Get_refracted_vector(nlineL2, N2, n2, n3, angleL3, angleL4);
	vec3d nlineR3 = _Get_refracted_vector(nlineR2, N2, n2, n3, angleR3, angleR4);

	//auto nCommonVerticalLine3 = detail::_Cross(nlineL3, nlineR3);

	// Compute the true object point Q
	return _Intersection(pointL2, nlineL3, pointR2, nlineR3);
}






int main() {
	//refraction reconstruction test
	vec3d raw_point;
	raw_point.x = 9;
	raw_point.y = 0;
	raw_point.z = 3 * 1.732;

	vec3d translation;
	translation.x = -18;
	translation.y = 0;
	translation.z = 0;

	vec4d interf1;
	interf1.A = 0;
	interf1.B = 0;
	interf1.C = 1;
	interf1.D = -2;

	double n1 = 1;
	double n2 = 1.732;
	double n3 = 1;

	double thickness = 1;
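	//With this symmetric configuration (both rays hit the interface at 60 degrees,
	//refracting to 30 degrees inside the glass), the reconstructed point should
	//come out at roughly (9, 0, 5.86).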

	vec3d res;
	res = refractive_3d_reconstruction(raw_point, translation, interf1, n1, n2, n3, thickness);
	cout << res.x << endl;
	cout << res.y << endl;
	cout << res.z << endl;

	system("pause");
	return 0;
}

