本题来自于哈工大自动化专业大四课程数字图像处理的实验1,需要自己编写程序实现OpenCV中求解单应性矩阵的函数findHomography以及实现单应性变换的函数warpPerspective。本文包含整个工程的全部源码,仅供学习交流使用。
2022.04.22补充:
**重要提示(原文加粗强调):**为了防止通篇照搬,且出于对老师的尊重,数学推导全部省略,代码故意设置了一些bug,直接跑是跑不通的,请各位耗子尾汁。
欢迎在本文档的基础上学习,如果能够调通程序则说明你已经掌握了这部分的原理,也就不需要我给出数学推导过程了。
输入图片为src.jpg(待校正图像,待校正目标的四对点需要使用Photoshop等软件预提取)和dst.jpg(利用扫描全能王等得到的校正后的图像,用于提供校正后的图像尺寸),如下所示:
输出结果见以下帖子(直接调用OpenCV现成函数findHomography和warpPerspective):
OpenCV-C++实现单应性矩阵的求解_Quentin的博客-CSDN博客
#ifndef _MYFINDHOMOGRAPHY_HPP_
#define _MYFINDHOMOGRAPHY_HPP_
using namespace std;
using namespace cv;
using namespace Eigen;
enum H_param {MethodInverse, MethodSVD};
//根据4对点求解单应性矩阵
Mat myfindHomography(vector srcPoints, vector dstPoints, H_param flag)
{
vector src=srcPoints;
vector dst=dstPoints;
Point2f src1 = src[0], src2 = src[1], src3 = src[2], src4 = src[3];
Point2f dst1 = dst[0], dst2 = dst[1], dst3 = dst[2], dst4 = dst[3];
//输入点(x, y);
double x1 = (double)src1.x, x2 = (double)src2.x, x3 = (double)src3.x, x4 = (double)src4.x;
double y1 = (double)src1.y, y2 = (double)src2.y, y3 = (double)src3.y, y4 = (double)src4.y;
//输出点(u, v)
double u1 = (double)dst1.x, u2 = (double)dst2.x, u3 = (double)dst3.x, u4 = (double)dst4.x;
double v1 = (double)dst1.y, v2 = (double)dst2.y, v3 = (double)dst3.y, v4 = (double)dst4.y;
//--方法1:利用矩阵求逆的方法求解--
if(flag == MethodInverse)
{
cout << "Inverse Matrix Method" << endl;
Mat M = (Mat_(8, 8) << -x1, y1, -1, 0, 0, 0, u1*x1, u1*y1,
0, 0, 0, -x1, -y1, 1, v1*x1, v1*y1,
-x2, y2, -1, 0, 0, 0, u2*x2, u2*y2,
0, 0, 0, -x2, -y2, 1, v2*x2, v2*y2,
-x3, y3, -1, 0, 0, 0, u3*x3, u3*y3,
0, 0, 0, -x3, -y3, 1, v3*x3, v3*y3,
-x4, y4, -1, 0, 0, 0, u4*x4, u4*y4,
0, 0, 0, -x4, -y4, 1, v4*x4, v4*y4);
Mat B = (Mat_(8, 1)<< -u1, v1, -u2, -v2, -u3, -v3, -u4, -v4);
Mat M_inv = M.inv();
Mat H0 = M_inv*B;
double H11 = H0.at(0,0);
double H12 = H0.at(1,0);
double H13 = H0.at(2,0);
double H21 = H0.at(3,0);
double H22 = H0.at(4,0);
double H23 = H0.at(5,0);
double H31 = H0.at(6,0);
double H32 = H0.at(7,0);
double H33 = 1.0;
Mat H = (Mat_(3, 3)<< H11, H12, H13, H21, H22, H23, H31, H32, H33);
return H;
}
else if(flag == MethodSVD)
{
//--方法2:利用SVD奇异值分解的方法求解--
cout << "SVD Method" << endl;
MatrixXd A(8, 9);
A << -x1, y1, -1, 0, 0, 0, u1*x1, u1*y1, u1,
0, 0, 0, -x1, -y1, 1, v1*x1, v1*y1, v1,
-x2, y2, -1, 0, 0, 0, u2*x2, u2*y2, u2,
0, 0, 0, -x2, -y2, 1, v2*x2, v2*y2, v2,
-x3, y3, -1, 0, 0, 0, u3*x3, u3*y3, u3,
0, 0, 0, -x3, -y3, 1, v3*x3, v3*y3, v3,
-x4, y4, -1, 0, 0, 0, u4*x4, u4*y4, u4,
0, 0, 0, -x4, -y4, 1, v4*x4, v4*y4, v4;
//求解Full SVD
JacobiSVD svd(A, ComputeFullV);
MatrixXd V;
V = svd.matrixV();
//所需要求解的H11-H33即为V矩阵的最后一列(证明见报告)
double H11 = V(0,8) / V(8,8);
double H12 = V(1,8) / V(8,8);
double H13 = V(2,8) / V(8,8);
double H21 = V(3,8) / V(8,8);
double H22 = V(4,8) / V(8,8);
double H23 = V(5,8) / V(8,8);
double H31 = V(6,8) / V(8,8);
double H32 = V(7,8) / V(8,8);
double H33 = 1.0;
Mat H = (Mat_(3,3)<< H11, H12, H13, H21, H22, H23, H31, H32, H33);
return H;
}
else
{
return Mat();
}
}
#endif
#ifndef _MYWARPPERSPECTIVE_HPP_
#define _MYWARPPERSPECTIVE_HPP_
#include "myremap.hpp"
//using namespace std;
using namespace cv;
// Apply the homography H to src, writing a size-sized image into dst.
//   src:  input image
//   dst:  output image (created here with src's type)
//   H:    3x3 homography (any float depth; converted to CV_32F internally)
//   size: size of the output image
// Uses BACKWARD mapping: for every output pixel (j, i) the corresponding
// source coordinate is H^-1 * (j, i, 1)^T, so every output pixel gets a value
// (no holes, unlike forward mapping).
void mywarpPerspective(Mat src, Mat &dst, Mat H, Size size)
{
	dst.create(size, src.type());
	// Backward-mapping tables: map_x/map_y hold, for each output pixel,
	// the x/y coordinate of the source pixel it samples.
	Mat map_x(dst.size(), CV_32FC1);
	Mat map_y(dst.size(), CV_32FC1);
	// Homogeneous destination point (x, y, 1)^T; initialized to all ones so
	// the third component is already 1.
	Mat point_dst(3, 1, CV_32FC1, Scalar(1));
	// H is CV_64FC1 as produced by the solvers; OpenCV only multiplies
	// matrices of the same type, so convert H to match point_dst.
	H.convertTo(H, point_dst.type());
	Mat H_inv = H.inv();
	// The output image's size comes from dst.jpg; pixel (0, 0) is top-left.
	for (int i = 0; i < dst.rows; i++)
	{
		for (int j = 0; j < dst.cols; j++)
		{
			point_dst.at<float>(0) = (float)j;  // x = column
			point_dst.at<float>(1) = (float)i;  // y = row
			Mat point_src = H_inv * point_dst;
			// Homogeneous -> pixel coordinates (divide by w).
			map_x.at<float>(i, j) = point_src.at<float>(0) / point_src.at<float>(2);
			map_y.at<float>(i, j) = point_src.at<float>(1) / point_src.at<float>(2);
		}
	}
	myremap(src, dst, map_x, map_y);
}
#endif
#ifndef _MYREMAP_HPP_
#define _MYREMAP_HPP_
using namespace cv;
//通过输出像素点与输入像素点的对应关系矩阵,来应用几何变换
//map_x为二维矩阵,代表该位置上的输出像素点对应的输入像素点的x坐标
//map_y为二维矩阵,代表该位置上的输出像素点对应的输入像素点的y坐标
void myremap(Mat src, Mat &dst, Mat map_x, Mat map_y)
{
//在浮点数表示的颜色空间中,数值范围是0-1.0
//imshow的时候会把图像x255后再显示
//因此需要在转换的时候设置scale factor=1/255以归一化
src.convertTo(src, CV_32F,1/255.0);
dst.convertTo(dst, CV_32F,1/255.0);
//输入图像是彩色图像,需要分离通道再单独处理
vector src_planes;
vector dst_planes;
split(src,src_planes);
split(dst,dst_planes);
for (int i = 0; i < dst.rows; i++)
{
for (int j = 0; j < dst.cols; j++)
{
//应用几何变换,插值方式选择最近邻法
//利用成员函数at取值时,会自动将坐标转换成int类型,因此相当于最近邻法插值
dst_planes[0].at(i, j)=src_planes[0].at(map_y.at(i, j),map_x.at(i, j));
dst_planes[1].at(i, j)=src_planes[1].at(map_y.at(i, j),map_x.at(i, j));
dst_planes[2].at(i, j)=src_planes[2].at(map_y.at(i, j),map_x.at(i, j));
}
}
//合并通道
Mat plane[] = {dst_planes[0], dst_planes[1], dst_planes[2]};
merge(plane, 3, dst);
}
#endif
#ifndef _GETMSE_HPP_
#define _GETMSE_HPP_
using namespace std;
using namespace cv;
// Mean squared error between two images of equal size and channel count.
// The original used Mat::ptr(i) without a type, which yields uchar* and reads
// garbage for float or multi-channel images (main() compares a CV_32F result
// against a CV_8U one). Both inputs are converted to CV_64F and flattened to
// one channel so every element is compared with the correct type.
// NOTE(review): the two images compared in main() appear to live in different
// value ranges (0..1 float vs 0..255 uchar) — confirm callers normalize first.
double getMSE(Mat & srcImage, Mat & dstImage)
{
	CV_Assert(srcImage.size() == dstImage.size()
	          && srcImage.channels() == dstImage.channels());
	Mat a, b;
	srcImage.convertTo(a, CV_64F);
	dstImage.convertTo(b, CV_64F);
	// reshape(1): view each channel as a separate column, no copy.
	a = a.reshape(1);
	b = b.reshape(1);
	double mse = 0.0;
	for (int i = 0; i < a.rows; i++)
	{
		const double *pa = a.ptr<double>(i);
		const double *pb = b.ptr<double>(i);
		for (int j = 0; j < a.cols; j++)
		{
			double d = pa[j] - pb[j];
			mse += d * d;
		}
	}
	// Average over all elements (pixels x channels).
	mse = mse / ((double)a.rows * a.cols);
	return mse;
}
#endif
#ifndef _MYRESIZE_HPP_
#define _MYRESIZE_HPP_
using namespace cv;
// Uniformly rescale an image in place by the given ratio.
//   srcImage: image to resize (overwritten with the scaled result)
//   ratio:    scale factor applied to both width and height
void myResize(Mat &srcImage, double ratio)
{
	// Keep the float intermediate of the original implementation so the
	// truncated dimensions match exactly.
	const float scale = ratio;
	const int newWidth  = int(srcImage.cols * scale);
	const int newHeight = int(srcImage.rows * scale);
	resize(srcImage, srcImage, Size(newWidth, newHeight));
}
#endif
#ifndef _TEST_HPP_
#define _TEST_HPP_
#include "myfindHomography.hpp"
#include "mywarpPerspective.hpp"
using namespace std;
using namespace cv;
// NOTE(review): this section was garbled in the published page (the two
// functions were fused together); reconstructed from context — verify against
// the original project.
// Baseline: call the OpenCV library functions directly, for comparison.
// Returns the warped image.
Mat Test_findHomography_warpPerspective(vector<Point2f> srcPoints, vector<Point2f> dstPoints, Mat im_src, Mat im_dst)
{
	Mat H, im_out;
	// Solve the homography with OpenCV.
	H = findHomography(srcPoints, dstPoints);
	cout << "Homography matrix calculated by findHomography:" << endl << H << endl;
	// im_dst only supplies the output size.
	warpPerspective(im_src, im_out, H, im_dst.size());
	return im_out;
}
// Our implementation: myfindHomography + mywarpPerspective.
// Returns the warped image.
Mat Test_myfindHomography_mywarpPerspective(vector<Point2f> srcPoints, vector<Point2f> dstPoints, Mat im_src, Mat im_dst)
{
	Mat H, im_out;
	// Third argument of myfindHomography:
	//   MethodInverse -> matrix inversion
	//   MethodSVD     -> singular value decomposition
	H = myfindHomography(srcPoints, dstPoints, MethodSVD);
	cout << "Homography matrix calculated by myfindHomography:" << endl << H << endl;
	mywarpPerspective(im_src, im_out, H, im_dst.size());
	return im_out;
}
#endif
// NOTE(review): the four #include targets were stripped by the page
// extraction; the list below is reconstructed — verify against the project.
#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>
#include <Eigen/Dense>
#include "myResize.hpp"
#include "test.hpp"
#include "myfindHomography.hpp"
#include "mywarpPerspective.hpp"
#include "myremap.hpp"
#include "getMSE.hpp"
// Experiment 1: rectify src.jpg with a hand-rolled homography pipeline and
// compare against OpenCV's findHomography/warpPerspective.
int main()
{
	// -- Load images --
	Mat im_src, im_dst, im_out;
	// Image to be rectified.
	im_src = imread("../../image/src.jpg");
	// Pre-rectified reference (e.g. from a scanner app); only its size is used.
	im_dst = imread("../../image/dst.jpg");
	// -- Four point pairs (top-left, top-right, bottom-left, bottom-right) --
	// Source points extracted beforehand with Photoshop.
	vector<Point2f> pts_src{Point2f(889, 1032), Point2f(2471, 1079), Point2f(276, 2868), Point2f(2829, 2979)};
	// Destination corners for a 2070x2500 output image.
	vector<Point2f> pts_dst{Point2f(0, 0), Point2f(2069, 0), Point2f(0, 2499), Point2f(2069, 2499)};
	// Our implementation of the homography solve + warp.
	Mat myH = Test_myfindHomography_mywarpPerspective(pts_src, pts_dst, im_src, im_dst);
	// OpenCV's own functions, for comparison.
	Mat H = Test_findHomography_warpPerspective(pts_src, pts_dst, im_src, im_dst);
	// Mean squared error between the two warped results.
	double MSE = getMSE(myH, H);
	cout << "MSE: " << MSE << endl;
	// Shrink to 0.12x so the image fits on screen.
	myResize(im_src, 0.12);
	imshow("待变换的图片", im_src);
	waitKey();
	destroyAllWindows();
	return 0;
}
# NOTE(review): this listing appears to be two CMakeLists.txt files pasted
# together — a top-level one (project + add_subdirectory(src)) and the
# src/ one (find_package + add_executable) — confirm against the project
# layout before building.
cmake_minimum_required(VERSION 2.8)
project(Experiment1)
add_subdirectory(src)
set(CMAKE_BUILD_TYPE "Release")
# NOTE(review): this overwrites any existing flags; appending with
# "${CMAKE_CXX_FLAGS} -std=c++11" (or CMAKE_CXX_STANDARD) is safer.
set(CMAKE_CXX_FLAGS "-std=c++11")
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)
# OpenCV for image I/O and the reference findHomography/warpPerspective.
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
# Eigen for the SVD in myfindHomography (header-only, no link step needed).
find_package(Eigen3 REQUIRED)
include_directories(${EIGEN3_INCLUDE_DIRS})
include_directories(${PROJECT_SOURCE_DIR}/include)
add_executable(Experiment1 main.cpp)
target_link_libraries(Experiment1 ${OpenCV_LIBS})