OpenCV C++ (6): Canny Edge Detection + Affine Transform + Perspective Transform

Resizing an image

	resize(image, image, Size(round(image.cols * 0.5), round(image.rows * 0.5)));

Parameters: input image, output image, target size (width, height).
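
As a small variant (my own sketch, not from the original post), resize can also take scale factors plus an interpolation flag instead of an explicit Size; INTER_AREA is a common choice when shrinking:

	// Scale to half size using fx/fy factors; the empty Size() tells resize to use the factors.
	cv::resize(image, image, cv::Size(), 0.5, 0.5, cv::INTER_AREA);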

Using the Canny edge operator

	cvtColor(image, gray, COLOR_BGR2GRAY);
	Canny(gray, canny_mat, 40, 100);

The input must first be converted to grayscale. Gradient values above the high threshold (100) are definitely edges, values below the low threshold (40) are definitely not edges, and values in between are kept only if they connect to a strong edge (hysteresis thresholding).
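
To see the effect of the two thresholds, here is a quick sketch (threshold values chosen arbitrarily for illustration, not from the original post) comparing a loose and a strict setting:

	Mat canny_loose, canny_strict;
	Canny(gray, canny_loose, 20, 60);    // lower thresholds: more (and noisier) edges
	Canny(gray, canny_strict, 80, 200);  // higher thresholds: only strong edges survive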

Rotation and scaling (affine transform)

Getting the affine transform matrix

	double angle = -10.0, scale = 1.0;
	Mat dstmat;
	Point2f center(image.cols * 0.5, image.rows * 0.5);
	Mat affine_matrix = getRotationMatrix2D(center, angle, scale);

getRotationMatrix2D parameters: rotation center, rotation angle (in degrees), and scale factor.

A negative angle (here -10) rotates clockwise; positive angles rotate counter-clockwise.

The affine warp function

	warpAffine(image, dstmat, affine_matrix, image.size());

Parameters: input image, output image, affine matrix, and size of the output canvas.

This kind of rotation has a drawback: the output canvas is the same size as the input, but the bounding box of the rotated image is larger than the original, so anything that falls outside the canvas gets cut off. An improved version will come in a later post; a rough sketch of the idea is shown below.
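
A minimal sketch of one common fix (my own addition, not the author's improved version, and assuming an OpenCV version that provides boundingRect2f): enlarge the canvas to the rotated bounding box and shift the rotation matrix so the result stays centered.

	// Bounding box of the image rotated about its center by `angle` degrees.
	cv::Rect2f bbox = cv::RotatedRect(center, image.size(), angle).boundingRect2f();
	Mat rot = getRotationMatrix2D(center, angle, scale);
	// Shift the transform so the rotated image sits centered in the larger canvas.
	rot.at<double>(0, 2) += bbox.width  * 0.5 - center.x;
	rot.at<double>(1, 2) += bbox.height * 0.5 - center.y;
	Mat rotated;
	warpAffine(image, rotated, rot, bbox.size());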

Point-to-point affine transform (6 unknowns, so 3 source points map to 3 destination points)

	Mat affine_Mat;
	const cv::Point2f src_pt[] = {
		  cv::Point2f(100,100),
		  cv::Point2f(20,30),
		  cv::Point2f(70,90),
	};
	const cv::Point2f warp_pt[] = {
		  cv::Point2f(50,100),
		  cv::Point2f(50,20),
		  cv::Point2f(70,96),
	};

	Mat affine_matrix2 = cv::getAffineTransform(src_pt, warp_pt);

	warpAffine(image, affine_Mat, affine_matrix2, image.size());

Each source point maps to the corresponding destination point, and OpenCV solves for the affine matrix from the three point pairs.
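As a quick sanity check (my own sketch, not part of the original post, assuming the usual <vector>/<iostream> headers), the recovered 2x3 matrix can be applied to a source point with cv::transform; it should land on the matching destination point:

	// Apply the recovered affine matrix to the first source point.
	std::vector<cv::Point2f> in = { src_pt[0] }, out;
	cv::transform(in, out, affine_matrix2);
	std::cout << out[0] << std::endl;  // expected: approximately [50, 100]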

Point-to-point perspective transform (8 unknowns, so 4 source points map to 4 destination points)

	Mat perspective_Mat;
	cv::Point2f pts1[] = {
		cv::Point2f(150,150),
		cv::Point2f(150,300),
		cv::Point2f(350,300),
		cv::Point2f(350,150),
	};
	cv::Point2f pts2[] = {
		cv::Point2f(200,150),
		cv::Point2f(200,300),
		cv::Point2f(340,270),
		cv::Point2f(340,180),
	};

	Mat perspective_matrix = cv::getPerspectiveTransform(pts1, pts2);
	warpPerspective(image, perspective_Mat, perspective_matrix, image.size());
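
Again as a hedged check (not in the original), cv::perspectiveTransform pushes points through the 3x3 homography; each corner in pts1 should map to the corresponding corner in pts2:

	// Map the four source corners through the homography.
	std::vector<cv::Point2f> src_corners(pts1, pts1 + 4), dst_corners;
	cv::perspectiveTransform(src_corners, dst_corners, perspective_matrix);
	for (const auto& p : dst_corners) std::cout << p << " ";  // expected: the pts2 values
	std::cout << std::endl;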

Full code:

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;


int main() {
	Mat image = imread("lena.jpeg");
	if (image.empty()) {
		cout << "Could not load lena.jpeg" << endl;
		return -1;
	}
	imshow("lena", image);
	waitKey(0);
	destroyAllWindows();

	resize(image, image, Size(round(image.cols * 0.5), round(image.rows * 0.5)));
	imshow("lena", image);
	waitKey(0);
	destroyAllWindows();

	Mat gray;
	Mat canny_mat;
	cvtColor(image, gray, COLOR_BGR2GRAY);
	Canny(gray, canny_mat, 40, 100);
	imshow("canny_mat", canny_mat);
	waitKey(0);
	destroyAllWindows();

	double angle = -10.0, scale = 1.0;
	Mat dstmat;
	Point2f center(image.cols * 0.5, image.rows * 0.5);
	Mat affine_matrix = getRotationMatrix2D(center, angle, scale);
	warpAffine(image, dstmat, affine_matrix, image.size());
	imshow("dstmat", dstmat);
	waitKey(0);
	destroyAllWindows();


	Mat affine_Mat;
	const cv::Point2f src_pt[] = {
		  cv::Point2f(100,100),
		  cv::Point2f(20,30),
		  cv::Point2f(70,90),
	};
	const cv::Point2f warp_pt[] = {
		  cv::Point2f(50,100),
		  cv::Point2f(50,20),
		  cv::Point2f(70,96),
	};

	Mat affine_matrix2 = cv::getAffineTransform(src_pt, warp_pt);

	warpAffine(image, affine_Mat, affine_matrix2, image.size());
	imshow("affine_Mat", affine_Mat);
	waitKey(0);
	destroyAllWindows();



	Mat perspective_Mat;
	cv::Point2f pts1[] = {
		cv::Point2f(150,150),
		cv::Point2f(150,300),
		cv::Point2f(350,300),
		cv::Point2f(350,150),
	};
	cv::Point2f pts2[] = {
		cv::Point2f(200,150),
		cv::Point2f(200,300),
		cv::Point2f(340,270),
		cv::Point2f(340,180),
	};

	Mat perspective_matrix = cv::getPerspectiveTransform(pts1, pts2);
	warpPerspective(image, perspective_Mat, perspective_matrix, image.size());
	imshow("perspective_Mat", perspective_Mat);
	waitKey(0);
	destroyAllWindows();

	// Open questions: how to translate (shift) an image (see the sketch after this listing)?
	// What exactly does image.size() return, and how do I change the output size?


	
	return 0;
}
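
On the translation question above, a minimal sketch (my own addition, not from the original post) that could go inside main alongside the code above: a pure shift is just an affine warp whose 2x3 matrix keeps the identity part and puts the offsets in the last column. And image.size() is simply a cv::Size holding the image width (cols) and height (rows); passing a different Size to warpAffine changes the output canvas.

	// Hypothetical shift amounts, chosen only for illustration.
	double tx = 50.0, ty = 30.0;
	// 2x3 affine matrix [[1, 0, tx], [0, 1, ty]] translates the image by (tx, ty).
	cv::Mat shift = (cv::Mat_<double>(2, 3) << 1, 0, tx, 0, 1, ty);
	cv::Mat shifted;
	cv::warpAffine(image, shifted, shift, image.size());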
