C++ OpenCV Image Processing: API Interfaces for Systematic Learning


MyApi.h

#pragma once
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

class MyApi 
{
public:
	void improve_image_contrast(Mat& image);// improve the image contrast
	void filter2DAPI(Mat& image);// the same contrast enhancement via the filter2D API
	void travel_image_pixel_and_reverse(Mat& image);// traverse all image pixels and invert them
	void change_contrasr_brightness(Mat& image);// change the brightness and contrast of the image
	void draw_line_onImgage(Mat& image);// draw a line on the image
	void draw_rectangle_onImage(Mat& image);// draw a rectangle on the image
	void image_blur(Mat& image);// image blurring
	void image_filter(Mat& image);// image filtering
	void extract_hLine_vLine(Mat& image);// extract horizontal and vertical lines with morphological operations
	void image_pyramid_(Mat& image);// image pyramid: upsampling and downsampling
	void linear_filter(Mat& image);// image filtering with different operators
	void image_edge_processing(Mat& image);// image border (padding) handling
	void image_edge_extract_Sobel(Mat& image);// Sobel edge extraction
	void image_edge_extraxt_Laplance(Mat& image);// Laplacian edge extraction
	void image_canny_edge_detect(Mat& image);// Canny edge detection
};

MyApi.cpp

#include"MyAPI.h"
#include
#include

using namespace std;
using namespace cv;


void MyApi::improve_image_contrast(Mat& image)
{
	Mat dst;
	// the image is read as a 3-channel BGR image, so the effective column range must be multiplied by the number of channels
	int cols = (image.cols - 1) * image.channels();
	int offsetx = image.channels();
	int rows = image.rows;
	dst = Mat::zeros(image.size(), image.type());
	// 3x3 sharpening mask: each output pixel is 5 * center minus the sum of its four horizontal/vertical neighbours
	for (int row = 1; row < (rows - 1); row++)
	{
		const uchar* previous = image.ptr(row - 1);
		const uchar* current = image.ptr(row);
		const uchar* next = image.ptr(row + 1);
		uchar* output = dst.ptr(row);
		for (int col = offsetx; col < cols; col++)
		{
			output[col] = saturate_cast<uchar>(5 * current[col] - (current[col - offsetx] + current[col + offsetx] + previous[col] + next[col]));
		}
	}
	namedWindow("contrast_improved_image", WINDOW_AUTOSIZE);
	imshow("contrast_improved_image", dst);
}

void MyApi::filter2DAPI(Mat& image)
{
	Mat dst;
	Mat kernel = (Mat_<char>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
	filter2D(image, dst, image.depth(), kernel);
	namedWindow("contrast_improved_image", WINDOW_AUTOSIZE);
	imshow("contrast_improved_image", dst);
}

void MyApi::travel_image_pixel_and_reverse(Mat& image)
{
	Mat dst;
	dst.create(image.size(), image.type());
	int height = image.rows;
	int width = image.cols;
	int nc = image.channels();
	for (int row = 0; row < height; row++)
	{
		for (int col = 0; col < width; col++)
		{
			if (nc == 1)// single-channel grayscale image
			{
				int gray = image.at<uchar>(row, col);// read the current pixel value
				dst.at<uchar>(row, col) = 255 - gray;// invert the pixel
			}
			else if (nc == 3)// 3-channel BGR image
			{
				int b = image.at<Vec3b>(row, col)[0];
				int g = image.at<Vec3b>(row, col)[1];
				int r = image.at<Vec3b>(row, col)[2];
				// invert each channel
				dst.at<Vec3b>(row, col)[0] = 255 - b;
				dst.at<Vec3b>(row, col)[1] = 255 - g;
				dst.at<Vec3b>(row, col)[2] = 255 - r;

			}
		}
	}
	
	imshow("reverse_output", dst);
}
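
// Note: the same per-pixel inversion can be obtained with OpenCV's built-in call
// (a minimal sketch, assuming an 8-bit single- or 3-channel input):
// Mat inverted;
// bitwise_not(image, inverted);
// imshow("reverse_output", inverted);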

void MyApi::change_contrasr_brightness(Mat& image)
{
	Mat dst;
	int height = image.rows;
	int width = image.cols;
	dst = Mat::zeros(image.size(), image.type());
	float alpha = 1.2;// contrast gain
	float beta = 100;// brightness offset
	for (int row = 0; row < height; row++)
	{
		for (int col = 0; col < width; col++)
		{
			if (image.channels() == 3)
			{
				float b = image.at<Vec3b>(row, col)[0];
				float g = image.at<Vec3b>(row, col)[1];
				float r = image.at<Vec3b>(row, col)[2];

				dst.at<Vec3b>(row, col)[0] = saturate_cast<uchar>(b * alpha + beta);
				dst.at<Vec3b>(row, col)[1] = saturate_cast<uchar>(g * alpha + beta);
				dst.at<Vec3b>(row, col)[2] = saturate_cast<uchar>(r * alpha + beta);
			}
			else if (image.channels() == 1) 
			{
				float v = image.at<uchar>(row, col);
				dst.at<uchar>(row, col) = saturate_cast<uchar>(v * alpha + beta);
			}
		}
	}
	namedWindow("contrast_brightness_change_image", WINDOW_AUTOSIZE);
	imshow("contrast_brightness_change_image", dst);
}
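
// Note: OpenCV performs the same linear transform dst = alpha * src + beta in a
// single call; a minimal sketch using the alpha/beta values above:
// Mat converted;
// image.convertTo(converted, -1, 1.2, 100);
// imshow("contrast_brightness_change_image", converted);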

void MyApi::draw_line_onImgage(Mat& image)
{
	Point p1 = Point(20, 30);
	Point p2;
	p2.x = 300;
	p2.y = 300;
	Scalar color = Scalar(0, 0, 255);
	line(image, p1, p2, color, 1, LINE_8);

	namedWindow("output", WINDOW_AUTOSIZE);
	imshow("output", image);
}

void MyApi::draw_rectangle_onImage(Mat& image)
{
	Rect rect = Rect(250, 220, 280, 250);
	Scalar color = Scalar(255, 0, 0);
	rectangle(image, rect, color, 2, LINE_8);
	namedWindow("output", WINDOW_AUTOSIZE);
	imshow("output", image);

}

void MyApi::image_blur(Mat& image)
{
	Mat dst;
	blur(image, dst, Size(3, 3), Point(-1, -1));
	namedWindow("均值模糊", WINDOW_AUTOSIZE);
	imshow("均值模糊", dst);

	GaussianBlur(image, dst, Size(5, 5), 11, 11);
	namedWindow("高斯模糊", WINDOW_AUTOSIZE);
	imshow("高斯模糊", dst);
}
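
// MyApi.h declares image_filter (image filtering) but no definition appears in
// this file. A minimal sketch of one possible implementation using OpenCV's
// median and bilateral filters; the kernel size and sigma values are assumptions:
void MyApi::image_filter(Mat& image)
{
	Mat median_dst, bilateral_dst;
	medianBlur(image, median_dst, 5);// effective against salt-and-pepper noise
	bilateralFilter(image, bilateral_dst, 15, 100, 5);// smooths while preserving edges
	imshow("median_filter", median_dst);
	imshow("bilateral_filter", bilateral_dst);
}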

void MyApi::extract_hLine_vLine(Mat& image)
{
	Mat gray_image;
	cvtColor(image, gray_image, COLOR_BGR2GRAY);
	imshow("gray_image", gray_image);

	// convert the grayscale image to a binary image; only a grayscale image can be binarized
	Mat binImage;
	// ~ inverts the input so that black and white are swapped; this step is optional
	adaptiveThreshold(~gray_image, binImage, 255,
		ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 15, -2);
	imshow("binary_image", binImage);

	// opening operation (erosion followed by dilation)
	Mat hline = getStructuringElement(MORPH_RECT, Size(image.cols / 16, 1), Point(-1, -1));// horizontal structuring element
	Mat vline = getStructuringElement(MORPH_RECT, Size(1, image.rows / 16), Point(-1, -1));// vertical structuring element
	Mat kernel = getStructuringElement(MORPH_RECT, Size(5, 5), Point(-1, -1));// rectangular structuring element
	// extract horizontal lines
	//Mat h_temp, h_dst;
	//erode(binImage, h_temp, hline);
	//dilate(h_temp, h_dst, hline);
	//bitwise_not(h_dst, h_dst);// swap black and white in the output
	//imshow("Final Result", h_dst);

	// extract vertical lines; morphologyEx(binImage, dst, MORPH_OPEN, vline) would do the same in one call
	Mat v_temp, v_dst;
	erode(binImage, v_temp, vline);
	dilate(v_temp, v_dst, vline);
	//bitwise_not(v_dst, v_dst);// swap black and white in the output
	imshow("Final Result", v_dst);
}
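
// Note: the erode/dilate pair above is exactly a morphological opening, so the
// vertical-line extraction can also be written with morphologyEx
// (a minimal sketch reusing binImage and vline from above):
// Mat open_dst;
// morphologyEx(binImage, open_dst, MORPH_OPEN, vline);
// imshow("Final Result", open_dst);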
 
void MyApi::image_pyramid_(Mat& image)
{
	Mat dst;
	// upsampling
	pyrUp(image, dst, Size(image.cols * 2, image.rows * 2));
	imshow("pyr_up_output", dst);

	// downsampling
	Mat s_down;
	pyrDown(image, s_down, Size(image.cols / 2, image.rows / 2));
	imshow("pyr_down_output", s_down);

	// difference of Gaussians (DoG)
	Mat gray_image, g1, g2, diff_image;
	cvtColor(image, gray_image, COLOR_BGR2GRAY);
	GaussianBlur(gray_image, g1, Size(3, 3), 0, 0);
	GaussianBlur(g1, g2, Size(3, 3), 0, 0);
	// compute the difference of the two blurred images
	subtract(g1, g2, diff_image, Mat());
	normalize(diff_image, diff_image, 255, 0, NORM_MINMAX);// stretch to the 0-255 range so the result is visible
	imshow("DoG_image", diff_image);
}


void threshold_demo(Mat &image)
{
	Mat gray_image;// thresholding requires a grayscale image
	cvtColor(image, gray_image, COLOR_BGR2GRAY);
}
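
// Note: threshold_demo above stops after the grayscale conversion; a minimal
// sketch of the thresholding step it presumably leads to (the fixed threshold
// value of 127 is an assumption):
// Mat binary;
// threshold(gray_image, binary, 127, 255, THRESH_BINARY);
// imshow("threshold_demo", binary);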

void MyApi::linear_filter(Mat & image)
{
	//Mat x_dst;
	// Robert operator, X direction
	//Mat kerner_x = (Mat_<int>(2, 2) << 1, 0, 0, -1);
	//filter2D(image, x_dst, -1, kerner_x, Point(-1, -1), 0.0);

	// Robert operator, Y direction
	//Mat y_dst;
	//Mat kerner_y = (Mat_<int>(2, 2) << 0, 1, -1, 0);
	//filter2D(image, y_dst, -1, kerner_y, Point(-1, -1), 0.0);

	// Sobel operator, X direction
	//Mat sobel_x_dst;
	//Mat sobel_kerner_x = (Mat_<int>(3, 3) << -1, 0, 1, -2, 0, 2, -1, 0, 1);
	//filter2D(image, sobel_x_dst, -1, sobel_kerner_x, Point(-1, -1), 0.0);

	// Sobel operator, Y direction
	//Mat sobel_y_dst;
	//Mat sobel_kerner_y = (Mat_<int>(3, 3) << -1, -2, -1, 0, 0, 0, 1, 2, 1);
	//filter2D(image, sobel_y_dst, -1, sobel_kerner_y, Point(-1, -1), 0.0);

	//namedWindow("Robert_x", WINDOW_FREERATIO);
	//imshow("Robert_x", x_dst);

	//namedWindow("Robert_y", WINDOW_FREERATIO);
	//imshow("Robert_y", y_dst);

	//namedWindow("Sobel_x", WINDOW_FREERATIO);
	//imshow("Sobel_x", sobel_x_dst);

	//namedWindow("Sobel_y", WINDOW_FREERATIO);
	//imshow("Sobel_y", sobel_y_dst);

	// custom kernel: the blur becomes progressively stronger, deepening once every 0.5 s
	Mat dst;
	int c = 0;
	int index = 0;
	int ksize = 0;
	while (true)
	{
		c = waitKey(500);
		if ((char)c == 27)// break out when ESC is pressed
		{
			break;
		}
		ksize = 4 + (index % 5) * 2 + 1;
		Mat kernel = Mat::ones(ksize, ksize, CV_32F) / (float)(ksize * ksize);
		filter2D(image, dst, -1, kernel, Point(-1, -1));
		index++;
		namedWindow("自定义算子", WINDOW_FREERATIO);
		imshow("自定义算子", dst);
	}

}

void MyApi::image_edge_processing(Mat& image)
{
	// the image border changes every half second; the border type can also be chosen with key presses
	Mat dst;
	int top = (int)(0.05 * image.rows);
	int bottom = (int)(0.05 * image.rows);
	int left = (int)(0.05 * image.cols);
	int right = (int)(0.05 * image.cols);
	RNG rng(12345);
	int borderType = BORDER_DEFAULT;

	int c = 0;
	while (true)
	{
		c = waitKey(500);
		//ESC
		if ((char)c == 27)
		{
			break;
		}
		if ((char)c == 'r')
		{
			borderType = BORDER_REPLICATE;
		}
		else if ((char)c == 'w')
		{
			borderType = BORDER_WRAP;
		}
		else if ((char)c == 'c')
		{
			borderType = BORDER_CONSTANT;
		}
		Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
		copyMakeBorder(image, dst, top, bottom, left, right, borderType, color);
		imshow("OUTPUT", dst);
	}

}

void MyApi::image_edge_extract_Sobel(Mat& image)
{
	Mat gray_image, dst;
	GaussianBlur(image, dst, Size(5, 5), 0, 0);
	cvtColor(dst, gray_image, COLOR_BGR2GRAY);
	Mat xgard, ygrad;
	Sobel(gray_image, xgard, CV_16S, 1, 0, 3);
	Sobel(gray_image, ygrad, CV_16S, 0, 1, 3);
	// convert the 16-bit gradients back to 8-bit for display
	convertScaleAbs(xgard, xgard);
	convertScaleAbs(ygrad, ygrad);
	// gradient in the x direction
	imshow("xgrad", xgard);
	// gradient in the y direction
	imshow("ygrad", ygrad);
	// combined x/y gradient
	Mat xygrad;
	addWeighted(xgard, 0.5, ygrad, 0.5,0,xygrad);
	imshow("xygrad", xygrad);
}

void MyApi::image_edge_extraxt_Laplance(Mat& image)
{
	Mat gray_image, edge_image, dst;
	GaussianBlur(image, dst, Size(3, 3), 0, 0);
	cvtColor(dst, gray_image, COLOR_BGR2GRAY);
	Laplacian(gray_image, edge_image, CV_16S, 3);
	convertScaleAbs(edge_image, edge_image);
	imshow("output", edge_image);
	threshold(edge_image, edge_image, 0, 255, THRESH_OTSU | THRESH_BINARY);
	imshow("threshold_output", edge_image);
}

void canny_demo(int,void*);
void MyApi::image_canny_edge_detect(Mat& image)
{
	const char* OUTPUT_TITLE = "Canny Result";
	int t1_value = 50;
	int max_value = 255;
	Mat gray_image;
	cvtColor(image, gray_image, COLOR_BGR2GRAY);
	namedWindow(OUTPUT_TITLE, WINDOW_AUTOSIZE);// the window must exist before the trackbar is attached to it
	createTrackbar("Threshold Value:", OUTPUT_TITLE, &t1_value, max_value, canny_demo);
	canny_demo(0, 0);
}

test.cpp

#include <opencv2/opencv.hpp>
#include <iostream>
#include "MyApi.h"
using namespace std;
using namespace cv;




int main()
{
	Mat src, dst;
	src = imread("D:\\testImage\\123456.png");
	if (!src.data)
	{
		cout << "could not load Image...";
		return -1;
	}
	namedWindow("input image", WINDOW_AUTOSIZE);
	imshow("input image", src);

	MyApi ma;
	ma.image_canny_edge_detect(src);

	waitKey(0);
	return 0;
}

void canny_demo(int, void*)
{

}
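
The canny_demo callback above is left empty, so moving the trackbar has no visible effect yet. Below is a minimal sketch of what the callback could look like, assuming the grayscale image, the threshold value, and the output window title are moved into file-scope variables that both main() and the callback can see; these global names are assumptions, not part of the original code.

// assumed file-scope state shared between main() and the trackbar callback
Mat gray_image_global;
int t1_value_global = 50;
const char* CANNY_TITLE = "Canny Result";

void canny_demo(int, void*)
{
	Mat edges;
	// the high threshold is conventionally 2-3x the low threshold
	Canny(gray_image_global, edges, t1_value_global, t1_value_global * 2, 3, false);
	imshow(CANNY_TITLE, edges);
}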
