//都是自己辛辛苦苦编写的,转载注意备注
//quickopencv.h
#pragma once
#include
using namespace cv;
typedef unsigned char BYTE; // 8-bit unsigned byte alias (Windows-style name) used by pixel code
// Collection of self-contained OpenCV demos: each method takes an input
// image (or reads its own input) and performs one operation end to end.
class QuickDemo {
public:
	void colorSpace_Demo(Mat &image);
	void pixel_visit_demo(Mat &image);
	void pixel_operators_demo(Mat &image);
	void bitwise_demo(Mat &image);
	void channels_demo(Mat &image);
	void inrange_demo(Mat &image);
	void pixel_statistic_demo(Mat &image);
	void drawing_demo(Mat &image);
	void random_drawing_demo();
	void mouse_drawing_demo(Mat &image);
	void histogram_demo(Mat &image);
	Mat histogram_grayImage(const Mat& image);
	void histogram_2d_demo(Mat &image);
	void norm_demo(Mat &image);
	void resize_demo(Mat &image);
	void resize_fixed_demo();
	void flip_demo(Mat &image);
	void rotate_demo(Mat &image);
	void video_demo(Mat &image);
	void histograme_eq_demo(Mat &image);
	void blur_demo(Mat &image);
	void gaussian_blur_demo(Mat &image);
	void bifilter_demo(Mat &image);
	void face_detection_demo();//no image parameter: this demo reads video frames itself
	void elementsizechange_open_demo(Mat& image);
	void elementsizechange_close_demo(Mat& image);
	void elementsizechange_gradient_demo(Mat& image);
	void elementsizechange_tophat_demo(Mat& image);
	void elementsizechange_blackhat_demo(Mat& image);
	void draw_ROI_histogram_demo(Mat& image);
	void template_match_demo(Mat *pTo,Mat *pTemplate,Mat *src);
	void pic_masking1_way1_demo(Mat& image);//image sharpening mask, way 1 (manual row-pointer loop)
	void pic_masking_way2_demo(Mat& image);//image sharpening mask, way 2 (filter2D kernel)
	void read_write_piexl_demo(Mat& image);
	void image_blending_demo();//blend two images with addWeighted
	void adjust_brightness_and_contrast_demo(Mat& image);
	void image_blur_demo(Mat& image);//mean blur and Gaussian blur
	void image_filtering_demo(Mat& image);
	void convexhull_detect_demo();
	Mat gray_histogram(Mat& image);
	Mat grayImage_mask_print(Mat &image);//mask a gray image by gray-value range (matches painted red)
	/*
	Mask in red the region whose three BGR channel values all fall inside a given range;
	mask in blue the region that additionally satisfies a brightness range; print the
	ratio of the masked area to the whole image.
	*/
	Mat mask_localImage(Mat& image);
	/*draw a bounding box on the image*/
	void draw_interest_pluse(Mat& image, int re_h, int re_w);
	void histogram_local_image(Mat& image);//show the histogram of a local ROI
	void draw_interset(Mat& image_add);//draw a rectangle around the red-colored part of the image
	void image_mask(Mat& image, Mat& heightMat);//mask pixels whose height value exceeds 1000 in red
	void image_polar_mask_demo(Mat& image, Mat& heightMat); //polarity calibration mask
	void lead_box(Mat& image);//lead-box defect detection: locate the lead inspection window, then each lead's head and tail
	Mat lead_mask(Mat& image,Mat &gray_image);//mask the main lead ROI window
	Mat sub_lead_mask(Mat& image, Mat& gray_image);//mark leads whose BGR values and brightness fall inside a range
	Mat min_box(Mat& rgb_image, Mat& after_mask);//draw a marker box around each lead inside the ROI (fixed: this comment was missing its //)
	Mat unevenLightCompensate(Mat& image, int blockSize);//compensate for uneven illumination
	Mat binary_image(Mat &image);
};
//quickdemo.cpp
#include "quickopencv.h"
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <iostream>
using namespace cv;
using namespace std;
//::表示引用自己创建的头文件方法
void QuickDemo::colorSpace_Demo(Mat &image) {
	// Convert the BGR input to HSV and grayscale, display both, and save them to disk.
	Mat hsv_img;
	Mat gray_img;
	cvtColor(image, hsv_img, COLOR_BGR2HSV);
	cvtColor(image, gray_img, COLOR_BGR2GRAY);
	imshow("HSV", hsv_img);
	imshow("灰度", gray_img);
	imwrite("D://hsv.png", hsv_img);
	imwrite("D://gray.png", gray_img);
}
void QuickDemo::pixel_visit_demo(Mat &image) {
	// Invert every pixel of the image in place using row pointers (fast path),
	// then display the result.  Handles 1-channel and 3-channel 8-bit images.
	int w = image.cols;
	int h = image.rows;
	int dims = image.channels();
	for (int row = 0; row < h; row++) {
		// raw uchar pointer to the start of this row's interleaved data
		uchar* current_row = image.ptr<uchar>(row);
		for (int col = 0; col < w; col++) {
			if (dims == 1) {
				int pv = *current_row;
				*current_row++ = 255 - pv;
			}
			if (dims == 3) {
				// Fixed: at() requires its <Vec3b> template argument for 3-channel access.
				Vec3b bgr = image.at<Vec3b>(row, col);
				// write the inverted B, G, R components through the row pointer
				*current_row++ = 255 - bgr[0];
				*current_row++ = 255 - bgr[1];
				*current_row++ = 255 - bgr[2];
			}
		}
	}
	imshow("像素读写显示", image);
}
void QuickDemo::pixel_operators_demo(Mat &image){
	// Add a constant 50 to every channel of every pixel (saturating) and show it.
	Mat brightened = image + Scalar(50, 50, 50);
	imshow("像素加法操作", brightened);
}
void QuickDemo::bitwise_demo(Mat &image) {
	// Prepare two black 256x256 canvases, each with one filled rectangle
	// (thickness -1 = filled), and display them.
	// NOTE(review): despite the name, no bitwise operation is applied here —
	// these canvases are just the classic inputs for bitwise_and/or demos.
	Mat m1 = Mat::zeros(Size(256, 256), CV_8UC3);
	Mat m2 = Mat::zeros(Size(256, 256), CV_8UC3);
	const Rect box1(100, 100, 80, 80);
	const Rect box2(150, 150, 80, 80);
	rectangle(m1, box1, Scalar(255, 255, 0), -1, LINE_8, 0);
	rectangle(m2, box2, Scalar(0, 255, 255), -1, LINE_8, 0);
	imshow("m1", m1);
	imshow("m2", m2);
}
//主要完成通道分离与通道合并
//Channel split and merge demo.
void QuickDemo::channels_demo(Mat &image) {
	// Split the BGR image into separate single-channel planes, show each one,
	// then zero the green and red planes and merge back to a blue-only image.
	std::vector<Mat> mv;	// fixed: the element type <Mat> was stripped
	split(image, mv);
	imshow("蓝色", mv[0]);
	imshow("绿色", mv[1]);
	imshow("红色", mv[2]);
	Mat dst;
	mv[1] = 0;
	mv[2] = 0;
	merge(mv, dst);
	// NOTE(review): this reuses the "蓝色" window, replacing the blue-plane view.
	imshow("蓝色", dst);
}
void QuickDemo::inrange_demo(Mat& image) {
	// Binarize in HSV space: pixels whose H/S/V fall inside the green range
	// (H 35-77, S 43-255, V 46-255) become white (255), all others black (0).
	Mat hsv;
	cvtColor(image, hsv, COLOR_BGR2HSV);
	const Scalar green_low(35, 43, 46);
	const Scalar green_high(77, 255, 255);
	Mat mask;
	inRange(hsv, green_low, green_high, mask);
	imshow("mask", mask);
}
void QuickDemo::pixel_statistic_demo(Mat& image) {
	// Print per-channel min/max plus the global per-channel mean and stddev.
	double minv = 0.0, maxv = 0.0;
	Point minLoc, maxLoc;
	std::vector<Mat> mv;	// fixed: the element type <Mat> was stripped
	split(image, mv);
	for (size_t i = 0; i < mv.size(); i++) {	// size_t avoids signed/unsigned mismatch
		minMaxLoc(mv[i], &minv, &maxv, &minLoc, &maxLoc, Mat());
		std::cout << "min value:" << minv << "max value:" << maxv << std::endl;
	}
	Mat mean, stddev;
	meanStdDev(image, mean, stddev);
	std::cout << "means:" << mean << "stddev:" << stddev << std::endl;
}
void QuickDemo::drawing_demo(Mat& image) {
	// Draw a rectangle and a circle on a blank overlay, blend the overlay
	// with the source (70% image / 30% overlay) and display the blend.
	Rect rect;
	rect.x = 200;
	rect.y = 200;
	rect.width = 250;
	rect.height = 300;
	Mat bg = Mat::zeros(image.size(), image.type());
	rectangle(bg, rect, Scalar(0, 0, 255), 2, 8, 0);
	// blue circle of radius 15 centered at (350,400), thickness 2
	circle(bg, Point(350, 400), 15, Scalar(255, 0, 0), 2, 0);
	Mat dst;
	addWeighted(image, 0.7, bg, 0.3, 0, dst);
	// Fixed: display the blended result; the original showed the bare overlay,
	// which left the addWeighted call as dead code.
	imshow("绘制演示", dst);
}
void QuickDemo::random_drawing_demo() {
	// Keep drawing random colored line segments on a 512x512 canvas until ESC.
	Mat canvas = Mat::zeros(Size(512, 512), CV_8UC3);
	const int w = canvas.cols;
	const int h = canvas.rows;
	RNG rng(12345);	// fixed seed -> reproducible sequence
	for (;;) {
		if (waitKey(10) == 27) {	// ESC stops the loop
			break;
		}
		// endpoints of the segment, uniformly distributed over the canvas
		int x1 = rng.uniform(0, w);
		int y1 = rng.uniform(0, h);
		int x2 = rng.uniform(0, w);
		int y2 = rng.uniform(0, h);
		// random BGR color
		int b = rng.uniform(0, 255);
		int g = rng.uniform(0, 255);
		int r = rng.uniform(0, 255);
		line(canvas, Point(x1, y1), Point(x2, y2), Scalar(b, g, r), 1, LINE_4, 0);
		imshow("随机错误演示:", canvas);
	}
}
Point sp(-1, -1); //mouse-drag start point; (-1,-1) is the "no drag in progress" sentinel
Point ep(-1, -1);//mouse-drag end point
Mat temp;//clean copy of the image used to erase the rubber-band rectangle while dragging
// Mouse callback: rubber-band a rectangle from the press position; on release
// the rectangle is drawn permanently and the drag state is reset.
static void on_draw(int event, int x, int y, int flag, void* userdata) {
	Mat image = *((Mat*)userdata);
	if (event == EVENT_LBUTTONDOWN) {//left button pressed: remember the start
		sp.x = x;
		sp.y = y;
		std::cout << "start point" << sp << std::endl;
	}
	else if (event == EVENT_LBUTTONUP) {//left button released: commit the rectangle
		ep.x = x;
		ep.y = y;
		int dx = ep.x - sp.x;
		int dy = ep.y - sp.y;
		if (dx > 0 && dy > 0) {
			Rect box(sp.x, sp.y, dx, dy);
			rectangle(image, box, Scalar(0, 0, 255), 2, 8, 0);
			imshow("鼠标绘制显示:", image);//refresh immediately after drawing
			// reset the start point to the sentinel so the next press starts fresh
			sp.x = -1;
			sp.y = -1;
		}
	}
	else if (event == EVENT_MOUSEMOVE) {
		// Fixed: the drag-in-progress test was `sp.x > 10`, which silently
		// ignored drags starting in the leftmost 10 columns; >= 0 matches the
		// (-1,-1) sentinel set on release.
		if (sp.x >= 0 && sp.y >= 0) {
			ep.x = x;
			ep.y = y;
			int dx = ep.x - sp.x;
			int dy = ep.y - sp.y;
			if (dx > 0 && dy > 0) {
				Rect box(sp.x, sp.y, dx, dy);
				// restore the clean image so the previous rubber band is erased
				temp.copyTo(image);
				imshow("ROI区域", image(box));
				rectangle(image, box, Scalar(0, 0, 255), 2, 8, 0);
				imshow("鼠标绘制", image);
			}
		}
	}
}
void QuickDemo::mouse_drawing_demo(Mat &image) {
	// Attach the rubber-band drawing callback to a window showing `image`.
	namedWindow("鼠标绘制", WINDOW_AUTOSIZE);
	setMouseCallback("鼠标绘制", on_draw, (void*)(&image));
	// Fixed: show the image in the SAME window the callback is attached to.
	// The original showed it in a differently named window, so the callback
	// window stayed blank until the first mouse move.
	imshow("鼠标绘制", image);
	temp = image.clone();	// clean copy used by on_draw to erase the rubber band
}
void QuickDemo::norm_demo(Mat &image) {
	// Convert to float and min-max normalize into [0,1] for display.
	Mat dst;
	image.convertTo(dst, CV_32F);//convert the 8-bit image to float
	std::cout << image.type() << dst.type() << std::endl;
	// Fixed: normalize the float copy, not the original image; the original
	// call normalized `image` into dst, discarding the conversion above and
	// producing an all-but-black display.
	normalize(dst, dst, 1.0, 0, NORM_MINMAX);
	imshow("图像归一化", dst);
}
void QuickDemo::resize_demo(Mat &image) {
	// Shrink to half size and enlarge to 1.5x with linear interpolation.
	Mat zoomin, zoomout;
	int h = image.rows;
	int w = image.cols;
	// Fixed: cv::Size takes (width, height); the original passed (h, w),
	// which swapped the axes for any non-square image.
	resize(image, zoomin, Size(w / 2, h / 2), 0, 0, INTER_LINEAR);//fx=fy=0: use the explicit Size
	imshow("zoomin", zoomin);
	resize(image, zoomout, Size(3 * w / 2, 3 * h / 2), 0, 0, INTER_LINEAR);
	imshow("zoomout", zoomout);
}
void QuickDemo::resize_fixed_demo() {
Mat src;
string path1 = "D:\\testImage\\";
string path2 = "D:\test_0data\\";
string pp = ".jpg";
for (int i = 1; i < 4; i++)
{
string c;
c = to_string(i);
string path = path1 + c + pp;
src = imread(path, 1);
resize(src, src, Size(120, 120));
string pathh = path2 + c + pp;
imwrite(pathh, src);
}
}
void QuickDemo::flip_demo(Mat &image) {
	// flip code: 0 = around the x-axis (vertical flip), 1 = around the
	// y-axis (horizontal flip), -1 = around both axes (180-degree turn).
	Mat flipped;
	flip(image, flipped, 0);
	imshow("图像反转", flipped);
}
void QuickDemo::rotate_demo(Mat& image) {
	// Rotate the image 45 degrees about its center without scaling (factor 1.0).
	const int w = image.cols;
	const int h = image.rows;
	const Point2f center(w / 2, h / 2);
	Mat M = getRotationMatrix2D(center, 45, 1.0);	// 2x3 affine rotation matrix
	Mat rotated;
	warpAffine(image, rotated, M, image.size());
	imshow("旋转演示", rotated);
}
void QuickDemo::video_demo(Mat& image) {
	// Read a video, mirror each frame horizontally, run the color-space demo
	// on it, and write the processed frames to D:/test.mp4.  ESC stops early.
	//VideoCapture capture(0); //pass 0 instead to read from the default camera
	// Fixed: "D:\video_data" contained the escape sequence \v; the backslash
	// must be doubled in a string literal.
	VideoCapture capture("D:\\video_data");
	// frame geometry and rate (get() returns double; narrow explicitly)
	int frame_width = (int)capture.get(CAP_PROP_FRAME_WIDTH);
	int frame_height = (int)capture.get(CAP_PROP_FRAME_HEIGHT);
	int frame_count = (int)capture.get(CAP_PROP_FRAME_COUNT);
	double fps = capture.get(CAP_PROP_FPS);
	std::cout << "frame width:" << frame_width << std::endl;
	std::cout << "frame height:" << frame_height << std::endl;
	std::cout << "frame count:" << frame_count << std::endl;
	std::cout << "FPS" << fps << std::endl;
	// writer reuses the source codec, frame rate and size
	VideoWriter writer("D:/test.mp4", (int)capture.get(CAP_PROP_FOURCC), fps, Size(frame_width, frame_height), true);
	Mat frame;
	while (true) {
		capture.read(frame);
		// Fixed: test for end-of-stream BEFORE touching the frame; the
		// original called flip() on a potentially empty Mat, which throws.
		if (frame.empty()) {
			break;
		}
		flip(frame, frame, 1);	// mirror horizontally
		imshow("frame", frame);
		// further per-frame processing goes here
		colorSpace_Demo(frame);
		writer.write(frame);
		int c = waitKey(10);
		if (c == 27) {	// ESC quits
			break;
		}
	}
	// always release both so the output container is finalized
	capture.release();
	writer.release();
}
void QuickDemo::histogram_demo(Mat& image) {
	/* Compute the 256-bin histogram of each BGR channel and plot the three
	   curves (blue/green/red) in one window.  Histograms are translation-,
	   rotation- and scale-insensitive statistics of the pixel values. */
	std::vector<Mat> bgr_plane;	// fixed: the element type <Mat> was stripped
	split(image, bgr_plane);
	const int channels[1] = { 0 };
	const int bins[1] = { 256 };	//one bin per gray level
	float hranges[2] = { 0,255 };
	const float* ranges[1] = { hranges };
	Mat b_hist;
	Mat g_hist;
	Mat r_hist;
	// one image per call, channel 0 of each plane, no mask, 1-D histogram
	calcHist(&bgr_plane[0], 1, 0, Mat(), b_hist, 1, bins, ranges);
	calcHist(&bgr_plane[1], 1, 0, Mat(), g_hist, 1, bins, ranges);
	calcHist(&bgr_plane[2], 1, 0, Mat(), r_hist, 1, bins, ranges);
	int hist_w = 512;
	int hist_h = 400;
	int bin_w = cvRound((double)hist_w / bins[0]);
	Mat histImage = Mat::zeros(hist_h, hist_w, CV_8UC3);
	// scale bin counts so the tallest bin fits the plot height
	normalize(b_hist, b_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat());
	normalize(g_hist, g_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat());
	normalize(r_hist, r_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat());
	for (int i = 1; i < bins[0]; i++) {
		// fixed: at() requires its <float> template argument (calcHist outputs CV_32F)
		line(histImage, Point(bin_w * (i - 1), hist_h - cvRound(b_hist.at<float>(i - 1))),
			Point(bin_w * (i), hist_h - cvRound(b_hist.at<float>(i))), Scalar(255, 0, 0), 2, 8, 0);
		line(histImage, Point(bin_w * (i - 1), hist_h - cvRound(g_hist.at<float>(i - 1))),
			Point(bin_w * (i), hist_h - cvRound(g_hist.at<float>(i))), Scalar(0, 255, 0), 2, 8, 0);
		line(histImage, Point(bin_w * (i - 1), hist_h - cvRound(r_hist.at<float>(i - 1))),
			Point(bin_w * (i), hist_h - cvRound(r_hist.at<float>(i))), Scalar(0, 0, 255), 2, 8, 0);
	}
	namedWindow("Histogram Demo", WINDOW_AUTOSIZE);
	imshow("Histogram Demo", histImage);
}
Mat QuickDemo::histogram_grayImage(const Mat& image)
{
	// Compute the 256-bin histogram of a single-channel 8-bit image and
	// render it as a black-and-white bar chart, resized to 256x256.
	int channels[] = { 0 };		//histogram over channel 0 only
	const int histSize[] = { 256 };	//one bin per gray level
	float range[] = { 0,256 };
	const float* ranges[] = { range };
	// hist is 256x1 CV_32F; row i holds the pixel count of gray level i
	Mat hist;
	calcHist(&image, 1, channels, Mat(), hist, 1, histSize, ranges, true, false);
	// the tallest bin determines the chart height
	double maxValue = 0;
	minMaxLoc(hist, 0, &maxValue, 0, 0);
	int rows = cvRound(maxValue);
	Mat histImage = Mat::zeros(rows, 256, CV_8UC1);
	for (int i = 0; i < 256; i++)
	{
		// fixed: at() requires its <float> template argument
		int temp = (int)(hist.at<float>(i, 0));
		if (temp)
		{
			// image origin is top-left, so fill columns from the bottom upward
			histImage.col(i).rowRange(Range(rows - temp, rows)) = 255;
		}
	}
	// the raw chart can be extremely tall; squash it to a fixed square
	Mat resizeImage;
	resize(histImage, resizeImage, Size(256, 256));
	return resizeImage;
}
void QuickDemo::histogram_2d_demo(Mat& image) {
	// 2-D Hue/Saturation histogram rendered as a JET-colored tile image.
	Mat hsv, hs_hist;
	cvtColor(image, hsv, COLOR_BGR2HSV);
	int hbins = 30;	//H spans 0-180; 30 bins groups hues ~6 values wide
	int sbins = 32;
	int hist_bins[] = { hbins,sbins };
	float h_range[] = { 0,180 };
	float s_range[] = { 0,256 };
	const float* hs_ranges[] = { h_range,s_range };
	int hs_channels[] = { 0,1 };	//H and S channels of the HSV image
	calcHist(&hsv, 1, hs_channels, Mat(), hs_hist, 2, hist_bins, hs_ranges, true, false);
	double maxVal = 0;
	minMaxLoc(hs_hist, 0, &maxVal, 0, 0);
	int scale = 10;	//each bin drawn as a 10x10 tile
	Mat hist2d_image = Mat::zeros(sbins * scale, hbins * scale, CV_8UC3);
	for (int h = 0; h < hbins; h++) {
		for (int s = 0; s < sbins; s++) {
			// fixed: at() requires its <float> template argument
			float binVal = hs_hist.at<float>(h, s);
			int intensity = cvRound(binVal * 255 / maxVal);	//brightness ~ bin count
			rectangle(hist2d_image, Point(h * scale, s * scale), Point((h + 1) * scale - 1, (s + 1) * scale - 1), Scalar::all(intensity), -1);
		}
	}
	applyColorMap(hist2d_image, hist2d_image, COLORMAP_JET);
	imshow("H-S Histogram", hist2d_image);
	imwrite("D:/hist_2d.png", hist2d_image);
}
void QuickDemo::histograme_eq_demo(Mat &image) {
	// equalizeHist works on single-channel images only, so convert to gray first.
	Mat gray;
	cvtColor(image, gray, COLOR_BGR2GRAY);
	imshow("灰度图像:", gray);
	Mat equalized;
	equalizeHist(gray, equalized);
	imshow("直方图均衡化演示:", equalized);
}
void QuickDemo::blur_demo(Mat& image) {
	// Box-filter (mean) blur with a 13x13 kernel; Point(-1,-1) anchors the
	// output at the kernel center.
	Mat blurred;
	blur(image, blurred, Size(13, 13), Point(-1, -1));
	imshow("图像模糊", blurred);
}
void QuickDemo::gaussian_blur_demo(Mat &image) {
	// Gaussian smoothing: 5x5 window, sigma 15.
	Mat smoothed;
	GaussianBlur(image, smoothed, Size(5, 5), 15);
	imshow("高斯模糊", smoothed);
}
void QuickDemo::bifilter_demo(Mat &image) {
	// Edge-preserving bilateral filter: d=0 (derived from sigmaSpace),
	// sigmaColor=100, sigmaSpace=10.
	Mat smoothed;
	bilateralFilter(image, smoothed, 0, 100, 10);
	imshow("双边模糊", smoothed);
}
void QuickDemo::face_detection_demo() {
std::string root_dir = "D:/opencv/opencv/sources/samples/dnn/ace_detector/";
//读取深度学习网络
dnn::Net net = dnn::readNetFromTensorflow(root_dir+"opencv_face_detector_uint8.pb",root_dir+"opencv_face_detector.pbtxt");
//网络模型加载好了我们需要加载视频
VideoCapture capture("D:/opencv/opencv/sources/samples/data/vtest.avi");
Mat frame;
while (true) {
capture.read(frame);
if (frame.empty()) {
break;
}
//To do something.....
Mat blob = dnn::blobFromImage(frame, 1.0, Size(300, 300), Scalar(104, 177, 123), false, false);
net.setInput(blob);
Mat probs = net.forward();
Mat detectionMat(probs.size[2], probs.size[3], CV_32F, probs.ptr());
//解析结果
for (int i = 0; i < detectionMat.rows; i++) {
float confidence = detectionMat.at(i,2);
if (confidence > 0.5) {
//得到矩形的宽高
int x1 = static_cast(detectionMat.at(i, 3) * frame.cols);
int y1 = static_cast(detectionMat.at(i, 4) * frame.rows);
int x2 = static_cast(detectionMat.at(i, 5) * frame.cols);
int y2 = static_cast(detectionMat.at(i, 6) * frame.rows);
Rect box(x1, y1, x2 - x1, y2 - y1);
rectangle(frame, box, Scalar(0, 0, 255), 2, 8, 0);
}
}
imshow("人脸检测显示", frame);
int c = waitKey(1);
if (c == 27) {//退出
break;
}
}
}
void QuickDemo::elementsizechange_open_demo(Mat& image) {
	// Morphological opening (erode then dilate) with a 5x5 rectangular kernel;
	// removes small bright specks while keeping the overall shape.
	const int radius = 2;
	Mat kernel = getStructuringElement(MORPH_RECT, Size(radius * 2 + 1, radius * 2 + 1));	//2r+1 keeps the size odd
	Mat opened;
	morphologyEx(image, opened, MORPH_OPEN, kernel);
	namedWindow("开操作后", WINDOW_FREERATIO);
	imshow("开操作后", opened);
}
void QuickDemo::elementsizechange_close_demo(Mat& image) {
	// Morphological closing (dilate then erode) with a 5x5 rectangular kernel;
	// fills small dark holes inside bright regions.
	const int radius = 2;
	Mat kernel = getStructuringElement(MORPH_RECT, Size(radius * 2 + 1, radius * 2 + 1));
	Mat closed;
	morphologyEx(image, closed, MORPH_CLOSE, kernel);
	namedWindow("闭操作以后", WINDOW_AUTOSIZE);
	imshow("闭操作以后", closed);
}
void QuickDemo::elementsizechange_gradient_demo(Mat& image) {
	// Morphological gradient (dilation minus erosion) with a 7x7 kernel;
	// highlights object outlines.
	const int radius = 3;
	Mat kernel = getStructuringElement(MORPH_RECT, Size(radius * 2 + 1, radius * 2 + 1));
	Mat grad;
	morphologyEx(image, grad, MORPH_GRADIENT, kernel);
	imshow("GRADIENT", grad);
}
void QuickDemo::elementsizechange_tophat_demo(Mat& image) {
	// Top-hat (source minus its opening) with a 7x7 kernel; isolates small
	// bright details on a darker background.
	const int radius = 3;
	Mat kernel = getStructuringElement(MORPH_RECT, Size(radius * 2 + 1, radius * 2 + 1));
	Mat tophat;
	morphologyEx(image, tophat, MORPH_TOPHAT, kernel);
	imshow("TOPHAT", tophat);
}
void QuickDemo::elementsizechange_blackhat_demo(Mat& image) {
	// Black-hat (closing minus source) with a 7x7 kernel; isolates small
	// dark details on a brighter background.
	const int radius = 3;
	Mat kernel = getStructuringElement(MORPH_RECT, Size(radius * 2 + 1, radius * 2 + 1));
	Mat blackhat;
	morphologyEx(image, blackhat, MORPH_BLACKHAT, kernel);
	imshow("BLACKHAT", blackhat);
}
void QuickDemo::template_match_demo(Mat* pTo, Mat* pTemplate, Mat* src) {
	// Normalized-correlation template matching.  Slides pTemplate over src,
	// finds the position with the highest response, then copies the winning
	// region of src into pTo.  All Mats are assumed single-channel 8-bit.
	int nHeight = src->rows;
	int nWidth = src->cols;
	int nTplHeight = pTemplate->rows;
	int nTplWidth = pTemplate->cols;
	// sum of squared template pixels (constant over all window positions)
	double dSumT = 0;
	for (int m = 0; m < nTplHeight; m++) {
		for (int n = 0; n < nTplWidth; n++) {
			int nGray = *pTemplate->ptr(m, n);
			dSumT += (double)nGray * nGray;
		}
	}
	// exhaustive search for the window with the maximal response
	double MaxR = 0;
	int nMaxX = 0;	// fixed: initialize so a degenerate search still yields (0,0)
	int nMaxY = 0;
	for (int i = 0; i < nHeight - nTplHeight + 1; i++) {
		for (int j = 0; j < nWidth - nTplWidth + 1; j++) {
			double dSumS = 0;	// sum of squared source pixels under the window
			double dSumST = 0;	// dot product of window and template
			for (int m = 0; m < nTplHeight; m++) {
				for (int n = 0; n < nTplWidth; n++) {
					int nGraySrc = *src->ptr(i + m, j + n);
					int nGrayTpl = *pTemplate->ptr(m, n);
					dSumS += (double)nGraySrc * nGraySrc;
					dSumST += (double)nGraySrc * nGrayTpl;
				}
			}
			// Fixed: normalized correlation is dot(S,T)/(|S||T|); the original
			// put dSumS in the numerator, degenerating the score to |S|/|T|.
			// Guard against an all-black window (|S| == 0).
			double R = (dSumS > 0) ? dSumST / (sqrt(dSumS) * sqrt(dSumT)) : 0;
			if (R > MaxR) {
				MaxR = R;
				nMaxX = j;
				nMaxY = i;
			}
		}
	}
	// Fixed: copy the best-match region ONCE, after the whole search; the
	// original did this inside the row loop, using a position that could be
	// stale or uninitialized.
	for (int m = 0; m < nTplHeight; m++) {
		for (int n = 0; n < nTplWidth; n++) {
			int nGray = *src->ptr(nMaxY + m, nMaxX + n);
			// fixed: at() requires its <uchar> template argument
			pTo->at<uchar>(nMaxY + m, nMaxX + n) = (uchar)nGray;
		}
	}
}
void QuickDemo::pic_masking1_way1_demo(Mat& image) {
	// Sharpen with the 3x3 Laplacian mask [0 -1 0; -1 5 -1; 0 -1 0],
	// implemented manually with row pointers (interior pixels only; the
	// one-pixel border of dst stays zero).
	int cols = (image.cols - 1) * image.channels();	//last valid interleaved column index
	int offsetx = image.channels();					//step to the horizontal neighbor
	int rows = image.rows;
	Mat dst = Mat::zeros(image.size(), image.type());
	for (int row = 1; row < rows - 1; row++) {
		const uchar* previous = image.ptr<uchar>(row - 1);
		const uchar* current = image.ptr<uchar>(row);
		const uchar* next = image.ptr<uchar>(row + 1);
		uchar* output = dst.ptr<uchar>(row);
		for (int col = offsetx; col < cols; col++) {
			// Fixed: the top neighbor previous[col] was missing from the sum
			// (the `previous` row pointer was fetched but never used), which
			// brightened the image instead of sharpening it.
			output[col] = saturate_cast<uchar>(5 * current[col]
				- (current[col - offsetx] + current[col + offsetx] + previous[col] + next[col]));
		}
	}
	imshow("contrast_image", dst);
}
void QuickDemo::pic_masking_way2_demo(Mat& image) {
	// Same 3x3 Laplacian sharpening as way 1, but via filter2D with an
	// explicit kernel.
	Mat dst;
	// fixed: Mat_ requires its element type; <char> was stripped
	Mat kernel = (Mat_<char>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
	filter2D(image, dst, image.depth(), kernel);
	imshow("contrast_image", dst);
}
void QuickDemo::read_write_piexl_demo(Mat& image) {
	// Convert to grayscale, invert every pixel in place, and show both stages.
	Mat gray_image;
	cvtColor(image, gray_image, COLOR_BGR2GRAY);
	imshow("gray_image", gray_image);
	int height = gray_image.rows;
	int width = gray_image.cols;
	for (int row = 0; row < height; row++) {
		for (int col = 0; col < width; col++) {
			// fixed: at() requires its <uchar> template argument
			int gray = gray_image.at<uchar>(row, col);
			gray_image.at<uchar>(row, col) = 255 - gray;	//invert
		}
	}
	imshow("反差图像", gray_image);
}
void QuickDemo::image_blending_demo() {
Mat image1, image2, dst;
image1 = imread("D:\\testImage\\12.jpg");
image2 = imread("D:\\testImage\\2.jpg");
if (!image1.data) {
cout << "image1图像不存在" << endl;
}
if (!image2.data) {
cout << "image2图像不存在" << endl;
}
if (image1.rows == image2.rows && image1.cols == image2.cols && image1.type() == image2.type()) {
//进行图像融合
double alpha = 0.5;
addWeighted(image1, alpha, image2, (1.0 - alpha), 0.0, dst);
imshow("image_blending", dst);
}
else {
cout << "could not blend image,the size of images is not same" << endl;
}
}
void QuickDemo::adjust_brightness_and_contrast_demo(Mat& image) {
	// Linear point transform dst = alpha*src + beta, per channel with
	// saturation: alpha scales contrast, beta shifts brightness.
	int height = image.rows;
	int width = image.cols;
	Mat dst = Mat::zeros(image.size(), image.type());
	float alpha = 1.2;	//contrast gain
	float belta = 30;	//brightness offset
	for (int row = 0; row < height; row++) {
		for (int col = 0; col < width; col++) {
			if (image.channels() == 3) {
				// fixed: at()/saturate_cast had their template arguments stripped
				float b = image.at<Vec3b>(row, col)[0];
				float g = image.at<Vec3b>(row, col)[1];
				float r = image.at<Vec3b>(row, col)[2];
				dst.at<Vec3b>(row, col)[0] = saturate_cast<uchar>(b * alpha + belta);
				dst.at<Vec3b>(row, col)[1] = saturate_cast<uchar>(g * alpha + belta);
				dst.at<Vec3b>(row, col)[2] = saturate_cast<uchar>(r * alpha + belta);
			}
			else if (image.channels() == 1) {
				float v = image.at<uchar>(row, col);
				// fixed: the gray value was read but never written back,
				// leaving dst all-zero for single-channel input
				dst.at<uchar>(row, col) = saturate_cast<uchar>(v * alpha + belta);
			}
		}
	}
	imshow("adjust_brightness_and_contrast_image", dst);
}
void QuickDemo::image_blur_demo(Mat& image) {
	// Show a 3x3 box (mean) blur and a 5x5 Gaussian blur in separate windows.
	Mat dst;
	blur(image, dst, Size(3, 3), Point(-1, -1));
	// fixed: blur() is a mean filter, not a median filter — label it correctly
	imshow("均值模糊", dst);
	GaussianBlur(image, dst, Size(5, 5), 11, 11);
	imshow("高斯模糊", dst);
}
void QuickDemo::image_filtering_demo(Mat& image) {
	// Median filter (aperture 3), then a bilateral filter (window 15,
	// sigmaColor 150, sigmaSpace 3), each shown in its own window.
	Mat filtered;
	medianBlur(image, filtered, 3);
	imshow("中值滤波", filtered);
	bilateralFilter(image, filtered, 15, 150, 3);
	imshow("双边滤波", filtered);
}
Mat QuickDemo::gray_histogram(Mat& image)
{
	// Render a pre-computed histogram Mat (CV_32F bin counts, one per row,
	// as produced by calcHist) as a bar chart on a white square canvas whose
	// side equals the number of bins.
	double maxValue = 0;
	double minValue = 0;
	minMaxLoc(image, &minValue, &maxValue, 0, 0);	//tallest bin scales the chart
	int imageSize = image.rows;
	Mat histImage(imageSize, imageSize, CV_8UC3, Scalar(255, 255, 255));
	if (maxValue <= 0) {	// fixed: avoid division by zero on an empty histogram
		return histImage;
	}
	// fixed: static_cast requires its <int> template argument
	int hpt = static_cast<int>(0.9 * imageSize);	//bars use 90% of the height
	Scalar color(172, 172, 150);	//BGR bar color
	// NOTE: a dead debug loop that printed every value was removed here.
	for (int h = 0; h < imageSize; h++)
	{
		// fixed: at() requires its <float> template argument
		float binVal = image.at<float>(h);	//pixel count for this gray level
		int intensity = static_cast<int>(binVal * hpt / maxValue);
		// one 1-pixel-wide bar per bin, drawn bottom-up
		rectangle(histImage, Point(h, imageSize), Point(h + 1, imageSize - intensity), color);
	}
	return histImage;
}
Mat QuickDemo::grayImage_mask_print(Mat& image)
{
	// Input: a single-channel gray image.  Returns a BGR copy in which every
	// pixel with gray value in (0, 40) is painted red.
	int h = image.rows;
	int w = image.cols;
	// expand the gray image to a 3-channel pseudo-gray image we can color on
	Mat image_bgr;
	cvtColor(image, image_bgr, COLOR_GRAY2BGR);
	for (int row = 0; row < h; row++)
	{
		for (int col = 0; col < w; col++) {
			// fixed: at() requires its template arguments (<uchar>/<Vec3b>)
			uchar gray = image.at<uchar>(row, col);
			if (gray > 0 && gray < 40) {
				image_bgr.at<Vec3b>(row, col) = Vec3b(0, 0, 255);	//red in BGR
			}
		}
	}
	return image_bgr;
}
Mat QuickDemo::mask_localImage(Mat& image)
{
	// Input: a single-channel gray image.  After expanding it to BGR, paints
	// red the pixels whose B/G/R exceed fixed thresholds, green the pixels
	// whose gray value lies in (50, 100), and prints the red-pixel ratio.
	float h = image.rows;
	float w = image.cols;
	Mat image_bgr1;
	cvtColor(image, image_bgr1, COLOR_GRAY2BGR);
	int r_count = 0;	//number of pixels painted red
	for (int row = 0; row < h; row++)
	{
		for (int col = 0; col < w; col++)
		{
			// fixed throughout: at() requires its template arguments
			Vec3b& px = image_bgr1.at<Vec3b>(row, col);
			uchar gray = image.at<uchar>(row, col);
			if (px[0] > 10 && px[1] > 100 && px[2] > 150)
			{
				px = Vec3b(0, 0, 255);	//red
				r_count++;
			}
			else if (gray > 50 && gray < 100)
			{
				px = Vec3b(0, 255, 0);	//green
			}
			// Fixed: the original third condition mixed && and || without
			// parentheses, so precedence made every pixel with gray <= 50
			// blue; the documented intent is "BGR test AND brightness range".
			// NOTE(review): as ordered, this branch is shadowed by the two
			// above and can only fire if the thresholds are later changed.
			else if (px[0] > 10 && px[1] > 100 && px[2] > 150
				&& gray > 50 && gray < 100)
			{
				px = Vec3b(255, 0, 0);	//blue
			}
		}
	}
	cout << "红色区域的像素个数" << r_count << endl;
	cout << "红色区域的像素所占的比率是:" << r_count / (h * w) << endl;
	return image_bgr1;
}
void QuickDemo::draw_interest_pluse(Mat& image, int re_h, int re_w) {
	// Split the masked image into a re_h x re_w grid; in every cell, draw the
	// bounding rectangle of the red (masked) pixels found there.
	Mat image_add = mask_localImage(image);	//apply the mask first
	int h = image_add.rows;
	int w = image_add.cols;
	int r_h = h / re_h;	//cell height
	int r_w = w / re_w;	//cell width
	for (int i = 0; i < re_h; i++) {
		for (int j = 0; j < re_w; j++) {
			vector<Point> points;	// fixed: the element type <Point> was stripped
			for (int row = i * r_h; row < r_h * (i + 1); row++) {
				for (int col = j * r_w; col < (j + 1) * r_w; col++) {
					// fixed: at() requires its <Vec3b> template argument
					Vec3b px = image_add.at<Vec3b>(row, col);
					if (px[0] == 0 && px[1] == 0 && px[2] == 255) {	//pure red
						points.push_back(Point(col, row));
					}
				}
			}
			// fixed: boundingRect throws on an empty point set — skip empty cells
			if (!points.empty()) {
				Rect rect = boundingRect(points);
				rectangle(image_add, rect, Scalar(0, 0, 0), 1, 8);
			}
		}
	}
	namedWindow("add_mask", WINDOW_FREERATIO);
	imshow("add_mask", image_add);
}
void QuickDemo::histogram_local_image(Mat& image)
{
	// Cut a fixed rectangular ROI out of the image via a bitwise AND mask and
	// display the gray-level histogram of that region.
	Mat mask(image.rows, image.cols, CV_8UC3, Scalar(0, 0, 0));
	Rect rect = Rect(125, 1489, 309, 162);	//hard-coded ROI of the inspected part
	Scalar color = Scalar(255, 255, 255);
	rectangle(mask, rect, color, -1);	//filled white rectangle = keep region
	Mat m_out;
	bitwise_and(image, mask, m_out);
	namedWindow("m_out", 0);
	imshow("m_out", m_out);
	Mat m_out_gray;
	cvtColor(m_out, m_out_gray, COLOR_BGR2GRAY);
	// Fixed: pass the grayscale version; the original passed the 3-channel
	// m_out, which made the conversion above dead code and fed calcHist a
	// BGR image.
	Mat AOI_hist = histogram_grayImage(m_out_gray);
	namedWindow("AOI_hist", WINDOW_FREERATIO);
	imshow("AOI_hist", AOI_hist);
}
void QuickDemo::draw_interset(Mat &image_add)
{
	// Collect every pure-red pixel (BGR == 0,0,255) and draw one thick red
	// bounding rectangle around all of them.
	int h = image_add.rows;
	int w = image_add.cols;
	vector<Point> points;	// fixed: the element type <Point> was stripped
	for (int row = 0; row < h; row++) {
		for (int col = 0; col < w; col++) {
			// fixed: at() requires its <Vec3b> template argument
			Vec3b px = image_add.at<Vec3b>(row, col);
			if (px[0] == 0 && px[1] == 0 && px[2] == 255) {
				points.push_back(Point(col, row));
			}
		}
	}
	if (!points.empty()) {	// boundingRect on an empty set would throw
		Rect rect = boundingRect(points);
		rectangle(image_add, rect, Scalar(0, 0, 255), 4, 8);
	}
	namedWindow("add_mask", WINDOW_FREERATIO);
	imshow("add_mask", image_add);
}
void QuickDemo::image_mask(Mat& image, Mat& heightMat)
{
	// Paint red every pixel of `image` whose corresponding value in the
	// height map exceeds 1000.  heightMat is assumed to be a CV_32F height
	// map with the same layout as image — TODO confirm with the caller.
	int h_height = heightMat.rows;
	int h_width = heightMat.cols;
	// NOTE: an unused gray conversion (Mat dst) was removed here.
	for (int row = 0; row < h_height; row++)
	{
		for (int col = 0; col < h_width; col++)
		{
			// fixed: at() requires its template arguments (<float>/<Vec3b>)
			if (heightMat.at<float>(row, col) > 1000)
			{
				image.at<Vec3b>(row, col) = Vec3b(0, 0, 255);	//red in BGR
			}
		}
	}
}
void QuickDemo::image_polar_mask_demo(Mat& image, Mat& heightMat)
{
	// Polarity calibration: paint red every pixel whose height value lies in
	// [1290, 1400].  heightMat is assumed to be a CV_32F map aligned with
	// image — TODO confirm with the caller.
	int h_height = heightMat.rows;
	int h_width = heightMat.cols;
	// NOTE: an unused gray conversion (Mat dst) was removed here.
	for (int row = 0; row < h_height; row++)
	{
		for (int col = 0; col < h_width; col++)
		{
			// fixed: at() requires its template arguments (<float>/<Vec3b>)
			float v = heightMat.at<float>(row, col);
			if (v >= 1290 && v <= 1400)
			{
				image.at<Vec3b>(row, col) = Vec3b(0, 0, 255);	//red in BGR
			}
		}
	}
}
void QuickDemo::lead_box(Mat& image)
{
	// Locate the main lead-inspection window: gather every pixel with
	// B>=100, G>=150, R>=50 and draw one bounding rectangle around them.
	int h = image.rows;
	int w = image.cols;
	vector<Point> points;	// fixed: the element type <Point> was stripped
	for (int row = 0; row < h; row++) {
		for (int col = 0; col < w; col++) {
			// fixed: at() requires its <Vec3b> template argument
			Vec3b px = image.at<Vec3b>(row, col);
			if (px[0] >= 100 && px[1] >= 150 && px[2] >= 50) {
				points.push_back(Point(col, row));
			}
		}
	}
	if (!points.empty()) {	// boundingRect on an empty set would throw
		Rect rect = boundingRect(points);
		rectangle(image, rect, Scalar(0, 0, 255), 1, 1);
	}
	namedWindow("add_mask", WINDOW_AUTOSIZE);
	imshow("add_mask", image);
}
// 打掩模
// Apply the lead-ROI mask.
Mat QuickDemo::lead_mask(Mat& image,Mat &gray_image)
{
	// Return a copy of `image` where every pixel with B<150, G<133, R<150 is
	// painted magenta (245,4,251), and show it.  gray_image is unused here
	// but kept for interface symmetry with sub_lead_mask.
	int height = image.rows;
	int width = image.cols;
	Mat dstImg = image.clone();	//work on a copy; this is the returned image
	for (int i = 0; i < height; i++)
	{
		for (int j = 0; j < width; j++)
		{
			// fixed: at() requires its <Vec3b> template argument
			Vec3b px = image.at<Vec3b>(i, j);
			if (px[0] < 150 && px[1] < 133 && px[2] < 150)
			{
				dstImg.at<Vec3b>(i, j) = Vec3b(245, 4, 251);
			}
		}
	}
	namedWindow("lead_mask", WINDOW_AUTOSIZE);
	imshow("lead_mask", dstImg);
	return dstImg;
}
Mat QuickDemo::sub_lead_mask(Mat& image, Mat& gray_image)
{
	// Blacken the pixels of `image` whose B, G and R are all > 20 AND whose
	// gray value is > 20, then return the modified image.
	int height = gray_image.rows;
	int width = gray_image.cols;
	// NOTE: unused threshold variables and an empty point vector were removed.
	for (int row = 0; row < height; row++)
	{
		for (int col = 0; col < width; col++)
		{
			// fixed: at() requires its template arguments (<Vec3b>/<uchar>)
			Vec3b& px = image.at<Vec3b>(row, col);
			if (px[0] > 20 && px[1] > 20 && px[2] > 20
				&& gray_image.at<uchar>(row, col) > 20)
			{
				px = Vec3b(0, 0, 0);
			}
		}
	}
	return image;
}
Mat QuickDemo::min_box(Mat& rgb_image, Mat& after_mask)
{
	// Threshold rgb_image, find its external contours, and draw each
	// contour's bounding rectangle in green on a copy of after_mask.
	Mat src_g;
	cvtColor(rgb_image, src_g, COLOR_BGR2GRAY);
	Mat dstImg = after_mask.clone();	//returned image with the boxes drawn
	Mat binary_src;
	threshold(src_g, binary_src, 80, 255, THRESH_BINARY);
	// fixed: all three container element types were stripped
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(binary_src, contours, hierarchy,
		RETR_EXTERNAL, CHAIN_APPROX_NONE);
	for (size_t i = 0; i < contours.size(); i++)
	{
		Rect box = boundingRect(contours[i]);
		rectangle(dstImg, box.tl(), box.br(), Scalar(0, 255, 0), 2, 8);
	}
	return dstImg;
}
Mat QuickDemo::unevenLightCompensate(Mat& image, int blockSize)
{
	// Compensate for uneven illumination: estimate the local brightness on a
	// blockSize grid, subtract each block's deviation from the global mean,
	// and convert back to 8-bit.  `image` is modified in place and returned
	// (a color input is converted to grayscale first).
	if (image.channels() == 3) cvtColor(image, image, COLOR_BGR2GRAY);	// fixed: magic code 7 -> named constant
	double average = mean(image)[0];	//global mean brightness
	int rows_new = (int)ceil(double(image.rows) / double(blockSize));
	int cols_new = (int)ceil(double(image.cols) / double(blockSize));
	// per-block mean brightness
	Mat blockImage = Mat::zeros(rows_new, cols_new, CV_32FC1);
	for (int i = 0; i < rows_new; i++)
	{
		for (int j = 0; j < cols_new; j++)
		{
			// clamp the block to the image border
			int rowmin = i * blockSize;
			int rowmax = (i + 1) * blockSize;
			if (rowmax > image.rows) rowmax = image.rows;
			int colmin = j * blockSize;
			int colmax = (j + 1) * blockSize;
			if (colmax > image.cols) colmax = image.cols;
			Mat imageROI = image(Range(rowmin, rowmax), Range(colmin, colmax));
			// fixed: at() requires its <float> template argument
			blockImage.at<float>(i, j) = (float)mean(imageROI)[0];
		}
	}
	blockImage = blockImage - average;	//per-block deviation from the global mean
	// upscale the deviation map back to full resolution
	Mat blockImage2;
	resize(blockImage, blockImage2, image.size(), 0, 0, INTER_CUBIC);	// fixed: "(0, 0)" comma expressions -> plain 0
	Mat image2;
	image.convertTo(image2, CV_32FC1);
	Mat dst = image2 - blockImage2;
	dst.convertTo(image, CV_8UC1);
	return image;
}
Mat QuickDemo::binary_image(Mat& image)
{
	// Fixed-threshold binarization: pixels above 170 become 255, others 0.
	const double thresh = 170;
	const double max_val = 255;
	Mat binarized;
	threshold(image, binarized, thresh, max_val, THRESH_BINARY);
	return binarized;
}
//test.cpp
#include
#include
#include
#include
using namespace cv;
using namespace std;
int main(int argc, char** argv) {
Mat src = imread("D:\\testImage\\图片\\图片\\882019100041-4.jpg");
if (src.empty()) {
printf("could not load image...");
return -1;
}
namedWindow("原图:", WINDOW_AUTOSIZE);
imshow("原图:", src);
QuickDemo qd;
Mat photometric_processing_img = qd.unevenLightCompensate(src, 64);
namedWindow("光度处理后图像:", WINDOW_AUTOSIZE);
imshow("光度处理后图像:", photometric_processing_img);
Mat binary_image = qd.binary_image(photometric_processing_img);
namedWindow("二值化后图像:", WINDOW_AUTOSIZE);
imshow("二值化后图像:", binary_image);
waitKey(0);
return 0;
}
//Mat roi_select(Mat& image, Point sp, Point ep) {
// int dx = ep.x - sp.x;
// int dy = ep.y - sp.y;
// Rect box(sp.x, sp.y, dx, dy);//ROI区域大小
// Mat new_img = image(box);
// /*QuickDemo qd;
// Mat ROI_his = qd.histogram_grayImage(new_img);
// namedWindow("ROI_his", WINDOW_FREERATIO);
// imshow("ROI_his", ROI_his);*/
// return new_img;
//}
//
//
//
//int main()
//{
// Mat src = imread("C:\\Users\\Administrator\\Documents\\WXWork\\1688850162194623\\Cache\\File\\2021-08\\0015.bmp");
// if (src.empty())
// {
// printf("could not load image...");
// return -1;
// }
// namedWindow("input_image", WINDOW_FREERATIO);
// imshow("input_image", src);
// int height = src.rows;
// int width = src.cols;
// cv::Mat textureMat(height, width, CV_8UC3);
// std::ifstream("C:\\Users\\Administrator\\Documents\\WXWork\\1688850162194623\\Cache\\File\\2021 - 08\\0015.bmp",
// std::ios::in | std::ios::binary).read((char*)textureMat.data,width * height * 3 * sizeof(uchar));
// cv::Mat heightMat(height, width, CV_32FC1);
// std::ifstream("C:\\Users\\Administrator\\Documents\\WXWork\\1688850162194623\\Cache\\File\\2021-08\\0015_13",
// std::ios::in | std::ios::binary).read((char*)heightMat.data, width * height * sizeof(float));
//
// QuickDemo qd;
// Mat hist = qd.histogram_grayImage(heightMat);
// namedWindow("hist", WINDOW_FREERATIO);
// imshow("hist", hist);
//
// //left_top
// Mat new_heightMat_lefttop = roi_select(heightMat, Point(666, 1706), Point(785, 1777));
// Mat new_src_lefttop = roi_select(src, Point(666, 1706), Point(785, 1777));
//
// //left_bottom
// Mat new_heightMat_leftbot = roi_select(heightMat, Point(666, 2926), Point(785, 2997));
// Mat new_src_leftbot = roi_select(src, Point(666, 2926), Point(785, 2997));
//
// //right_bottom
// Mat new_heightMat_rightbot = roi_select(heightMat, Point(1911, 2926), Point(2030, 2997));
// Mat new_src_rightbot = roi_select(src, Point(1911, 2926), Point(2030, 2997));
//
// //right_top
// Mat new_heightMat_righttop = roi_select(heightMat, Point(1911, 1706), Point(2030, 1777));
// Mat new_src_righttop = roi_select(src, Point(1911, 1706), Point(2030, 1777));
//
// //掩模
// qd.image_mask(new_src_lefttop, new_heightMat_lefttop);
//
// qd.image_mask(new_src_leftbot, new_heightMat_leftbot);
// qd.image_mask(new_src_righttop, new_heightMat_righttop);
// qd.image_mask(new_src_rightbot, new_heightMat_rightbot);
// qd.image_mask(new_src_rightbot, new_heightMat_rightbot);
//
// namedWindow("形态学操作前", WINDOW_FREERATIO);
// imshow("形态学操作前", src);
// qd.elementsizechange_close_demo(src);
// qd.draw_interset(src);
/*Mat polar_heightMat = roi_select(heightMat, Point(1767, 2781), Point(1905, 2911));
Mat polar_src = roi_select(src, Point(1767, 2781), Point(1905, 2911));
qd.image_polar_mask_demo(polar_src, polar_heightMat);
qd.elementsizechange_close_demo(src);
float src_height = src.rows;
float src_width = src.cols;
circle(polar_src, Point(src_height / 2, src_width / 2), 50, Scalar(0, 0, 255));
namedWindow("circle_mask_polar", WINDOW_FREERATIO);
imshow("circle_mask_polar", src);*/
// waitKey(0);
// return 0;
//}
//int main() {
// Mat src = imread("C:\\Users\\Administrator\\Documents\\WXWork\\1688850162194623\\Cache\\File\\2021-08\\0015.bmp");
// if (src.empty())
// {
// printf("could not load image...");
// return -1;
// }
// //namedWindow("input_image", WINDOW_FREERATIO);
// //imshow("input_image", src);
// Mat roi_src = roi_select(src, Point(672, 1571), Point(2007, 1724));
// namedWindow("roi_src", WINDOW_AUTOSIZE);
// imshow("roi_src", roi_src);
//
//
// Mat gray_roi_src;
// cvtColor(roi_src, gray_roi_src, COLOR_BGR2GRAY);
// imshow("gray_roi_src", gray_roi_src);
//
// QuickDemo qd;
//
// //思想是;把要处理的roi区域复制为两个图像,一个用来处理打掩模,以及满足一定的条件做一些操作,
// //然后在把他加到原有没有处理的ROI图像上。
//
// //引脚根部搜索:通过亮度和颜色的设定标示出引脚本体部分
// Mat mask_img = qd.lead_mask(roi_src, gray_roi_src);
//
// //通过亮度和颜色的设定标示出引脚部分
// Mat after_mask = qd.sub_lead_mask(mask_img, gray_roi_src);
// imshow("after_mask", after_mask);
//
// //通过亮度和颜色的设定标焊锡部分(引脚的头和尾)
// Mat box_label = qd.min_box(roi_src, after_mask);
// imshow("box_label", box_label);
//
// waitKey(0);
// return 0;
//}