透明通道的无论怎么操作显示的多是带白色背景的, 只有通过设置imread(“”,imread_unchanged), 保存下来的才是透明通道的
unchanged: 4通道, 有个透明通道
IMREAD_UNCHANGED: 加载带透明通道的图片
IMREAD_COLOR: 加载BGR图片
IMREAD_GRAYSCALE: 加载灰色图片
IMREAD_ANYCOLOR: 加载各种颜色的图片
Mat对象: 用来存储图像的数据(二维数据)的内存对象
存储枚举结果的
头部: 宽高, 数据类型,通道(单通道一般是灰色图像, 三通道一般是rgb图像. 四通道一般是透明通道)
数据部分: 像素值
创建Mat对象: Mat(size(x,y),cv_8uc3)
mat对象赋值: 只是地址的指向, 复制的变量, 他的内存地址是被复制的内存地址
mat对象拷贝或者克隆: 会创建一个新的mat对象, 而不是直接执行被拷贝的内存地址
mat.depth()
: 返回的是个int类型, 图像的深度
mat. type()
: 返回的是个int类型, 图像的类型
[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-JL3V6aZE-1656318416402)(D:\opencvSourse\openImg\imgType&&depth对应表.png)]
type数值:
type = depth + (channle -1)* 8
// Demo: Mat::type() vs Mat::depth().
// Relationship: type = depth + (channels - 1) * 8,
// e.g. CV_8UC3 -> type 16, depth 0 (CV_8U); CV_8SC3 -> 17/1; CV_16UC1 -> 2/2.
Mat src = Mat::zeros(Size(100, 100), CV_8UC3);
cout << "图片类型8: " << src.type() << "\t" << "图像深度0: " << src.depth() << endl;
// prints: type 16, depth 0
Mat src1 = Mat::zeros(Size(100, 100), CV_8SC3);
cout << "图片类型9: " << src1.type() << "\t" << "图像深度1: " << src1.depth() << endl;
// prints: type 17, depth 1
Mat src2 = Mat::zeros(Size(100, 100), CV_16UC1);
cout << "图片类型9: " << src2.type() << "\t" << "图像深度2: " << src2.depth() << endl;
// prints: type 10, depth 2 (CV_16U single channel)
Mat(x,x,type())
: 这样创建, 他默认不会全为0, 会自带一些颜色
Mat::zeros(Size(x,x), type())
: 创建一个全是0的对象
Mat::zeros
Mat::ones : 创建一个全是1的对象
[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-8JS5rn6r-1656318416403)()]
// Copy `img` into `dst` pixel by pixel using raw row pointers.
// NOTE(review): fragment — `img` is defined outside this snippet.
// Assumes a 3-channel 8-bit image (three uchar copies per pixel) — confirm at call site.
imshow("原图", img);
Mat dst = Mat::zeros(img.size(), img.type());
// Slower at()-based variant, kept commented out for reference
// (template arguments were stripped by the markdown export):
/*for (int row = 0; row < img.rows; row++)
{
for (int col = 0; col < img.cols; col++)
{
if (img.channels() == 3)
{
dst.at(row, col)[0] = img.at(row, col)[0];
dst.at(row, col)[1] = img.at(row, col)[1];
dst.at(row, col)[2] = img.at(row, col)[2];
}
}
}*/
/*imshow("遍历", dst);*/
// Fast variant: one uchar* per row, advance through B, G, R of each pixel.
for (int row = 0; row < img.rows; row++)
{
uchar* y = img.ptr(row);
uchar* newI = dst.ptr(row);
for (int col = 0; col < img.cols; col++)
{
*newI++ = *y++;
*newI++ = *y++;
*newI++ = *y++;
}
}
imshow("遍历", dst);
注意点: 2张图片必须大小一致, 类型一致
越界: opencv会自动去处理, 如果得大于255 ,会等于255, 如果小于0会等于0
add, subtract, multiply, divide
API
addWeighted: 用来调节亮度和对比度(伪装透明度)
void Person::erithmetic(Mat& img)
{
	// Blend the image 50/50 with a flat mid-gray overlay via addWeighted,
	// giving a fake "transparency" effect.
	imshow("原图", img);
	Mat overlay(img.size(), img.type(), Scalar(128, 128, 128));
	Mat blended;
	// fake transparency: dst = 0.5*img + 0.5*overlay
	addWeighted(img, 0.5, overlay, 0.5, 0, blended);
	// (alternative: addWeighted(img, 1.5, img, -0.2, 0, dst) boosts contrast)
	imshow("位置透明度", blended);
}
mask满足条件:
bitwise_not (img,dst,mask)
:如果mask区域为0的地方, 不进行取反, 为黑色0, mask为255的地方则进行取反
bitwise_and, bitwise_or, bitwise_xor
// Build a single-channel mask that is white over the middle-left region
// (rows [h/4, 3h/4), cols [w/4, 3w/4)) and apply bitwise_not through it:
// only masked pixels get inverted, the rest stay black.
// NOTE(review): fragment — `img` comes from the enclosing (unseen) function.
imshow("原图", img);
Mat mask = Mat::zeros(img.size(), CV_8UC1);
for (int row = img.rows / 4; row < img.rows / 4 * 3; row++)
{
uchar* p = mask.ptr(row);
for (int col = 0 ; col < img.cols / 4 * 2; col++)
{
// offset by w/4 so the white band starts a quarter of the way in
*(p + (col + img.cols / 4)) = 255;
}
}
imshow("mask", mask);
Mat dst;
bitwise_not(img, dst, mask);
imshow("取反", dst);
void Person::distribution(Mat& img)
{
	// Per-channel statistics: min/max with their pixel locations, mean, and
	// standard deviation of each split channel.
	// Fixes: restore the stripped template arguments (vector<Mat>), and run
	// the loop over every channel — the original iterated i = 1..size()-1
	// over vMat[i-1], silently skipping the last channel.
	imshow("原图", img);
	double min; double max;
	Point minLoc; Point maxLoc;
	vector<Mat> vMat(img.channels());
	split(img, vMat);
	// per-channel mean of the whole image
	Scalar mea = mean(img);
	Scalar mea1;
	vector<Mat> stddev(img.channels());
	for (int i = 0; i < (int)vMat.size(); i++)
	{
		// min / max and their locations
		minMaxLoc(vMat[i], &min, &max, &minLoc, &maxLoc, Mat());
		cout << "通道:" << i + 1 << "最小值: " << min << "\t" << "最大值: " << max << "\t" << "最小值像素点: " << minLoc << "最大值像素点" << maxLoc << endl;
		cout << "通道:" << i + 1 << "均值: " << mea[i] << endl;
		meanStdDev(vMat[i], mea1, stddev[i], Mat());
		cout << "通道:" << i + 1 << "方差: " << stddev[i] << endl;
	}
}
api: line(canvas, Point(x,y), Point(x1,y1), scalar(B,G,R), thickness, lineType,)
lineType: LINE_AA(反锯齿)
rectangle(canvas, Rect(10, 200, 300, 300), Scalar(0, 0, 200), -1, 8);
circle(canvas, Point(250, 250), 100, Scalar(200, 10, 10), -1, 8);
ellipse(canvas, RotatedRect(Point(300, 100), Size(200, 100), 45.5),Scalar(0, 200, 10), -1, LINE_AA);
在img中写文字
putText(canvas, “value”,Point(x,y), Font_, 1.0 , scalar(b,g,r), thickness, line_type)
putText(): 向图片中写文字
scalar: 可以()里面可以只有一个值
split: 通道分离
vector
容器merge: 合并(输入,输出),
roi:
rect roi
mat sub = img(roi)// sub是roi在img中的地方, 是赋值
mat sub = img(roi).clone// 是引用
pic: 最高峰
range: 像素分类
bin: 每个分类好了的像素区域
逻辑: 分离 --> 计算直方图(calcHist)–>输出图片归一化(对应高度)–>把归一化绘制到输出img上
API:
void Person::calcHistogram(Mat& img)
{
	// Compute per-channel (B, G, R) 256-bin histograms and draw them as
	// colored polylines on a 500x300 canvas with a 50px margin.
	// Fixes: restore the stripped template arguments (vector<Mat>, at<float>)
	// and show gImg / rImg — the original displayed bImg in all three windows.
	imshow("原图", img);
	vector<Mat> vImg;
	split(img, vImg);
	Mat bImg; Mat gImg; Mat rImg;
	int bins = 256;
	float rang[] = { 0,255 };
	const float* rangs = { rang };
	calcHist(&vImg[0], 1, 0, Mat(), bImg, 1, &bins, &rangs, true, false);
	calcHist(&vImg[1], 1, 0, Mat(), gImg, 1, &bins, &rangs, true, false);
	calcHist(&vImg[2], 1, 0, Mat(), rImg, 1, &bins, &rangs, true, false);
	imshow("b", bImg);
	imshow("g", gImg);   // was bImg
	imshow("r", rImg);   // was bImg
	Mat dst = Mat::zeros(Size(500, 300), img.type());
	int margin = 50;
	// map histogram counts into the drawable height of the canvas
	int height = dst.rows - 2 * margin;
	cout << "高度" << height << endl;
	normalize(bImg, bImg, 0, height, NORM_MINMAX);
	normalize(gImg, gImg, 0, height, NORM_MINMAX);
	normalize(rImg, rImg, 0, height, NORM_MINMAX);
	double step = (dst.cols - 2 * 50) / double(bins);
	cout << "步长" << step << endl;
	for (int i = 0; i < bins - 1; i++)
	{
		// one segment per adjacent bin pair, y inverted (origin is top-left)
		line(dst, Point(i * step + margin, height + margin - bImg.at<float>(i, 0)), Point((i + 1) * step + margin, height + margin - bImg.at<float>(i + 1, 0)), Scalar(255, 0, 0), 2);
		line(dst, Point(i * step + margin, height + margin - gImg.at<float>(i, 0)), Point((i + 1) * step + margin, height + margin - gImg.at<float>(i + 1, 0)), Scalar(0, 255, 0), 2);
		line(dst, Point(i * step + margin, height + margin - rImg.at<float>(i, 0)), Point((i + 1) * step + margin, height + margin - rImg.at<float>(i + 1, 0)), Scalar(0, 0, 255), 2);
	}
	imshow("输出", dst);
}
用来增加许多图像的全局对比度, 当图像的有用数据的对比度相当接近的时候,通过这种方法,亮度可以更好地在直方图上分布, 比较相似度
equalizeHist: 直方图均衡化 equalizeHist: 只接受灰色图像
compareHist: 直方图比较
下面代码最好把图片转换为hsv(色彩分明), 在进行比较
Mat myCalcHistogram(Mat& img)
{
	// 3-D histogram over all three channels, 256 bins each;
	// used as the signature for histogram comparison.
	int bins[] = { 256, 256, 256 };
	float r0[] = { 0,255 }; float r1[] = { 0,255 }; float r2[] = { 0,255 };
	const float* rangs[] = { r0, r1, r2 };
	int channels[] = { 0, 1, 2 };
	Mat hist;
	calcHist(&img, 1, channels, Mat(), hist, 3, bins, rangs, true, false);
	return hist;
}
void Person::compare(Mat& img1, Mat& img2)
{
	// Compare two images through their normalized 3-D histograms using
	// Bhattacharyya distance and correlation; self-comparison is printed
	// as a reference.
	cout << "aa" << endl;
	imshow("原图1", img1); imshow("原图2", img2);
	Mat hist1 = myCalcHistogram(img1);
	Mat hist2 = myCalcHistogram(img2);
	normalize(hist1, hist1, 0, 1, NORM_MINMAX);
	normalize(hist2, hist2, 0, 1, NORM_MINMAX);
	// Bhattacharyya: 0 = identical, larger = more different
	double crossBha = compareHist(hist1, hist2, HISTCMP_BHATTACHARYYA);
	double selfBha = compareHist(hist1, hist1, HISTCMP_BHATTACHARYYA);
	cout << "巴斯距离越大差异越大" << crossBha << "相同图" << selfBha << endl;
	// correlation: 1 = identical, smaller = more different
	double crossCor = compareHist(hist1, hist2, HISTCMP_CORREL);
	double selfCor = compareHist(hist1, hist1, HISTCMP_CORREL);
	cout << "相似性越大差异越小: 小" << crossCor << "相同图: 大" << selfCor << endl;
}
void Person::colorMap(Mat& img)
{
	// Cycle the image through OpenCV's built-in colormaps every 100ms
	// until ESC is pressed.
	// Fix: the original indexed with `% 19` into a 20-entry table, so the
	// last entry (COLORMAP_WINTER) was never shown; the count is now derived
	// from the array itself.
	int colormap[] =
	{
		COLORMAP_AUTUMN ,
		COLORMAP_BONE,
		COLORMAP_CIVIDIS,
		COLORMAP_DEEPGREEN,
		COLORMAP_HOT,
		COLORMAP_HSV,
		COLORMAP_INFERNO,
		COLORMAP_JET,
		COLORMAP_MAGMA,
		COLORMAP_OCEAN,
		COLORMAP_PINK,
		COLORMAP_PARULA,
		COLORMAP_RAINBOW,
		COLORMAP_SPRING,
		COLORMAP_TWILIGHT,
		COLORMAP_TURBO,
		COLORMAP_TWILIGHT,   // NOTE(review): duplicate of the entry above — likely a typo in the notes
		COLORMAP_VIRIDIS,
		COLORMAP_TWILIGHT_SHIFTED,
		COLORMAP_WINTER
	};
	const int mapCount = (int)(sizeof(colormap) / sizeof(colormap[0]));
	imshow("原图", img);
	Mat dst;
	int index = 0;
	while (true)
	{
		applyColorMap(img, dst, colormap[index % mapCount]);
		index++;
		imshow("颜色表", dst);
		int key = waitKey(100);
		if (key == 27)   // ESC
		{
			break;
		}
	}
}
卷积核窗口系数: 卷积核里面的数据
原理: (卷积系数/权重 * 处于卷积核位置的img的像素相加) / 卷积核面积 = result,取整(四舍五入), 把原图中处于卷积核的中心位置像素替换为 result. 卷积核一次前进移动, 反复执行.
blur: 一般用于处理图像的随机噪声
boxFilter 是 blur的快速版本, 最好使用boxFilter
当卷积核大小为偶数: 其实这个时候中心也为(ksize/2), 对2x2的卷积核,中心位置为Point**(1,1),4x4的卷积核中心位置为Point(2,2)**。
imshow("原图", img);
Mat dst = Mat::zeros(img.size(), img.type());
Mat dst2;
// Hand-rolled 3x3 mean (box) blur; the 1px border is left black.
// Fixes: the column loop now runs 1..cols-2 — the original started at col=0
// and read col-1 out of bounds — and the stripped at<Vec3b> template
// arguments are restored.
for (int row = 1; row < img.rows - 1; row++)
{
	for (int col = 1; col < img.cols - 1; col++)
	{
		for (int c = 0; c < 3; c++)
		{
			int sum = 0;
			for (int dr = -1; dr <= 1; dr++)
			{
				for (int dc = -1; dc <= 1; dc++)
				{
					sum += img.at<Vec3b>(row + dr, col + dc)[c];
				}
			}
			// round-to-nearest like the original /9 with round()
			dst.at<Vec3b>(row, col)[c] = (uchar)round(sum / 9.0);
		}
	}
}
imshow("手动blue", dst);
// library version for comparison
blur(img, dst2, Size(3, 3), Point(-1, -1), BORDER_DEFAULT);
imshow("blur", dst2);
进行卷积之前就应该填充好
填充类型 | 方法 |
---|---|
BORDER_CONSTANT(常量填充, 默认填充0) | iii|abc|iii (i:常量, abc:像素值) |
BORDER_REPLICATE(复制两端的边界值) | aaa|abc|ccc |
BORDER_WRAP(尾填到头, 头填到尾) | abc|abc|abc |
BORDER_REFLECT_101(镜像且不重复边界点, 看上去最合理) | cb|abc|ba |
BORDER_DEFAULT(等价于 BORDER_REFLECT_101) | cb|abc|ba |
copyMakeBorder(src,dst,margin, margin1, margin2,margin, 填充方式, 填充的颜色)
void Person::fill(Mat& img)
{
	// copyMakeBorder demo: pad 2px top/bottom and 3px left/right with a
	// constant red border, then blur the padded image.
	imshow("原图", img);
	int margin[] = { 2, 2, 3, 3 };   // top, bottom, left, right
	Mat padded; Mat blurred;
	copyMakeBorder(img, padded, margin[0], margin[1], margin[2], margin[3], BORDER_CONSTANT, Scalar(0, 0, 250));
	imshow("dst", padded);
	blur(padded, blurred, Size(3, 3), Point(-1, -1), BORDER_DEFAULT);
	imshow("blur", blurred);
	// row counts show how much the padding grew the image
	cout << img.rows << "\t" << padded.rows << "\t" << blurred.rows << endl;
}
高斯模糊
优点: 会更好的保留中心点像素, 对轮廓保存好
卷积核系数不一样, 非均值, 越是中心位置权重系数越高, 中心化对称(高斯数学公式),
API: GaussianBlur
盒子模糊(均值模糊)
// Second copy of Person::fill from the notes, extended with boxFilter;
// duplicates the definition above (would not compile in one TU).
// NOTE(review): the final cout line was truncated by the markdown export —
// the trailing `<< endl; }` is missing in the source.
void Person::fill(Mat& img)
{
imshow("原图", img);
int top = 2; int bottom = 2; int left = 3; int right = 3;
int margin[] = { top,bottom,left,right };
Mat dst; Mat dst1; Mat dst2;
copyMakeBorder(img, dst, margin[0], margin[1], margin[2], margin[3], BORDER_CONSTANT, Scalar(0, 0, 250));
imshow("dst", dst);
blur(img, dst1, Size(3, 3), Point(-1, -1), BORDER_DEFAULT);
imshow("blur", dst1);
// boxFilter with normalize=true is equivalent to blur (and typically faster)
boxFilter(img, dst2, -1, Size(3, 3), Point(-1, -1), true, BORDER_DEFAULT);
imshow("dst2", dst2);
cout << img.rows << "\t" << dst.rows << "\t" << dst1.rows <<"\t" << dst2.rows<
装换为8U的, 并且所有的值为正数
均值滤波如果原图是16或者16以下的depth,kernel depth * 2
非均值滤波: 非均值卷积核depth应该设置大些, (不然数据会溢出, 有负数), 然后使用abs转换全为正数, 不然输出的全是空白图像
void Person::myFilt(Mat& img)
{
	// filter2D demos: a 12x12 normalized box (mean) kernel, and a 2x2
	// Roberts-style difference kernel whose signed CV_32F response is
	// rescaled to 8-bit with convertScaleAbs.
	// Fixes: restore the stripped Mat_<float> template argument, and build
	// the box kernel as CV_32F (filter2D expects a float kernel; the notes'
	// CV_16F half-float kernel is not supported).
	imshow("input", img);
	Mat dst; Mat dst1;
	int width = 12;
	Mat kernel = Mat::ones(width, width, CV_32F) / float(width * width);
	filter2D(img, dst, -1, kernel, Point(-1, -1), 0, BORDER_DEFAULT);
	imshow("均值滤波", dst);
	cout << img.rows << "\t" << dst.rows << endl;
	// non-mean kernel: produces negative responses, hence CV_32F + delta 128
	Mat kernel1 = (Mat_<float>(2, 2) << 1, 0, 0, -1);
	filter2D(img, dst1, CV_32F, kernel1, Point(-1, -1), 128, 4);
	convertScaleAbs(dst1, dst1);
	imshow("非均值滤波", dst1);
}
Roberts算子: 非均值卷积, 求出x方向梯度和y方向梯度再相加
Sobel是Roberts的增强版, Scharr算子的卷积核系数更大
三种梯度算子呈现强度: Roberts < Sobel < Scharr
imshow("原图", img);
Mat gradX; Mat gradY;
// Roberts cross kernels (2x2); CV_32F output keeps the sign, then abs-scale.
// Fix: restore the stripped Mat_<float> template arguments.
Mat robotX = (Mat_<float>(2, 2) << 1, 0, 0, -1);
Mat robotY = (Mat_<float>(2, 2) << 0, 1, -1, 0);
filter2D(img, gradX, CV_32F, robotX, Point(-1, -1), 0, 4);
filter2D(img, gradY, CV_32F, robotY);
convertScaleAbs(gradX, gradX);
convertScaleAbs(gradY, gradY);
Mat dst;
add(gradX, gradY, dst);
imshow("robot梯度", dst);
// Hand-written Sobel kernels, kept for reference:
/*Mat sobelx = (Mat_<float>(3, 3) << -1, 0, 1,
	-2, 0, 2,
	-1, 0, 1);
Mat sobelY = (Mat_<float>(3, 3) << -1, -2, -1,
	0, 0, 0,
	1, 2, 1);
Mat gradSobelX; Mat gradSobelY;
filter2D(img, gradSobelX, CV_32F, sobelx, Point(-1, -1));
filter2D(img, gradSobelY, CV_32F, sobelY, Point(-1, -1));
convertScaleAbs(gradSobelX, gradSobelX);
convertScaleAbs(gradSobelY, gradSobelY);
Mat dst1; Mat result;
add(gradSobelX, gradSobelY, dst1);*/
Mat dst1;
Sobel(img, gradX, CV_32F, 1, 0, 3, 1, 0, BORDER_DEFAULT);
// Fix: the Y pass used (dx=1, dy=0) like the X pass; it must be (dx=0, dy=1).
Sobel(img, gradY, CV_32F, 0, 1, 3, 1, 0, BORDER_DEFAULT);
convertScaleAbs(gradX, gradX);
convertScaleAbs(gradY, gradY);
// Sobel kernels contain weights of 2; after abs a plain add() washes the
// image out, so blend the two gradients at 0.5/0.5 instead.
addWeighted(gradX, 0.5, gradY, 0.5, 0, dst1);
imshow("sobel", dst1);
Mat dst2;
Scharr(img, gradX, CV_32F, 1, 0, 1, 0);
Scharr(img, gradY, CV_32F, 0, 1, 1, 0, 4);
convertScaleAbs(gradX, gradX);
convertScaleAbs(gradY, gradY);
addWeighted(gradX, 0.5, gradY, 0.5, 0, dst2);
imshow("scharr", dst2);
四邻域, 八邻域, 拉普拉斯变种
[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-UKNPniCL-1656318416405)(D:\opencvSourse\openImg\拉普拉斯分类.png)]
推导公式:
[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-PWX2C5wE-1656318416406)(D:\opencvSourse\openImg\图像锐化原理.png)]
图像锐化: 原图 + 拉普拉斯得出来的结果(图像的边缘跟梯度比较尖锐的值加到原图上), 可以很好的反映图像的边缘, 也可以反模糊
拉普拉斯的卷积核必须是奇数
缺点: 很容易受噪声干扰(对小的细节造成干扰)
拉普拉斯api得出来的是边缘
void Person::laplasi(Mat& img)
{
	// Sharpen via a 3x3 four-neighbour kernel (center 5 = identity + Laplacian),
	// then contrast that with the Laplacian API plus an explicit add().
	// Fixes: restore the stripped Mat_<float> template argument, and show
	// dst2 (img + Laplacian) in the "锐化" window — the original displayed
	// the raw edge image dst1 instead of the sharpened result.
	cout << img.channels() << endl;
	imshow("原图", img);
	Mat Box = (Mat_<float>(3, 3) << 0, -1, 0,
		-1, 5, -1,
		0, -1, 0);
	Mat dst;
	filter2D(img, dst, CV_32F, Box, Point(-1, -1), 0, 4);
	convertScaleAbs(dst, dst);
	imshow("拉普拉斯", dst);
	Mat dst1; Mat dst2;
	// Laplacian alone yields edges; adding them back sharpens the image
	Laplacian(img, dst1, -1);
	add(img, dst1, dst2);
	imshow("锐化", dst2);
}
原理: blur/高斯 - 拉普拉斯算子
相对于拉普拉斯, 他会处理掉极小值(对小的细节不造成干扰), 不容易受噪声干扰, 对大的细节进行锐化
imshow("原图", img);
Mat gua; Mat lap;
GaussianBlur(img, gua, Size(3,3),0);
Laplacian(img, lap, -1, 3, 1, 0);
Mat result;
addWeighted(gua, 1, lap, -0.7, 0, result);
imshow("usm", result);
API:
去噪:
中值滤波: 能反映信号理想的样子, 去除极致点,
原理: 对图像某一块区域进行排序, 取出sort中间的值替换这块局域的中心点
还有最小值滤波(sort[0]替换中心点), 最大值滤波(sort[lenght-1]替换中心点)
作用: 最适用于椒盐噪声非0就255
重要: 卷积核必须是奇数而且是大于1
均值滤波: 没有反映信号本来的样子, 会扰动
API:
medianBlur: 中值滤波
没有考虑中心像素的权重, 可能会导致图片破坏
GaussianBlur 高斯滤波 没有考虑中心像素点与周围像素点差值很大
[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-DqhqGCcS-1656318416407)(D:\opencvSourse\openImg\均值滤波和中值滤波.png)]
考虑图像的梯度(边缘):高斯双边, 均值前移, 非局部均值去噪, 局部均值方差
高斯双边模糊
非局部均值滤波
简单理解原理: 相似像素块, 权重比较大, 不相似的权重比较小
API
void Person::noise(Mat& img)
{
	// Inject salt-and-pepper noise and denoise with median / Gaussian filters;
	// then add Gaussian noise to a clean copy and denoise with bilateral and
	// non-local-means filters.
	// Fixes: restore the stripped at<Vec3b> template argument, and index with
	// (row, col) = (y, x) — the original passed (x, y), which reads out of
	// bounds on non-square images.
	Mat result1; Mat result2;
	Mat dst = img.clone();   // clean copy kept for the Gaussian-noise half
	imshow("原图", img);
	//salt and pepper
	RNG r(12345);
	int ipt = 1000;
	for (int i = 0; i < ipt; i++)
	{
		int xNum = r.uniform(0, img.cols);
		int yNum = r.uniform(0, img.rows);
		// Mat::at takes (row, col), i.e. (y, x)
		if (i % 2 == 1)
		{
			img.at<Vec3b>(yNum, xNum) = Vec3b(255, 255, 255);   // salt
		}
		else
		{
			img.at<Vec3b>(yNum, xNum) = Vec3b(0, 0, 0);          // pepper
		}
	}
	imshow("椒盐噪声", img);
	// median filter is the right tool for salt-and-pepper noise
	medianBlur(img, result1, 3);
	imshow("椒盐去噪", result1);
	GaussianBlur(img, result2, Size(3, 3), 0);
	imshow("高斯去噪", result2);
	// Gaussian noise
	Mat res1; Mat res2;
	Mat src = Mat::zeros(img.size(), img.type());
	randn(src, Scalar(25, 15, 45), Scalar(60, 40, 30));
	add(dst, src, dst);
	imshow("高斯", dst);
	bilateralFilter(dst, res1, 0, 100, 10);
	imshow("双边模糊", res1);
	fastNlMeansDenoisingColored(dst, res2, 3, 3, 7, 21);
	imshow("非局部去噪", res2);
}
实际图像可能不平整有噪声
步骤:
[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-WSFkBplS-1656318416407)(D:\opencvSourse\openImg\梯度提取边缘算子.png)]
基于阈值T,得到边缘(梯度>T保留, <丢弃)
问题: 基于T的会不够连贯
非最大抑制: 求出角度, 如果中心像素大于2侧梯度值则保留, 否则丢弃, 然后进行阈值连接
阈值连接: T1 / T2 = 2 ≈ 2~3, 大于T1全部保留, 小于T2全部丢弃, t1至T2之间的如果可以连接则保留, 否则丢弃
参数:
void myCanny(int min, void* img)
{
Mat newImg = *(Mat*)img;
Mat dst; Mat dst1;
Canny(newImg, dst, min, 200, 3, false);
bitwise_and(newImg, newImg, dst1, dst);
imshow("边缘提取", dst1);
}
void Person::canny(Mat& img)
{
	// Interactive Canny demo: a trackbar (0..120) drives the low threshold
	// through the myCanny callback.
	string barName = "提取范围";
	string winName = "边缘提取";
	namedWindow(winName, WINDOW_AUTOSIZE);
	imshow("原图", img);
	int initial = 50;
	int sliderMax = 120;
	createTrackbar(barName, winName, &initial, sliderMax, myCanny, (void*)&img);
	// render once immediately instead of waiting for the first slider move
	myCanny(0, (void*)&img);
}
必须是灰色图像
对机器视觉或者工业领域
灰度图像 : 单通道, 取值范围0~255
二值图像: 单通道, 要么是0要么是255, 以黑色作为背景
[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-IfSlKbwl-1656318416408)(D:\opencvSourse\openImg\二值分割5钟方法.png)]
二值化
阈值化
API:
threshold: 只接受灰色图像
void Person::myThreshold(Mat& img)
{
	// Demonstrates the five basic threshold modes at T = 127
	// (the input is converted to grayscale in place).
	cvtColor(img, img, COLOR_BGR2GRAY);
	imshow("原图灰色图像", img);
	Mat dst;
	// binary: > T -> 255, else 0
	threshold(img, dst, 127, 255, THRESH_BINARY);
	imshow("二值化", dst);
	// inverted binary
	threshold(img, dst, 127, 255, THRESH_BINARY_INV);
	imshow("反二值化", dst);
	// truncate: values above T are clamped to T, the rest keep their value
	threshold(img, dst, 127, 255, THRESH_TRUNC);
	imshow("阈值化切割", dst);
	// to-zero: values above T kept, the rest become 0
	threshold(img, dst, 127, 255, THRESH_TOZERO);
	imshow("阈值化", dst);
	// inverted to-zero
	threshold(img, dst, 127, 255, THRESH_TOZERO_INV);
	imshow("反阈值化", dst);
}
//返回二值化分割
//returns an OTSU-binarized copy of the input
Mat Person::myThresh(const Mat& img)
{
	// Blur -> grayscale -> OTSU binary threshold; the caller's image is
	// untouched because we work on a clone.
	Mat work = img.clone();
	GaussianBlur(work, work, Size(3, 3), 0);
	cvtColor(work, work, COLOR_BGR2GRAY);
	Mat binary;
	threshold(work, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);
	return binary;
}
概述: 全局阈值, 自适应阈值, 缺点: 不适用与光线不均匀的情况
通过mean, 取0号下标得均值,这个均值设为T
OTSU
[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-kr1Qrs0g-1656318416409)(D:\opencvSourse\openImg\OTSU解释.png)]
三角法
// Compare three automatic global thresholds: image mean, OTSU, and the
// triangle method (threshold() returns the T it chose when OTSU/TRIANGLE
// flags are set).
// NOTE(review): the final cout statement was truncated by the markdown
// export — the expression after "均值:" and the closing `}` are missing.
void Person::tThreshold(Mat& img)
{
Mat dst;
cvtColor(img, img, COLOR_BGR2GRAY);
imshow("原图", img);
// mean of the gray image used as T
Scalar m = mean(img);
threshold(img, dst, m[0], 255, THRESH_BINARY);
imshow("均值", dst);
//otsu
double otsu = threshold(img, dst, 0, 255, THRESH_BINARY | THRESH_OTSU);
imshow("otsu", dst);
// triangle method
double angle = threshold(img, dst, 0, 255, THRESH_BINARY | THRESH_TRIANGLE);
imshow("三角法", dst);
cout << "otsu切割阈值T: " << otsu << "\t" << "三角法: " << angle << endl
<<"\t" << "均值:"<
自适应阈值
概述: 全局阈值的局限性: 对光线照度不均匀的图像容易错误的二值化分割, 自适应阈值对图像模糊求差然后二值化,叫做自适应的高斯分割或者是自适应均值分割, 相比于自适应阈值, 会更好的保留细节
相对于全局自适应, 会提取更多的梯度
原图-模糊后的图+偏执常量 > 0 = 255, 否则=0
//自适应阈值分割
// adaptive threshold segmentation
void Person::myAdaptive(Mat &img)
{
	// Contrast global OTSU with locally adaptive (Gaussian) thresholding;
	// the adaptive variant copes far better with uneven lighting.
	cvtColor(img, img, COLOR_BGR2GRAY);
	imshow("原图", img);
	Mat out;
	threshold(img, out, 0, 255, THRESH_BINARY | THRESH_OTSU);
	imshow("otsu", out);
	// block size 21, constant C = 5 subtracted from the local weighted mean
	adaptiveThreshold(img, out, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 21, 5);
	imshow("自适应", out);
}
如果是白色对象 将过滤点不会进行处理, 只有为黑色的时候才会考虑连通性
opencv是基于块扫描结合决策表(DT) ==> BBDT
概念: 联通组件标记: CCL
算法:
基于像素的扫描的方法
缺点: 有大量重复的扫描, 而且不规则扫描
基于块扫描的方法
两步法扫描
概率: 对前景像素点产生一个临时标记, 通过连通性进行等价队列合并, 然后获取lable
决策表:DT
现在opencv联通组件采用的是BBDT: 块扫描+决策表
api: connectedComponents, 背景必须为黑色, 只能知道有多少个联通组件
api:connectedComponentsWithStats
一般先进行高斯模糊降噪
void Person::connectComp(Mat& img)
{
	// Connected-component labelling: blur -> gray -> OTSU binarize -> label,
	// then paint each label a random color and annotate centroid / bounding
	// box / area from connectedComponentsWithStats.
	// Fix: restore the stripped template arguments (vector<Vec3b>, at<Vec3b>,
	// at<int>, at<double>).
	RNG rn(12345);
	imshow("原图", img);
	Mat denoi;
	GaussianBlur(img, denoi, Size(11, 11), 0);
	imshow("去噪", denoi);
	Mat grey;
	cvtColor(denoi, grey, COLOR_BGR2GRAY);
	Mat cutting;
	threshold(grey, cutting, 0, 255, THRESH_BINARY | THRESH_OTSU);
	imshow("二值化", cutting);
	Mat labels = Mat::zeros(img.size(), CV_32S);
	int num = connectedComponents(cutting, labels, 8, CV_32S, CCL_DEFAULT);
	cout << "数量" << num << endl;
	// one random color per label; label 0 (background) stays black
	vector<Vec3b> color(num);
	color[0] = Vec3b(0, 0, 0);
	for (int index = 1; index < num; index++)
	{
		color[index] = Vec3b(rn.uniform(0, 256), rn.uniform(0, 256), rn.uniform(0, 256));
	}
	Mat result = Mat::zeros(img.size(), CV_8UC3);
	for (int x = 0; x < result.rows; x++)
	{
		for (int y = 0; y < result.cols; y++)
		{
			result.at<Vec3b>(x, y) = color[labels.at<int>(x, y)];
		}
	}
	// detailed per-component statistics
	Mat stats; Mat centroids;
	int num1 = connectedComponentsWithStats(cutting, labels, stats, centroids, 8, CV_32S, CCL_DEFAULT);
	string numn = to_string(num1);
	for (int i = 1; i < num1; i++)
	{
		//center
		double centerX = centroids.at<double>(i, 0);
		double centerY = centroids.at<double>(i, 1);
		//rectangle
		int racLeft = stats.at<int>(i, CC_STAT_LEFT);
		int racTop = stats.at<int>(i, CC_STAT_TOP);
		int racWidth = stats.at<int>(i, CC_STAT_WIDTH);
		int racHeight = stats.at<int>(i, CC_STAT_HEIGHT);
		int area = stats.at<int>(i, CC_STAT_AREA);
		cout << "面积" << i << area << endl;
		string s = to_string(area);
		circle(result, Point(centerX, centerY), 3, Scalar(255, 20, 20), 2, 8);
		Rect rect = Rect(racLeft, racTop, racWidth, racHeight);
		rectangle(result, rect, Scalar(0, 0, 200), 2, LINE_8);
		putText(result, s, Point(centerX, centerY), FONT_HERSHEY_PLAIN, 1.0, Scalar(0, 255, 0), 2, 8);
		putText(result, numn, Point(50, 50), FONT_HERSHEY_PLAIN, 3, Scalar(0, 255, 0), 2, 8);
	}
	imshow("切割染色", result);
}
基本概率: 理解为图像边界, 主要针对二值图像, 轮廓是一系列点的集合
基于联通组件, 反映图像拓扑结构
算法:
[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-FeJh7Je9-1656318416410)(D:\opencvSourse\openImg\轮廓发现.png)]
API:
findContours
轮廓: 是点的集合vector
多个组件, 组件里面是像素
层次: vec4i
拓扑结构: list或者tree或者最外层最大的轮廓(external) retr
编码方式:
chain_approx_simple
hirearchy里面只存储: 4个顶点位置
chain_approx_none
hirearchy里面存储所有轮廓的点位
drawContours
contourldx: 绘制的是哪一个轮廓,如果是-1就是绘制全部
void Person::myContour(Mat& img)
{
	// Find and draw contours of the binarized image: full hierarchy tree
	// (each contour drawn individually) vs. external-only retrieval.
	// Fix: restore the stripped vector<vector<Point>> / vector<Vec4i>
	// template arguments.
	//binaryImg is a pre-binarized image
	Mat binaryImg = this->myThresh(img).clone();
	Mat dst = Mat::zeros(img.size(), CV_8UC3);
	Mat dst1 = dst.clone();
	Mat dst2 = dst.clone();
	imshow("二值化", binaryImg);
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(binaryImg, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point());
	for (int i = 0; i < contours.size(); i++)
	{
		drawContours(dst, contours, i, Scalar(0, 255, 0), 2, LINE_8);
	}
	imshow("轮廓", dst);
	// external contours only; index -1 draws them all at once
	findContours(binaryImg, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point());
	drawContours(dst1, contours, -1, Scalar(255, 0, 0), 2, 8);
	imshow("最大轮廓", dst1);
}
[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-yC6XUZCC-1656318416411)(D:\opencvSourse\openImg\轮廓面积和周长计算.png)]
contourArea: 轮廓面积 contourArea[contour1]
arcLength: 轮廓周长(contour1, 是否闭合)
boundingRect: 最大外接矩形
minAreaRect: 最小外接矩形, 数据类型是RotatedRect
//轮廓细节
void Person::contourDetails(Mat& img)
{
	// For each sizable external contour draw: the axis-aligned bounding box,
	// the min-area rotated rect (as an ellipse plus its four corner edges),
	// and the contour itself; print area and perimeter.
	// Fixes: restore the stripped vector<vector<Point>> / vector<Vec4i>
	// template arguments; rename the inner loop index, which shadowed the
	// outer `i`.
	Mat result = Mat::zeros(img.size(), CV_8UC3);
	imshow("原图", img);
	GaussianBlur(img, img, Size(3, 3), 0);
	cvtColor(img, img, COLOR_BGR2GRAY);
	Mat dst;
	threshold(img, dst, 0, 255, THRESH_BINARY_INV | THRESH_OTSU);
	imshow("二值化分割", dst);
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(dst, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point());
	for (int i = 0; i < contours.size(); i++)
	{
		// skip tiny contours (perimeter < 100 or area < 10)
		if (arcLength(contours[i], true) < 100 || contourArea(contours[i]) < 10) continue;
		// axis-aligned (max) bounding rectangle
		Rect maxRect = boundingRect(contours[i]);
		rectangle(result, maxRect, Scalar(200, 20, 20), 2, 8);
		// min-area (rotated) rectangle
		RotatedRect minRect = minAreaRect(contours[i]);
		ellipse(result, minRect, Scalar(20, 200, 20), 2, 8);
		Point2f pts[4];
		// draw the rotated rect by connecting its 4 corners
		minRect.points(pts);
		for (int j = 0; j < 4; j++)
		{
			line(result, pts[j], pts[(j + 1) % 4], Scalar(20, 20, 200), 2, 8);
		}
		drawContours(result, contours, -1, Scalar(100, 100, 100), 2, 8);
		cout << "轮廓" << i << "面积: " << contourArea(contours[i]) << "\t" << "轮廓" << i << "周长: " << arcLength(contours[i], true) << endl;
	}
	imshow("输出", result);
}
作用: 可以匹配大小不一致, 旋转不一致的图像
API
Moments : 几何矩 Moments(contours[i])
作用: 计算弧矩, 计算中心位置
根据原理推中心矩: mm.m10 / mm.00 = x || mm.m01 / mm.00 = y
HuMoments: 弧矩(Moments, mat对象) : 选择不变性, 缩放不变性
matchShapes: 比较弧矩 参数3:contours_match_l1/l2/l3, 第一种比较效果比较好
void contoursFn( Mat &dst, const vector<vector<Point>> &imgContours, const vector<vector<Point>>& srcContours)
{
	// Match every scene contour against the first template contour using
	// Hu-moment shape matching; contours scoring below 2.0 are drawn in red,
	// and each scene contour's centroid is marked.
	// Fix: restore the stripped vector<vector<Point>> template arguments in
	// the signature.
	Moments srcMm = moments(srcContours[0]);
	Mat srcHu;
	HuMoments(srcMm, srcHu);
	for (int i = 0; i < imgContours.size(); i++)
	{
		Mat imgHu;
		Moments imgMm = moments(imgContours[i]);
		// centroid from the raw moments: (m10/m00, m01/m00)
		double pX = imgMm.m10 / imgMm.m00;
		double pY = imgMm.m01 / imgMm.m00;
		circle(dst, Point(pX, pY), 3, Scalar(200, 200, 20), 2, LINE_8);
		HuMoments(imgMm, imgHu);
		// CONTOURS_MATCH_I1 generally gives the best discrimination
		double ss = matchShapes(imgHu, srcHu, CONTOURS_MATCH_I1, 0);
		if (ss < 2.0)
		{
			cout << ss << endl;
			drawContours(dst, imgContours, i, Scalar(20, 20, 200), 2, 8);
		}
		else
		{
			cout << "匹配不成功" << endl;
		}
	}
	imshow("axx", dst);
}
void Person::contourComp(Mat& img, Mat& src)
{
	// Binarize both images, extract their external contours, and match the
	// scene (img) against the template (src) via contoursFn.
	// Fix: restore the stripped vector<vector<Point>> / vector<Vec4i>
	// template arguments.
	namedWindow("匹配轮廓图", WINDOW_FREERATIO);
	imshow("匹配轮廓图", src);
	Mat binaryImg = this->myThresh(img);
	Mat binarysrc = this->myThresh(src);
	vector<vector<Point>> imgContours;
	vector<vector<Point>> srcContours;
	vector<Vec4i> hierarchy;
	vector<Vec4i> hierarchy1;
	findContours(binaryImg, imgContours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point());
	findContours(binarysrc, srcContours, hierarchy1, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point());
	contoursFn(img, imgContours, srcContours);
}
轮廓拟合和逼近(进行轮廓发现之后通过api越来越接近真实的情况)
概念:
轮廓逼近, 本质是减少编码点: 逼近得越厉害, 保留的编码点越少
拟合圆, 生成最相似的圆或者椭圆
API:
approxPolyDP: 轮廓逼近, 一般用来区分图形
参数:
参数1: contours[i]
参数2: mat对象 里面有存放每个点,
参数3: 精度,值越低精度越高(编码点越多)(一般为4)
参数4:是否为闭合区
fitEllipse
void Person::contourProx(Mat& img)
{
	// Approximate each external contour with approxPolyDP (epsilon = 4) and
	// print the surviving vertex count; the count can classify the shape
	// (4 = rectangle, 3 = triangle, 6 = hexagon, > 12 ~ circle).
	// Fixes: restore the stripped vector<vector<Point>> / vector<Vec4i>
	// template arguments; the notes' commented-out classification used
	// `poly.rows = 4` (assignment) where `==` was intended.
	imshow("原图", img);
	Mat binaryImg = this->myThresh(img);
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(binaryImg, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point());
	for (int i = 0; i < contours.size(); i++)
	{
		Mat poly;
		approxPolyDP(contours[i], poly, 4, true);
		cout << "图形:" << i << "行数 " << poly.rows << "列数: " << poly.cols << endl;
		double len = arcLength(contours[i], true);
		double Area = contourArea(contours[i]);
		// centroid from the raw moments
		Moments mm = moments(contours[i]);
		double pX = mm.m10 / mm.m00;
		double pY = mm.m01 / mm.m00;
		// Example classification (corrected to `==`), condensed from the notes:
		//if (poly.rows == 4) { putText(img, "矩形", Point(pX, pY - 10), FONT_HERSHEY_PLAIN, 1, Scalar(20, 200, 20), 2, 8); }
		//if (poly.rows == 3) { putText(img, "三角形", Point(pX, pY - 10), FONT_HERSHEY_PLAIN, 1, Scalar(20, 200, 20), 2, 8); }
		//if (poly.rows == 6) { putText(img, "6边形", Point(pX, pY - 10), FONT_HERSHEY_PLAIN, 1, Scalar(20, 200, 20), 2, 8); }
		//if (poly.rows > 12) { putText(img, "圆", Point(pX, pY - 10), FONT_HERSHEY_PLAIN, 1, Scalar(20, 200, 20), 2, 8); }
		putText(img, "我", Point(pX, pY - 20), FONT_HERSHEY_PLAIN, 1, Scalar(20, 200, 20), 2, 8);
	}
	imshow("判断轮廓图像", img);
}
图像拟合
//返回二值化轮廓
// returns the external contours of the binarized image
vector<vector<Point>> Person::contourOne(Mat& img)
{
	// Fix: restore the stripped vector<vector<Point>> / vector<Vec4i>
	// template arguments (lost in the markdown export).
	Mat binary = this->myBinary(img);
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(binary, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point());
	return contours;
}
//拟合
// ellipse fitting
void Person::myFitEllipse(Mat& img)
{
	// Fit an ellipse to each external contour and draw it together with its
	// center point.
	// Fixes: restore the stripped vector<vector<Point>> template argument;
	// drop the unused height/width/area string locals.
	imshow("原图", img);
	vector<vector<Point>> contours = this->contourOne(img);
	for (int i = 0; i < contours.size(); i++)
	{
		RotatedRect rotRect = fitEllipse(contours[i]);
		Point center = rotRect.center;
		ellipse(img, rotRect, Scalar(255, 0, 10), 2, 8);
		circle(img, center, 3, Scalar(0, 255, 0), 2, LINE_8);
	}
	imshow("处理图", img);
}
对噪声很敏感, 需要降噪
r = x0*cosθ + y0 * sinθ ==>如果点坐标在同一条直线: r和θ是相同的
API:
HoughLines : 得出来的是极坐标空间参数 vector(p,θ) 直线
霍夫直线检测出的来的结果是vec3F, 距离,角度,累加
参数:
参数2: 输出 vector
, 这个vec3f
下标0: 代表R(距离)
下标1: 代表θ
下标2: 代表累加值
参数3: 步长
参数4: 角度: cv_pi / xx
参数5: 阈值, 有多少个点集中在一起 算直线
//InputArray image:输入图像,必须是8位单通道图像。
//OutputArray lines:检测到的线条参数集合。
//double rho: 累加器的距离
//double theta:累加器的角度。
//int threshold:累加计数值的阈值参数,当参数空间某个交点的累加计数的值超过该阈值,则认为该交点对应了图像空间的一条直线。
//double srn:默认值为0,用于在多尺度霍夫变换中作为参数rho的除数,rho=rho/srn。
//double stn:默认值为0,用于在多尺度霍夫变换中作为参数theta的除数,theta=theta/stn。
//如果srn和stn同时为0,就表示HoughLines函数执行标准霍夫变换,否则就是执行多尺度霍夫变换
代码:
void Person::Houline(Mat& img)
{
	// Standard Hough line detection on the binarized image. Each result is
	// (rho, theta, votes); rho/theta are converted back to two far-apart
	// points to draw the (infinite) line.
	// Fix: restore the stripped vector<Vec3f> template argument — Vec3f is
	// required because the accumulator value lines[i][2] is read below.
	imshow("原图", img);
	Mat binary = this->myBinary(img);
	vector<Vec3f> lines;
	HoughLines(binary, lines, 1, CV_PI / 180, 160, 0, 0);
	Point p1; Point p2;
	for (int i = 0; i < lines.size(); i++)
	{
		double step = lines[i][0];    // rho (distance from origin)
		double angle = lines[i][1];   // theta
		double add = lines[i][2];     // accumulator votes
		cout << "直线" << i << "\t" << "距离: " << step << "角度: " << angle << i << "累加点: " << add << endl;
		double x = cos(angle);
		double y = sin(angle);
		double x0 = step * x;
		double y0 = step * y;
		// walk 1000px along the line direction in both ways
		p1.x = cvRound(x0 + 1000 * -y);
		p1.y = cvRound(y0 + 1000 * x);
		p2.x = cvRound(x0 - 1000 * -y);
		p2.y = cvRound(y0 - 1000 * x);
		line(img, p1, p2, Scalar(0, 0, 255), 2, 8);
	}
	imshow("霍夫直线", img);
}
HoughLinesP() 线段
参数2: 为2个点的坐标的容器: vector
参数3, 4 步长, 角度
参数5: 阈值, 有这么多点以上连接的线段
参数6: 检测出的最小长度(minLineLength)
参数7: 线段之间的间隔, 如果超出则为新的线段
void Person::houlineP(Mat& img)
{
	// Probabilistic Hough transform: returns finite line segments as
	// (x1, y1, x2, y2); min length 30, max gap 10.
	// Fix: restore the stripped vector<Vec4i> template argument.
	Mat result = Mat::zeros(img.size(), img.type());
	imshow("原图", img);
	Mat binary = this->myBinary(img); // helper that returns the binarized image
	vector<Vec4i> lines;
	imshow("binary",binary);
	HoughLinesP(binary, lines, 1, CV_PI / 180, 80, 30, 10);
	for (int i = 0; i < lines.size(); i++)
	{
		line(result, Point(lines[i][0], lines[i][1]), Point(lines[i][2], lines[i][3]), Scalar(255, 0, 0), 1, 8);
	}
	imshow("霍夫直线", result);
}
x = x0 + r·cos(θ); y = y0 + r·sin(θ), 已知圆心(x0, y0)和半径r
基于梯度去寻找, 不然计算量太大了
接收的必须是灰色图像, 而且对噪声很敏感, 需要进行降噪
圆的参数方程:
圆的参数(X0, Y0, r), 圆任意三个点, 以这些点位圆心 r为半径, 相交的一个点
基于梯度或者边缘,轮廓进行查找
api: HoughCircles
vector
void Person::houghCir(Mat &img)
{
	// Hough circle detection on a heavily blurred grayscale image
	// (the transform is noise-sensitive). Each result is (cx, cy, radius).
	// Fix: restore the stripped vector<Vec3f> template argument.
	imshow("原图", img);
	Mat result = Mat::zeros(img.size(), CV_8UC3);
	Mat gray;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	GaussianBlur(gray, gray, Size(15,15), 2, 2);
	vector<Vec3f> cir;
	int dp = 2;                 // accumulator resolution ratio
	double cirSpace = 5;        // min distance between detected centers
	int add = 100;              // Canny high threshold
	int maxThreshold = 100;     // accumulator threshold
	double cirMin = 15; double cirMax = 100;   // radius search range
	HoughCircles(gray, cir, HOUGH_GRADIENT, dp, cirSpace, add, maxThreshold, cirMin, cirMax);
	for (int i = 0; i < cir.size(); i++)
	{
		int cenX = round(cir[i][0]);
		int cenY = round(cir[i][1]);
		int radius = round(cir[i][2]);
		circle(result, Point(cenX, cenY), radius, Scalar(0, 0, 200), 2, 8);
	}
	imshow("霍夫找园", result);
}
图像形态学操作
概念: 可以对灰度图像和二值化图像处理
原理:对处于卷积核像素的进行排序 腐蚀, 用最小像素来替换中心像素, 膨胀是用最大像素来替换中心像素
作用: 断开或者连接前景对象
getStructuringElement: 获取类型() 形态学
API:
腐蚀: erode
膨胀: dilate
参数: 第3个参数是结构元素(形态学)
void Person::erodeDilate(Mat& img)
{
	// Morphological erode / dilate with a 3x3 rectangular element.
	// NOTE(review): myBinary() is called but its result is unused here —
	// erosion/dilation run on the color input; kept as in the original notes.
	imshow("原图", img);
	Mat binary = this->myBinary(img);
	Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
	Mat eroded;
	erode(img, eroded, kernel);
	imshow("腐蚀", eroded);
	Mat dilated;
	dilate(img, dilated, kernel);
	imshow("膨胀", dilated);
}
只对起作用的区域产生变化, 对其他没作用到的不会产生变化
开操作 = 腐蚀+膨胀, 删除小的干扰块
闭操作 = 膨胀+腐蚀, 填充闭合区域
// Duplicate of the erode/dilate demo above (the notes repeat it verbatim);
// a second identical definition would not compile in one translation unit.
void Person::erodeDilate(Mat& img)
{
imshow("原图", img);
Mat result;
Mat result1;
// NOTE(review): `binary` is computed but unused; erode/dilate run on `img`
Mat binary = this->myBinary(img);
Mat rect = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
erode(img, result1, rect);
imshow("腐蚀", result1);
dilate(img, result, rect);
imshow("膨胀", result);
}
API: morphologyEx
参数3:morph_open
参数6: 连续操作(iterations):
作用: 速度会高于直接提升kernel大小
void Person::openCloss(Mat& img)
{
	// Morphological opening with a 15x1 horizontal bar element: removes
	// structures thinner than the bar from the binary image.
	imshow("原图", img);
	Mat binary = this->myBinary(img);
	imshow("二值化", binary);
	Mat bar = getStructuringElement(MORPH_RECT, Size(15, 1), Point(-1, -1));
	Mat opened;
	morphologyEx(binary, opened, MORPH_OPEN, bar, Point(-1, -1), 1);
	// NOTE(review): the window label says "腐蚀" (erode) but this is an opening
	imshow("腐蚀", opened);
}
黑帽和顶帽
击中击不中变换(morph_hitmiss)
通过特点的元素去匹配 如果匹配成功则击中
MORPH_CROSS: 十字交叉元素
void Person::gradi(Mat& img)
{
	// Morphological gradients of the grayscale image:
	// basic = dilate - erode, internal = gray - erode, external = dilate - gray.
	// The basic gradient is then binarized with inverted OTSU.
	if (img.empty())
	{
		cout << "图片路径错误" << endl;
		return;
	}
	imshow("原图", img);
	Mat gray; Mat binary;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
	Mat eroded; Mat dilated;
	erode(gray, eroded, kernel, Point(-1, -1));
	dilate(gray, dilated, kernel, Point(-1, -1));
	Mat basic; Mat inner; Mat outer;
	subtract(dilated, eroded, basic);
	subtract(gray, eroded, inner);
	subtract(dilated, gray, outer);
	imshow("形态学", basic);
	threshold(basic, basic, 0, 255, THRESH_BINARY_INV | THRESH_OTSU);
	imshow("基本形态学梯度", basic);
}
API:morph_TOPHAT, morph_BLACKHAT
void Person::morph1(Mat& img)
{
	// Black-hat transform (closing minus input) with a 14x14 ellipse element:
	// highlights dark regions smaller than the element.
	imshow("原图", img);
	Mat binary = this->myBinary(img);
	Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(14, 14), Point(-1, -1));
	imshow("二值化", binary);
	//MORPH_TOPHAT
	Mat hat;
	morphologyEx(binary, hat, MORPH_BLACKHAT, kernel);
	// NOTE(review): window label says "顶帽" (top-hat) but MORPH_BLACKHAT is applied
	imshow("顶帽", hat);
}
API:morph_hitmiss
通过特点的元素去匹配 如果匹配成功则击中/通过结构元素去寻找, 如果形状一致则被击中
MORPH_CROSS: 十字交叉元素
void Person::morph2(Mat& img)
{
	// Hit-or-miss transform with a 12x12 cross element: fires only where the
	// structuring element exactly matches the binary pattern.
	imshow("原图", img);
	Mat binary = this->myBinary(img);
	Mat cross = getStructuringElement(MORPH_CROSS, Size(12, 12), Point(-1, -1));
	Mat matched;
	morphologyEx(binary, matched, MORPH_HITMISS, cross);
	imshow("匹配模板", matched);
}
VideoCapture xx(0): 打开当前摄像头, 也可以打开usb摄像头
capture.isOpened: 确认是否打开摄像头, 为1=打开
namedWindow默认打开的摄像头是640*480
capture.read(mat) || capture >> mat: 读取摄像头
capture内置属性
VideoWirter: 保存视频/写入
一个文件保存视频流最大是2g
注意点: 如果不是c++语言要记得销毁: 读取.release(), 写入.release(), 如果没有可能会造成视频保存打不开, 保存摄像头一般设置为25.否则打不开
void Person::cvtVideos(string src)
{
	// Play a video and, per frame, extract a green-ish HSV range, show the
	// inverted mask, and the masked ROI. ESC (or end of stream) quits.
	VideoCapture capture(src);
	if (!capture.isOpened())
	{
		cout << "视频路径错误" << endl;
		return;
	}
	Mat frame;
	for (;;)
	{
		if (!capture.read(frame)) break;
		imshow("加载视频", frame);
		Mat hsv;
		cvtColor(frame, hsv, COLOR_BGR2HSV);
		// keep pixels whose H/S/V fall in the green range
		inRange(hsv, Scalar(35, 43, 46), Scalar(77, 255, 255), hsv);
		imshow("hsv", hsv);
		Mat mask;
		bitwise_not(hsv, mask);
		imshow("mask", mask);
		Mat roi;
		bitwise_and(frame, frame, roi, mask);
		imshow("roi", roi);
		if (waitKey(25) == 27) break;   // ESC
	}
	capture.release();
}
色彩空间分布
绝大数的普遍的
特点设备的
HSV色彩空间
对各种颜色分辨很清楚
Lab色彩空间
主要的颜色在L和B上面 只有2个通道
YCbCr色彩空间
对皮肤能更好的显示
inrang: rgb转换lab设置的参数,
h:0~180, s:0~255
// Duplicate of Person::cvtVideos from the video section (the notes repeat it
// verbatim under the color-space section); a second identical definition
// would not compile in one translation unit.
void Person::cvtVideos(string src)
{
VideoCapture capture(src);
if (!capture.isOpened())
{
cout << "视频路径错误" << endl;
return;
}
Mat img;
while (true)
{
Mat hsv; Mat mask; Mat result;
bool b = capture.read(img);
if (!b) break;
imshow("加载视频", img);
cvtColor(img, hsv, COLOR_BGR2HSV);
// keep pixels in the green HSV range
inRange(hsv, Scalar(35, 43, 46), Scalar(77, 255, 255), hsv);
imshow("hsv", hsv);
bitwise_not(hsv, mask);
imshow("mask", mask);
bitwise_and(img, img, result,mask);
imshow("roi", result);
int key = waitKey(25);
if (key == 27)
{
break;
}
}
capture.release();
}
注意点: 模型和样品的bins必须完全一直
API: calcBackProject : 反向投影
会受到2个因素影响, 直方图的bins会影响到, bins越大, 匹配越细微, bins一般设置为48左右
void Person::calcBack(Mat sample, Mat target)
{
	// Histogram back-projection: build a 2-D hue/saturation histogram of
	// `sample`, normalize it, and project it onto `target` to highlight
	// regions with similar colors.
	if (sample.empty() || target.empty())
	{
		cout << "img路径错误" << endl;
		return;
	}
	imshow("样本", sample); imshow("模板", target);
	Mat hsvSample; Mat hsvTarget;
	cvtColor(sample, hsvSample, COLOR_BGR2HSV);
	cvtColor(target, hsvTarget, COLOR_BGR2HSV);
	int channles[] = { 0,1 };           // hue + saturation
	int bins[] = { 48, 48 };            // ~48 bins is a good default
	float hRangs[] = { 0,180 };         // H range in OpenCV is 0..180
	float sRangs[] = { 0,255 };
	const float* rangs[] = { hRangs,sRangs };
	Mat hist;
	calcHist(&hsvSample, 1, channles, Mat(), hist, 2, bins, rangs, true, false);
	Mat norm;
	normalize(hist, norm, 0, 255, NORM_MINMAX, -1, Mat());
	Mat dst;
	calcBackProject(&hsvTarget, 1, channles, norm, dst, rangs, 1.0, true);
	imshow("直方图反向投影", dst);
}
色彩空间分布
绝大数的普遍的
特点设备的
HSV色彩空间
对各种颜色分辨很清楚
Lab色彩空间
主要的颜色在L和B上面 只有2个通道
YCbCr色彩空间
对皮肤能更好的显示
inrang: rgb转换lab设置的参数,
h:0~180, s:0~255
// Third verbatim copy of Person::cvtVideos in the notes (same body as the
// two earlier ones); kept as-is for reference.
void Person::cvtVideos(string src)
{
VideoCapture capture(src);
if (!capture.isOpened())
{
cout << "视频路径错误" << endl;
return;
}
Mat img;
while (true)
{
Mat hsv; Mat mask; Mat result;
bool b = capture.read(img);
if (!b) break;
imshow("加载视频", img);
cvtColor(img, hsv, COLOR_BGR2HSV);
// keep pixels in the green HSV range
inRange(hsv, Scalar(35, 43, 46), Scalar(77, 255, 255), hsv);
imshow("hsv", hsv);
bitwise_not(hsv, mask);
imshow("mask", mask);
bitwise_and(img, img, result,mask);
imshow("roi", result);
int key = waitKey(25);
if (key == 27)
{
break;
}
}
capture.release();
}
注意点: 模型和样品的bins必须完全一致
API: calcBackProject : 反向投影
会受到2个因素影响, 直方图的bins会影响到, bins越大, 匹配越细微, bins一般设置为48左右
// Duplicate of Person::calcBack above (the notes repeat it verbatim);
// histogram back-projection of a 2-D H/S histogram onto the target image.
void Person::calcBack(Mat sample, Mat target)
{
if (sample.empty() || target.empty())
{
cout << "img路径错误" << endl;
return;
}
imshow("样本", sample); imshow("模板", target);
Mat hsvSample; Mat hsvTarget;
cvtColor(sample, hsvSample, COLOR_BGR2HSV);
cvtColor(target, hsvTarget, COLOR_BGR2HSV);
// channels 0 and 1 = hue + saturation
int channles[] = { 0,1 };
Mat hist;
int hBins = 48; int sBins = 48;
int bins[] = { hBins,sBins };
// hue range in OpenCV is 0..180
float hRangs[] = { 0,180 };
float sRangs[] = { 0,255 };
const float* rangs[] = { hRangs,sRangs };
calcHist(&hsvSample, 1, channles, Mat(), hist, 2, bins, rangs, true, false);
Mat norm;
normalize(hist, norm, 0, 255,NORM_MINMAX,-1,Mat());
Mat dst;
calcBackProject(&hsvTarget, 1, channles, norm, dst, rangs, 1.0, true);
imshow("直方图反向投影", dst);
}