## A Self-Implemented Canny Algorithm with OpenCV
1. Introduction
I recently started learning OpenCV, beginning by reinventing a few wheels, and I would like to share some of that work here, starting with this Canny edge detector. Many excellent blogs already explain the underlying theory very clearly, so I will not repeat it; if you are interested, the links below are all well-written summaries.
fengye2two https://blog.csdn.net/fengye2two/article/details/79190759
爱鱼 https://www.cnblogs.com/mightycode/p/6394810.html
mitutao https://www.cnblogs.com/love6tao/p/5152020.html
My self-implemented Canny algorithm works, but there is plenty of room for improvement. For example, the GaussianBlur and filter2D calls it relies on could themselves be implemented by hand, and the last step of my algorithm is not yet done with a stack or queue. I will fill these gaps in later posts. If you spot any mistakes, please point them out; I would be sincerely grateful.
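For the stack/queue idea in particular, here is a rough sketch of what it could look like (only an outline I am noting down for now, not the implementation used in part two; the function name TraceEdgeQueue is made up for illustration, and it assumes the same conventions as the code below: candidate pixels are marked 200 in image_restrain after non-maximum suppression, image_sobel_xy holds the gradient magnitude, and both are CV_64FC1):

```cpp
#include <opencv2/opencv.hpp>
#include <queue>

// Sketch of queue-based hysteresis tracing: seed with strong points, then grow into
// 8-connected weak candidates, visiting each pixel a bounded number of times.
void TraceEdgeQueue(cv::Mat& image_restrain, const cv::Mat& image_sobel_xy,
                    double low_gradvalue, double high_gradvalue) {
    std::queue<cv::Point> seeds;
    // Seed the queue with every candidate whose gradient reaches the high threshold.
    for (int i = 1; i < image_restrain.rows - 1; i++)
        for (int j = 1; j < image_restrain.cols - 1; j++)
            if (image_restrain.at<double>(i, j) == 200 &&
                image_sobel_xy.at<double>(i, j) >= high_gradvalue) {
                image_restrain.at<double>(i, j) = 255;    // confirmed edge point
                seeds.push(cv::Point(j, i));
            }
    // Grow edges into weak candidates that are 8-connected to a confirmed edge point.
    while (!seeds.empty()) {
        cv::Point p = seeds.front();
        seeds.pop();
        for (int dy = -1; dy <= 1; dy++)
            for (int dx = -1; dx <= 1; dx++) {
                int y = p.y + dy, x = p.x + dx;
                if (y < 1 || y >= image_restrain.rows - 1 || x < 1 || x >= image_restrain.cols - 1)
                    continue;                             // stay inside the border
                if (image_restrain.at<double>(y, x) == 200 &&
                    image_sobel_xy.at<double>(y, x) > low_gradvalue) {
                    image_restrain.at<double>(y, x) = 255;
                    seeds.push(cv::Point(x, y));
                }
            }
    }
    // Everything that was not promoted to 255 is dropped.
    for (int i = 0; i < image_restrain.rows; i++)
        for (int j = 0; j < image_restrain.cols; j++)
            if (image_restrain.at<double>(i, j) != 255)
                image_restrain.at<double>(i, j) = 0;
}
```

Compared with rescanning the 8-neighborhood in a while loop as I do below, the queue never needs repeated passes over the image, since each promoted pixel is enqueued exactly once.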
Without further ado, part two below is the code.
2. Code
```cpp
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <cmath>
using namespace cv;
using namespace std;
// Grow an edge from a strong seed at (w, h): look for an 8-connected neighbor that is still a
// candidate (value 200) and whose gradient exceeds the low threshold, promote it to an edge
// point (255), move there, and repeat until no neighbor qualifies.
void TraceEdge(int w, int h, double low_gradvalue, Mat& image_restrain, Mat& image_sobel_xy) {
    int xNum[8] = { 1,1,0,-1,-1,-1,0,1 };
    int yNum[8] = { 0,1,1,1,0,-1,-1,-1 };
    int xx = 0;
    int yy = 0;
    bool change = true;
    while (change)
    {
        change = false;
        for (int k = 0; k < 8; k++)
        {
            xx = w + xNum[k];
            yy = h + yNum[k];
            // Skip neighbors that fall outside the image.
            if (xx < 0 || xx >= image_restrain.rows || yy < 0 || yy >= image_restrain.cols)
                continue;
            // If this pixel is a candidate edge point that has not been processed yet
            // and its gradient is above the low threshold...
            double curgrayvalue = image_restrain.at<double>(xx, yy);
            double curgrdvalue = image_sobel_xy.at<double>(xx, yy);
            if (curgrayvalue == 200 && curgrdvalue > low_gradvalue)
            {
                change = true;
                // ...mark it as an edge point and continue tracing from it.
                image_restrain.at<double>(xx, yy) = 255;
                h = yy;
                w = xx;
                break;
            }
        }
    }
}
int main() {
    Rect rect;
    Mat image = imread("pic.jpg");
    if (image.empty()) {                  // bail out if the image could not be loaded
        cout << "could not read pic.jpg" << endl;
        return -1;
    }
    image.convertTo(image, CV_64FC3);     // imread cannot load the image as CV_64FC3 directly, so convert after loading
    Mat image_gray = Mat::zeros(image.rows, image.cols, CV_64FC1);
    Mat image_gaussian = Mat::zeros(image.rows, image.cols, CV_64FC1);
    Mat image_sobel_x = Mat::zeros(image.rows, image.cols, CV_64FC1);
    Mat image_sobel_y = Mat::zeros(image.rows, image.cols, CV_64FC1);
    Mat image_sobel_xy = Mat::zeros(image.rows, image.cols, CV_64FC1);    // gradient magnitude; non-maximum suppression is applied to this result
    Mat image_restrain = Mat::zeros(image.rows, image.cols, CV_64FC1);    // stores the result after non-maximum suppression
    Mat image_sobel_angle = Mat::zeros(image.rows, image.cols, CV_64FC1);
    Mat image_canny = Mat::zeros(image.rows, image.cols, CV_64FC1);
    /*+++++++++++++++++++++++++++ Step 1: grayscale conversion +++++++++++++++++++++++++++*/
    for (int i = 0; i < image.rows; i++) {
        for (int j = 0; j < image.cols; j++) {
            // OpenCV stores pixels as BGR, so channel 0 is blue, 1 is green, 2 is red
            image_gray.at<double>(i, j) = 0.114 * image.at<Vec3d>(i, j)[0] + 0.587 * image.at<Vec3d>(i, j)[1] + 0.299 * image.at<Vec3d>(i, j)[2];
        }
    }
    image.convertTo(image, CV_8UC3);
    imshow("original", image);
    /* imshow may scale the image depending on its depth:
       - 8-bit unsigned images are displayed as is;
       - 16-bit unsigned or 32-bit integer images are divided by 256, i.e. [0, 255*256] is mapped to [0, 255];
       - 32-bit floating-point images are multiplied by 255, i.e. [0, 1] is mapped to [0, 255]. */
    //image_gray.convertTo(image_gray, CV_8UC1);
    //imshow("gray", image_gray);
    /*+++++++++++++++++++++++++++ Step 1: end +++++++++++++++++++++++++++*/
    /*+++++++++++++++++++++++++++ Step 2: Gaussian blur with a 7x7 kernel +++++++++++++++++++++++++++*/
    GaussianBlur(image_gray, image_gaussian, Size(7, 7), 50, 50);    // OpenCV's own Gaussian blur with sigma = 50;
                                                                     // note that it makes dst the same type as src
    /*+++++++++++++++++++++++++++ Step 2: end +++++++++++++++++++++++++++*/
    /*+++++++++++++++++++++++++++ Step 3: convolve with the Sobel masks using the built-in filter2D +++++++++++++++++++++++++++*/
    // Sobel kernel for the x direction
    Mat Sx = (Mat_<double>(3, 3) << -1, 0, 1, -2, 0, 2, -1, 0, 1);
    // Sobel kernel for the y direction
    Mat Sy = (Mat_<double>(3, 3) << 1, 2, 1, 0, 0, 0, -1, -2, -1);
    filter2D(image_gaussian, image_sobel_x, -1, Sx);    // filter2D reallocates dst to match the source type when ddepth = -1
    filter2D(image_gaussian, image_sobel_y, -1, Sy);
    for (int i = 0; i < image_sobel_x.rows; i++)
        for (int j = 0; j < image_sobel_x.cols; j++) {
            image_sobel_xy.at<double>(i, j) = sqrt(pow(image_sobel_x.at<double>(i, j), 2) + pow(image_sobel_y.at<double>(i, j), 2));
            image_sobel_angle.at<double>(i, j) = atan2(image_sobel_y.at<double>(i, j), image_sobel_x.at<double>(i, j)) * 180 / CV_PI;    // at<double> requires CV_64FC1
            if (image_sobel_angle.at<double>(i, j) < 0)
                image_sobel_angle.at<double>(i, j) += 360;    // map the angle into [0, 360)
        }
    /*+++++++++++++++++++++++++++ Step 3: end +++++++++++++++++++++++++++*/
    /*+++++++++++++++++++++++++++ Step 4: non-maximum suppression +++++++++++++++++++++++++++*/
    double G1 = 0;
    double G2 = 0;          // interpolated gradient values on the two sides of the pixel along the gradient direction
    double g1 = 0;
    double g2 = 0;
    double g3 = 0;
    double g4 = 0;          // gradient values of the four neighbors used for the interpolation
    double tan_angle = 0;   // interpolation weight
    for (int i = 1; i < image_gray.rows - 1; i++)        // skip the image border
        for (int j = 1; j < image_gray.cols - 1; j++) {
            if (image_sobel_xy.at<double>(i, j) == 0) {
                image_restrain.at<double>(i, j) = 0;     // a zero gradient can never be a local maximum
                continue;
            }
            double angle = image_sobel_angle.at<double>(i, j);
            if ((angle >= 90 && angle < 135) || (angle >= 270 && angle < 315)) {
                g1 = image_sobel_xy.at<double>(i - 1, j - 1);
                g2 = image_sobel_xy.at<double>(i - 1, j);
                g3 = image_sobel_xy.at<double>(i + 1, j);
                g4 = image_sobel_xy.at<double>(i + 1, j + 1);
                tan_angle = fabs(image_sobel_x.at<double>(i, j) / image_sobel_y.at<double>(i, j));
                G1 = g1 * tan_angle + (1 - tan_angle) * g2;
                G2 = g4 * tan_angle + (1 - tan_angle) * g3;
            }
            else if ((angle >= 135 && angle < 180) || (angle >= 315 && angle < 360)) {
                g1 = image_sobel_xy.at<double>(i - 1, j - 1);
                g2 = image_sobel_xy.at<double>(i, j - 1);
                g3 = image_sobel_xy.at<double>(i, j + 1);
                g4 = image_sobel_xy.at<double>(i + 1, j + 1);
                tan_angle = fabs(image_sobel_y.at<double>(i, j) / image_sobel_x.at<double>(i, j));
                G1 = g1 * tan_angle + (1 - tan_angle) * g2;
                G2 = g4 * tan_angle + (1 - tan_angle) * g3;
            }
            else if ((angle >= 45 && angle < 90) || (angle >= 225 && angle < 270)) {
                g1 = image_sobel_xy.at<double>(i - 1, j);
                g2 = image_sobel_xy.at<double>(i - 1, j + 1);
                g3 = image_sobel_xy.at<double>(i + 1, j);
                g4 = image_sobel_xy.at<double>(i + 1, j - 1);
                tan_angle = fabs(image_sobel_x.at<double>(i, j) / image_sobel_y.at<double>(i, j));
                G1 = g1 * tan_angle + (1 - tan_angle) * g2;
                G2 = g4 * tan_angle + (1 - tan_angle) * g3;
            }
            else {    // (angle >= 0 && angle < 45) || (angle >= 180 && angle < 225)
                g1 = image_sobel_xy.at<double>(i - 1, j + 1);
                g2 = image_sobel_xy.at<double>(i, j + 1);
                g3 = image_sobel_xy.at<double>(i + 1, j - 1);
                g4 = image_sobel_xy.at<double>(i, j - 1);
                tan_angle = fabs(image_sobel_y.at<double>(i, j) / image_sobel_x.at<double>(i, j));
                G1 = g1 * tan_angle + (1 - tan_angle) * g2;
                G2 = g4 * tan_angle + (1 - tan_angle) * g3;
            }
            // Keep the pixel only if it is at least as large as both interpolated neighbors.
            if (image_sobel_xy.at<double>(i, j) >= G1 && image_sobel_xy.at<double>(i, j) >= G2)
                image_restrain.at<double>(i, j) = 200;    // mark as a candidate edge point
            else
                image_restrain.at<double>(i, j) = 0;
        }
    /*+++++++++++++++++++++++++++ Step 4: end +++++++++++++++++++++++++++*/
    /*+++++++++++++++++++++++++++ Step 5: double-threshold selection +++++++++++++++++++++++++++*/
    /* The two thresholds apply to the gradient magnitude (image_sobel_xy); the selection has three sub-steps. */
    /* Sub-step 1: count how many candidate pixels N there are. */
    double high_factor = 0.9;
    double low_factor = 0.5;                 // low threshold = low_factor * high threshold
    int pixel_table[1024];                   // gradient histogram
    int high_gradvalue = 0;                  // high gradient threshold
    int max_grad = 0;                        // largest non-empty histogram bin
    for (int i = 0; i < 1024; i++)
        pixel_table[i] = 0;
    // There are two ways to build this histogram. Method 1: count the gradient of every pixel.
    // Method 2: only count pixels that survived non-maximum suppression. I believe the second is correct:
    // step 4 already ruled out many points, and method 1 would effectively bring them back into consideration.
    for (int i = 0; i < image_sobel_xy.rows; i++)
        for (int j = 0; j < image_sobel_xy.cols; j++) {
            if (image_restrain.at<double>(i, j) == 200) {
                int t = (int)image_sobel_xy.at<double>(i, j);
                if (t > 1023) t = 1023;      // clamp, since the Sobel magnitude can exceed the histogram range
                pixel_table[t]++;
            }
        }                                    // histogram finished
    int pixel_sum = 0;
    for (int i = 0; i < 1024; i++) {
        if (pixel_table[i] != 0)
            max_grad = i;
        pixel_sum += pixel_table[i];         // number of candidate pixels left after non-maximum suppression
    }
    int high_index = high_factor * pixel_sum;    // rank of the pixel whose gradient becomes the high threshold
    /* Sub-step 2: take the gradient value at the high_factor * N position as the high threshold. */
    int count = 0;
    for (int i = 0; i < 1024; i++) {
        count += pixel_table[i];
        if (count >= high_index) {
            high_gradvalue = i;
            break;
        }
    }
    /* Sub-step 3: the low gradient threshold is half of the high threshold. */
    int low_gradvalue = high_gradvalue * low_factor;
    /*+++++++++++++++++++++++++++ Step 5: end +++++++++++++++++++++++++++*/
    /*+++++++++++++++++++++++++++ Step 6: neighborhood edge tracing +++++++++++++++++++++++++++*/
    // Compare the gradient (image_sobel_xy) against the two thresholds to decide which of the
    // binarized (200/0) points in image_restrain survive; image_sobel_xy would be trinarized
    // into (0, low_gradvalue, high_gradvalue).
    /*+++++++++++++++ The non-stack method below leaves room for improvement +++++++++++++++*/
    // Suppress isolated points by neighborhood tracing
    for (int i = 1; i < image.rows - 1; i++)
        for (int j = 1; j < image.cols - 1; j++)
            if (image_sobel_xy.at<double>(i, j) == low_gradvalue) {
                rect.x = j - 1;
                rect.y = i - 1;
                rect.width = 3;
                rect.height = 3;
                // Keep a weak point only if a strong point appears in its 3x3 neighborhood.
                for (int i1 = 0; i1 < 3; i1++)
                    for (int j1 = 0; j1 < 3; j1++)
                        if (image_sobel_xy(rect).at<double>(i1, j1) == high_gradvalue) {
                            image_canny.at<double>(i, j) = 255;
                            break;
                        }
            }
    for (int i = 1; i < image_sobel_xy.rows - 1; i++)        // double-threshold filtering of the Sobel result
        for (int j = 1; j < image_sobel_xy.cols - 1; j++) {
            if (image_sobel_xy.at<double>(i, j) >= high_gradvalue && image_restrain.at<double>(i, j) == 200) {
                image_restrain.at<double>(i, j) = 255;       // strong edge point: keep it and trace outward from it
                TraceEdge(i, j, low_gradvalue, image_restrain, image_sobel_xy);
            }
        }
    for (int i1 = 1; i1 < image_sobel_xy.rows - 1; i1++)     // discard everything that was not confirmed as an edge
        for (int j1 = 1; j1 < image_sobel_xy.cols - 1; j1++) {
            if (image_restrain.at<double>(i1, j1) != 255)
                image_restrain.at<double>(i1, j1) = 0;
        }
    /*+++++++++++++++ End of the non-stack method +++++++++++++++*/
    /*+++++++++++++++ Improved stack/queue method (to be added) +++++++++++++++*/
    /*+++++++++++++++ End of the stack/queue method +++++++++++++++*/
    /*+++++++++++++++++++++++++++ Step 6: end +++++++++++++++++++++++++++*/
    Mat a = imread("pic.jpg", 0);
    Mat b = Mat::zeros(a.rows, a.cols, CV_8UC1);
    Canny(a, b, 100, 200);                   // OpenCV's built-in Canny, for comparison (low threshold 100, high threshold 200)
    imshow("canny", b);
    image_gray.convertTo(image_gray, CV_8UC1);
    imshow("gray", image_gray);
    image_gaussian.convertTo(image_gaussian, CV_8UC1);
    imshow("gaussian", image_gaussian);
    image_sobel_x.convertTo(image_sobel_x, CV_8UC1);
    image_sobel_y.convertTo(image_sobel_y, CV_8UC1);
    image_sobel_xy.convertTo(image_sobel_xy, CV_8UC1);
    imshow("sobel_x", image_sobel_x);
    imshow("sobel_y", image_sobel_y);
    imshow("sobel_xy", image_sobel_xy);
    image_restrain.convertTo(image_restrain, CV_8UC1);
    imshow("image_restrain", image_restrain);
    image_canny.convertTo(image_canny, CV_8UC1);
    imshow("image_canny", image_canny);
    waitKey();                               // waitKey() already blocks until a key is pressed
    return 0;
}
```
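Beyond eyeballing the imshow windows, a rough numeric comparison against OpenCV's Canny is sometimes handy. A small helper along these lines could be applied to the two CV_8UC1 edge maps produced above (image_restrain and b); countDisagreements is just a name made up for illustration, not part of the program:

```cpp
#include <opencv2/opencv.hpp>

// Count the pixels on which two 8-bit edge maps disagree.
int countDisagreements(const cv::Mat& edges_a, const cv::Mat& edges_b) {
    CV_Assert(edges_a.size() == edges_b.size());
    CV_Assert(edges_a.type() == CV_8UC1 && edges_b.type() == CV_8UC1);
    cv::Mat diff;
    cv::absdiff(edges_a, edges_b, diff);     // 255 wherever exactly one of the two maps marks an edge
    return cv::countNonZero(diff);
}
```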