The Hough Line Transform is used to detect straight lines.
Prerequisite: edge detection has already been performed.
It works by mapping points from the image (Cartesian) plane into polar (Hough) space.
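For reference, each edge point (x, y) votes for every (rho, theta) pair that satisfies the textbook line parameterization (this is the general formulation, not anything specific to the demo below):

rho = x * cos(theta) + y * sin(theta)

Collinear points produce sinusoids that intersect at a common (rho, theta), so accumulator cells with enough votes correspond to lines.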
Related APIs:
Standard Hough transform: cv::HoughLines maps points from the image plane into Hough space; its output is (rho, theta) pairs describing lines in polar coordinates.
Probabilistic Hough transform: cv::HoughLinesP outputs, for each detected segment, the coordinates of its two endpoints in the Cartesian image plane.
cv::HoughLines(
InputArray src, // input image, must be an 8-bit single-channel (edge/binary) image
OutputArray lines, // output lines, each represented in polar coordinates as (rho, theta)
double rho, // distance resolution of the accumulator, in pixels
double theta, // angle resolution of the accumulator, usually CV_PI/180
int threshold, // accumulator threshold: only candidates with enough votes are returned as lines
double srn = 0, // multi-scale variant: divisor for rho; 0 means the classic Hough transform
double stn = 0, // multi-scale variant: divisor for theta; 0 means the classic Hough transform
double min_theta = 0, // start of the angle scan range (0 ~ CV_PI); the default is usually fine
double max_theta = CV_PI // end of the angle scan range
) // usually for experienced developers, since the (rho, theta) result must be converted back to image space by hand
cv::HoughLinesP(
InputArray src, // input image, must be an 8-bit single-channel (edge/binary) image
OutputArray lines, // output line segments, each as the endpoint coordinates (x1, y1, x2, y2)
double rho, // distance resolution of the accumulator, in pixels
double theta, // angle resolution of the accumulator, usually CV_PI/180
int threshold, // accumulator threshold: only candidates with enough votes are returned as lines
double minLineLength = 0, // minimum line length; shorter segments are rejected
double maxLineGap = 0 // maximum allowed gap between points on the same line to link them
)
#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>
using namespace cv;
using namespace std;
int main(int argc, char** argv) {
Mat src, src_gray, dst;
src = imread("D:/vcprojects/images/lines.png");
if (!src.data) {
printf("could not load image...\n");
return -1;
}
char INPUT_TITLE[] = "input image";
char OUTPUT_TITLE[] = "hough-line-detection";
namedWindow(INPUT_TITLE, CV_WINDOW_AUTOSIZE);
namedWindow(OUTPUT_TITLE, CV_WINDOW_AUTOSIZE);
imshow(INPUT_TITLE, src);
// extract edge
Canny(src, src_gray, 150, 200);
cvtColor(src_gray, dst, CV_GRAY2BGR);
imshow("edge image", src_gray);
vector<Vec2f> lines;
HoughLines(src_gray, lines, 1, CV_PI / 180, 150, 0, 0);
for (size_t i = 0; i < lines.size(); i++) {
float rho = lines[i][0]; // rho: distance from the origin in polar coordinates
float theta = lines[i][1]; // theta: angle in polar coordinates
Point pt1, pt2;
double a = cos(theta), b = sin(theta);
double x0 = a*rho, y0 = b*rho;
// convert back to two points in image coordinates, far apart along the line
pt1.x = cvRound(x0 + 1000 * (-b));
pt1.y = cvRound(y0 + 1000 * (a));
pt2.x = cvRound(x0 - 1000 * (-b));
pt2.y = cvRound(y0 - 1000 * (a));
line(dst, pt1, pt2, Scalar(0, 0, 255), 1, CV_AA);
}
/*
vector<Vec4f> plines;
HoughLinesP(src_gray, plines, 1, CV_PI / 180.0, 10, 0, 10);
Scalar color = Scalar(0, 0, 255);
for (size_t i = 0; i < plines.size(); i++) {
Vec4f hline = plines[i];
line(dst, Point(hline[0], hline[1]), Point(hline[2], hline[3]), color, 3, LINE_AA);
}*/
imshow(OUTPUT_TITLE, dst);
waitKey(0);
return 0;
}
Because Hough circle detection is sensitive to noise, the image should first be smoothed with a median filter.
For efficiency, the Hough circle detection implemented in OpenCV is based on image gradients and works in two steps:
1. Detect edges and find candidate circle centers
2. Starting from the candidate centers of step 1, compute the best-fitting radius
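For reference, this is the cv::HoughCircles signature used in the demo below; the per-parameter comments are my annotations of its documented behavior, not text from this tutorial:

cv::HoughCircles(
InputArray image, // input 8-bit single-channel grayscale image
OutputArray circles, // output circles, each as (center_x, center_y, radius)
int method, // detection method; CV_HOUGH_GRADIENT is the gradient-based approach described above
double dp, // inverse ratio of accumulator resolution to image resolution (1 = same resolution)
double minDist, // minimum distance between the centers of detected circles
double param1 = 100, // higher threshold passed to the internal Canny edge detector (the lower one is half of it)
double param2 = 100, // accumulator threshold for circle centers; smaller values return more (possibly false) circles
int minRadius = 0, // minimum circle radius
int maxRadius = 0 // maximum circle radius
)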
#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>
using namespace cv;
using namespace std;
int main(int argc, char** argv) {
Mat src, dst;
src = imread("D:/vcprojects/images/circle.png");
if (!src.data) {
printf("could not load image...\n");
return -1;
}
char INPUT_TITLE[] = "input image";
char OUTPUT_TITLE[] = "hough circle demo";
namedWindow(INPUT_TITLE, CV_WINDOW_AUTOSIZE);
namedWindow(OUTPUT_TITLE, CV_WINDOW_AUTOSIZE);
imshow(INPUT_TITLE, src);
// median blur to suppress noise
Mat moutput;
medianBlur(src, moutput, 3);
cvtColor(moutput, moutput, CV_BGR2GRAY);
// Hough circle detection
vector<Vec3f> pcircles;
HoughCircles(moutput, pcircles, CV_HOUGH_GRADIENT, 1, 10, 100, 30, 5, 50);
src.copyTo(dst);
for (size_t i = 0; i < pcircles.size(); i++) {
Vec3f cc = pcircles[i];
circle(dst, Point(cc[0], cc[1]), cc[2], Scalar(0, 0, 255), 2, LINE_AA);
circle(dst, Point(cc[0], cc[1]), 2, Scalar(198, 23, 155), 2, LINE_AA);
}
imshow(OUTPUT_TITLE, dst);
waitKey(0);
return 0;
}
Put simply, remap moves each pixel of the input image to another position in the output image according to a given rule, producing a new image.
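Concretely, every output pixel is looked up in the source image through the two map images; this is the standard definition of the operation:

dst(x, y) = src(map_x(x, y), map_y(x, y))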
Related API: cv::remap:
cv::remap(
InputArray src, // input image
OutputArray dst, // output image
InputArray map1, // x map, CV_32FC1 (or a combined (x, y) map of type CV_32FC2)
InputArray map2, // y map
int interpolation, // interpolation method, typically INTER_LINEAR; cubic and others are available
int borderMode, // border mode, e.g. BORDER_CONSTANT
const Scalar borderValue // border color used with BORDER_CONSTANT
)
#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>
using namespace cv;
Mat src, dst, map_x, map_y;
const char* OUTPUT_TITLE = "remap demo";
int index = 0;
void update_map(void);
int main(int argc, char** argv) {
src = imread("D:/vcprojects/images/test.png");
if (!src.data) {
printf("could not load image...\n");
return -1;
}
char input_win[] = "input image";
namedWindow(input_win, CV_WINDOW_AUTOSIZE);
namedWindow(OUTPUT_TITLE, CV_WINDOW_AUTOSIZE);
imshow(input_win, src);
map_x.create(src.size(), CV_32FC1);
map_y.create(src.size(), CV_32FC1);
int c = 0;
while (true) {
c = waitKey(500);
if ((char)c == 27) {
break;
}
if (c > 0) {
index = c % 4; // only change the mode when a key was actually pressed (waitKey returns -1 on timeout)
}
update_map();
remap(src, dst, map_x, map_y, INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 255, 255));
imshow(OUTPUT_TITLE, dst);
}
return 0;
}
void update_map(void) {
for (int row = 0; row < src.rows; row++) {
for (int col = 0; col < src.cols; col++) {
switch (index) {
case 0:
if (col > (src.cols * 0.25) && col <= (src.cols*0.75) && row > (src.rows*0.25) && row <= (src.rows*0.75)) {
map_x.at<float>(row, col) = 2 * (col - (src.cols*0.25));
map_y.at<float>(row, col) = 2 * (row - (src.rows*0.25));
}
else {
map_x.at<float>(row, col) = 0;
map_y.at<float>(row, col) = 0;
}
break;
case 1:
map_x.at<float>(row, col) = (src.cols - col - 1);
map_y.at<float>(row, col) = row;
break;
case 2:
map_x.at<float>(row, col) = col;
map_y.at<float>(row, col) = (src.rows - row - 1);
break;
case 3:
map_x.at<float>(row, col) = (src.cols - col - 1);
map_y.at<float>(row, col) = (src.rows - row - 1);
break;
}
}
}
}
An image histogram counts how often each gray level (0~255) occurs across the whole image; the chart built from these counts is the image (gray-level) histogram. The histogram reflects how the gray levels of the image are distributed and is a statistical characteristic of the image.
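Histogram equalization redistributes these gray levels so that the full intensity range is used. In outline (this is the standard algorithm as documented for equalizeHist, not something added here): compute the histogram H of the input, accumulate it, normalize the cumulative histogram H' so that its maximum is 255, and then remap every pixel with that lookup table:

dst(x, y) = H'(src(x, y))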
API: equalizeHist(
InputArray src, // input image, must be an 8-bit single-channel image
OutputArray dst // output result
)
#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>
using namespace cv;
int main(int argc, char** argv) {
Mat src, dst;
src = imread("D:/vcprojects/images/test.png");
if (!src.data) {
printf("could not load image...\n");
return -1;
}
cvtColor(src, src, CV_BGR2GRAY);
equalizeHist(src, dst);
char INPUT_T[] = "input image";
char OUTPUT_T[] = "result image";
namedWindow(INPUT_T, CV_WINDOW_AUTOSIZE);
namedWindow(OUTPUT_T, CV_WINDOW_AUTOSIZE);
imshow(INPUT_T, src);
imshow(OUTPUT_T, dst);
waitKey(0);
return 0;
}
The histogram above is built from pixel intensity values, but a histogram can just as well be built over any image attribute: gradients, per-pixel angles, and so on. That is the general meaning of a histogram; the gray-level histogram is simply the most common case. The most common attributes of a histogram are its dims (number of dimensions), bins (number of bins per dimension), and range (the range of values being counted). Related APIs:
split( // split a multi-channel image into single-channel images
const Mat &src, // input image
Mat* mvbegin // output array of channel images
)
calcHist(
const Mat* images, // pointer to the input image(s)
int nimages, // number of input images
const int* channels, // list of channels used to compute the histogram
InputArray mask, // optional mask; pass Mat() if unused
OutputArray hist, // output histogram data
int dims, // histogram dimensionality
const int* histSize, // number of bins in each dimension
const float** ranges, // value range for each dimension
bool uniform, // true by default
bool accumulate // false by default
)
#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>
using namespace std;
using namespace cv;
int main(int argc, char** argv) {
Mat src = imread("D:/vcprojects/images/test.png");
if (!src.data) {
printf("could not load image...\n");
return -1;
}
char INPUT_T[] = "input image";
char OUTPUT_T[] = "histogram demo";
namedWindow(INPUT_T, CV_WINDOW_AUTOSIZE);
namedWindow(OUTPUT_T, CV_WINDOW_AUTOSIZE);
imshow(INPUT_T, src);
// split into B, G, R planes
vector<Mat> bgr_planes;
split(src, bgr_planes);
//imshow("single channel demo", bgr_planes[0]);
// compute a histogram for each channel
int histSize = 256;
float range[] = { 0, 256 };
const float *histRanges = { range };
Mat b_hist, g_hist, r_hist;
calcHist(&bgr_planes[0], 1, 0, Mat(), b_hist, 1, &histSize, &histRanges, true, false);
calcHist(&bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRanges, true, false);
calcHist(&bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, &histRanges, true, false);
// normalize to the display height
int hist_h = 400;
int hist_w = 512;
int bin_w = hist_w / histSize;
Mat histImage(hist_h, hist_w, CV_8UC3, Scalar(0, 0, 0)); // Mat takes (rows, cols), i.e. (height, width)
normalize(b_hist, b_hist, 0, hist_h, NORM_MINMAX, -1, Mat());
normalize(g_hist, g_hist, 0, hist_h, NORM_MINMAX, -1, Mat());
normalize(r_hist, r_hist, 0, hist_h, NORM_MINMAX, -1, Mat());
// render histogram chart
for (int i = 1; i < histSize; i++) {
line(histImage, Point((i - 1)*bin_w, hist_h - cvRound(b_hist.at<float>(i - 1))),
Point((i)*bin_w, hist_h - cvRound(b_hist.at<float>(i))), Scalar(255, 0, 0), 2, LINE_AA);
line(histImage, Point((i - 1)*bin_w, hist_h - cvRound(g_hist.at<float>(i - 1))),
Point((i)*bin_w, hist_h - cvRound(g_hist.at<float>(i))), Scalar(0, 255, 0), 2, LINE_AA);
line(histImage, Point((i - 1)*bin_w, hist_h - cvRound(r_hist.at<float>(i - 1))),
Point((i)*bin_w, hist_h - cvRound(r_hist.at<float>(i))), Scalar(0, 0, 255), 2, LINE_AA);
}
imshow(OUTPUT_T, histImage);
waitKey(0);
return 0;
}
Compute the histograms H1 and H2 of the two input images and normalize them to the same scale. The distance between H1 and H2 then measures how similar the two histograms, and hence the two images, are. OpenCV provides four comparison methods (their standard definitions are listed after this list):
Correlation
Chi-Square
Intersection
Bhattacharyya distance
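For reference, these are the standard formulas behind cv::compareHist as given in the OpenCV documentation, where N is the number of bins and \bar{H}_k is the mean value of histogram H_k:

Correlation: d(H_1, H_2) = \frac{\sum_I (H_1(I) - \bar{H}_1)(H_2(I) - \bar{H}_2)}{\sqrt{\sum_I (H_1(I) - \bar{H}_1)^2 \sum_I (H_2(I) - \bar{H}_2)^2}}

Chi-Square: d(H_1, H_2) = \sum_I \frac{(H_1(I) - H_2(I))^2}{H_1(I)}

Intersection: d(H_1, H_2) = \sum_I \min(H_1(I), H_2(I))

Bhattacharyya distance: d(H_1, H_2) = \sqrt{1 - \frac{1}{\sqrt{\bar{H}_1 \bar{H}_2 N^2}} \sum_I \sqrt{H_1(I) \cdot H_2(I)}}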
Steps:
First convert the images from the RGB color space to the HSV color space (cvtColor);
Compute the image histograms and normalize them to [0, 1] (calcHist and normalize);
Compare them with one of the four methods above (compareHist).
#include <opencv2/opencv.hpp>
#include <iostream>
#include <sstream>
using namespace std;
using namespace cv;
string convertToString(double d);
int main(int argc, char** argv) {
Mat base, test1, test2;
Mat hsvbase, hsvtest1, hsvtest2;
base = imread("D:/vcprojects/images/test.jpg");
if (!base.data) {
printf("could not load image...\n");
return -1;
}
test1 = imread("D:/vcprojects/images/lena.png");
test2 = imread("D:/vcprojects/images/lenanoise.png");
cvtColor(base, hsvbase, CV_BGR2HSV);
cvtColor(test1, hsvtest1, CV_BGR2HSV);
cvtColor(test2, hsvtest2, CV_BGR2HSV);
int h_bins = 50; int s_bins = 60;
int histSize[] = { h_bins, s_bins };
// hue varies from 0 to 179, saturation from 0 to 255
float h_ranges[] = { 0, 180 };
float s_ranges[] = { 0, 256 };
const float* ranges[] = { h_ranges, s_ranges };
// Use the 0-th and 1-st channels
int channels[] = { 0, 1 };
MatND hist_base;
MatND hist_test1;
MatND hist_test2;
calcHist(&hsvbase, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false);
normalize(hist_base, hist_base, 0, 1, NORM_MINMAX, -1, Mat());
calcHist(&hsvtest1, 1, channels, Mat(), hist_test1, 2, histSize, ranges, true, false);
normalize(hist_test1, hist_test1, 0, 1, NORM_MINMAX, -1, Mat());
calcHist(&hsvtest2, 1, channels, Mat(), hist_test2, 2, histSize, ranges, true, false);
normalize(hist_test2, hist_test2, 0, 1, NORM_MINMAX, -1, Mat());
double basebase = compareHist(hist_base, hist_base, CV_COMP_INTERSECT);
double basetest1 = compareHist(hist_base, hist_test1, CV_COMP_INTERSECT);
double basetest2 = compareHist(hist_base, hist_test2, CV_COMP_INTERSECT);
double tes1test2 = compareHist(hist_test1, hist_test2, CV_COMP_INTERSECT);
printf("test1 compare with test2 correlation value :%f", tes1test2);
Mat test12;
test2.copyTo(test12);
putText(base, convertToString(basebase), Point(50, 50), CV_FONT_HERSHEY_COMPLEX, 1, Scalar(0, 0, 255), 2, LINE_AA);
putText(test1, convertToString(basetest1), Point(50, 50), CV_FONT_HERSHEY_COMPLEX, 1, Scalar(0, 0, 255), 2, LINE_AA);
putText(test2, convertToString(basetest2), Point(50, 50), CV_FONT_HERSHEY_COMPLEX, 1, Scalar(0, 0, 255), 2, LINE_AA);
putText(test12, convertToString(tes1test2), Point(50, 50), CV_FONT_HERSHEY_COMPLEX, 1, Scalar(0, 0, 255), 2, LINE_AA);
namedWindow("base", CV_WINDOW_AUTOSIZE);
namedWindow("test1", CV_WINDOW_AUTOSIZE);
namedWindow("test2", CV_WINDOW_AUTOSIZE);
imshow("base", base);
imshow("test1", test1);
imshow("test2", test2);
imshow("test12", test12);
waitKey(0);
return 0;
}
string convertToString(double d) {
ostringstream os;
if (os << d)
return os.str();
return "invalid conversion";
}
Back projection shows how a histogram model is distributed over a target image.
Put simply, it uses a histogram model to look for similar regions in a target image. The histogram model is usually built from the H and S channels of the HSV color space.
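For reference, this is the cv::calcBackProject signature used in the demo below; the parameter comments are my annotations of its documented behavior:

calcBackProject(
const Mat* images, // pointer to the input image(s), here the single hue plane
int nimages, // number of input images
const int* channels, // list of channels used to compute the back projection
InputArray hist, // the (normalized) histogram model
OutputArray backProject, // output back-projection image, same size and depth as the input
const float** ranges, // value range for each histogram dimension
double scale = 1, // optional scale factor applied to the output
bool uniform = true // whether the histogram is uniform
)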
#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>
using namespace std;
using namespace cv;
Mat src; Mat hsv; Mat hue;
int bins = 12;
void Hist_And_Backprojection(int, void*);
int main(int argc, char** argv) {
src = imread("D:/vcprojects/images/t1.jpg");
if (src.empty()) {
printf("could not load image...\n");
return -1;
}
const char* window_image = "input image";
namedWindow(window_image, CV_WINDOW_NORMAL);
namedWindow("BackProj", CV_WINDOW_NORMAL);
namedWindow("Histogram", CV_WINDOW_NORMAL);
cvtColor(src, hsv, CV_BGR2HSV);
hue.create(hsv.size(), hsv.depth());
int nchannels[] = { 0, 0 };
mixChannels(&hsv, 1, &hue, 1, nchannels, 1);
createTrackbar("Histogram Bins:", window_image, &bins, 180, Hist_And_Backprojection);
Hist_And_Backprojection(0, 0);
imshow(window_image, src);
waitKey(0);
return 0;
}
void Hist_And_Backprojection(int, void*) {
float range[] = { 0, 180 };
const float *histRanges = { range };
Mat h_hist;
calcHist(&hue, 1, 0, Mat(), h_hist, 1, &bins, &histRanges, true, false);
normalize(h_hist, h_hist, 0, 255, NORM_MINMAX, -1, Mat());
Mat backPrjImage;
calcBackProject(&hue, 1, 0, h_hist, backPrjImage, &histRanges, 1, true);
imshow("BackProj", backPrjImage);
int hist_h = 400;
int hist_w = 400;
Mat histImage(hist_w, hist_h, CV_8UC3, Scalar(0, 0, 0));
int bin_w = (hist_w / bins);
for (int i = 1; i < bins; i++) {
rectangle(histImage,
Point((i - 1)*bin_w, (hist_h - cvRound(h_hist.at<float>(i - 1) * (400.0 / 255)))),
//Point(i*bin_w, (hist_h - cvRound(h_hist.at<float>(i) * (400.0 / 255)))),
Point(i*bin_w, hist_h),
Scalar(0, 0, 255), -1);
}
imshow("Histogram", histImage);
return;
}