This example does the following:
1. Use cvHoughCircles to obtain the sequence of circles found in a grayscale image (Example 6-1);
2. Load an image that contains clear straight lines and circles, such as a bicycle seen from the side, and run both the Hough line transform and the Hough circle transform on it (Chapter 6, Exercise 8);
3. Use the Hough transform to detect arbitrary shapes of different perimeters (Chapter 6, Exercise 9).
The code is as follows:
#include <cv.h>        // legacy OpenCV C headers; the names were stripped from the original listing and are assumed here
#include <highgui.h>
#include <iostream>
using namespace std;
int main(int argc, char* argv[])
{
IplImage *Image, *Ihough; //images
IplImage *Input1, *Ihough_line, *Ihough_circle; //images
IplImage *Input2, *IHough; //images
if ((Image = cvLoadImage("D:\\Template\\OpenCV\\Template37_Hough_Circle_Line\\Debug\\2.jpg", CV_LOAD_IMAGE_GRAYSCALE)) == 0)
return -1;
if ((Input1 = cvLoadImage("D:\\Template\\OpenCV\\Template37_Hough_Circle_Line\\Debug\\1.jpg", CV_LOAD_IMAGE_GRAYSCALE)) == 0)
return -2;
if ((Input2 = cvLoadImage("D:\\Template\\OpenCV\\Template37_Hough_Circle_Line\\Debug\\7.jpg", CV_LOAD_IMAGE_GRAYSCALE)) == 0)
return -3;
Ihough=cvCreateImage(cvGetSize(Image), Image->depth, Image->nChannels);
Ihough_line = cvCreateImage(cvGetSize(Input1), Image->depth, Image->nChannels);
Ihough_circle = cvCreateImage(cvGetSize(Input1), Image->depth, Image->nChannels);
IHough = cvCreateImage(cvGetSize(Input2), Image->depth, Image->nChannels);
CvMemStorage *storage0 = cvCreateMemStorage(0); //allocate memory storage
CvMemStorage *storage1 = cvCreateMemStorage(0); //allocate memory storage
CvMemStorage *storage2 = cvCreateMemStorage(0); //allocate memory storage
CvMemStorage *storage3 = cvCreateMemStorage(0); //allocate memory storage
CvMemStorage *storage4 = cvCreateMemStorage(0); //allocate memory storage
//cvSmooth(Image, Image, CV_GAUSSIAN, 3, 3); //Gaussian smoothing
cvCanny(Image, Ihough, 50, 150); //edge detection
cvCanny(Input1, Ihough_line, 50, 150); //edge detection
cvCanny(Input1, Ihough_circle, 50, 150); //edge detection
cvCanny(Input2, IHough, 50, 150); //edge detection
CvSeq *results = cvHoughCircles(Ihough, storage0, CV_HOUGH_GRADIENT, 2, Ihough->width / 10, 50, 200, 0, 0);
//CvSeq *results = cvHoughCircles(Image, storage, CV_HOUGH_GRADIENT, 2, Image->width / 10);
//input, storage, Hough method, accumulator resolution, minimum distance between circle centers,
//edge (Canny high) threshold, accumulator threshold for accepting a circle, minimum radius, maximum radius
for (int i = 0; i < results->total; i++) //iterate over the sequence elements
{
float *p = (float*)cvGetSeqElem(results, i); //fetch one sequence element
CvPoint pt1 = cvPoint(cvRound(p[0]),cvRound(p[1])); //round to the nearest integer
cvCircle(Ihough, pt1, cvRound(p[2]), CV_RGB(100, 100, 100), 8); //draw the circle
}
CvSeq *result1 = cvHoughLines2(Ihough_line, storage1, CV_HOUGH_PROBABILISTIC, 1, CV_PI / 180, 50, 100, 10);
//input, storage, Hough method, accumulator resolution (pixels), accumulator resolution (radians),
//accumulator threshold for accepting a line, minimum line length, maximum gap between collinear segments
//For the SHT and MSHT methods the returned elements are (rho, theta) pairs; they can be converted to endpoints as follows:
//for (int i = 0; i < MIN(line->total, 100); i++)
//{
// float* lines = (float*)cvGetSeqElem(line, i);
// float rho = lines[0];
// float theta = lines[1];
// CvPoint pt1, pt2;
// double a = cos(theta), b = sin(theta);
// double x0 = a*rho, y0 = b*rho;
// pt1.x = cvRound(x0 + 1000 * (-b));
// pt1.y = cvRound(y0 + 1000 * (a));
// pt2.x = cvRound(x0 - 1000 * (-b));
// pt2.y = cvRound(y0 - 1000 * (a));
// cvLine(result, pt1, pt2, CV_RGB(255, 0, 0));
//}
//For the PPHT method the returned elements are already the line endpoints
for (int i = 0; i < result1->total; i++) //iterate over the sequence elements
{
CvPoint* line = (CvPoint*)cvGetSeqElem(result1, i);
cvLine(Ihough_line, line[0], line[1], CV_RGB(100,100,100), 4); //draw the line
cout << "2 : " << line[0].x << " " << line[0].y << endl;
cout << line[1].x << " " << line[1].y << endl << endl;
}
CvSeq *result2 = cvHoughCircles(Ihough_line, storage2, CV_HOUGH_GRADIENT, 2, Ihough_line->width / 10, 50, 200, 50, 100);
//input, storage, Hough method, accumulator resolution, minimum distance between circle centers,
//edge (Canny high) threshold, accumulator threshold for accepting a circle, minimum radius, maximum radius
for (int i = 0; i < result2->total; i++) //iterate over the sequence elements
{
float *circle = (float*)cvGetSeqElem(result2, i); //fetch one sequence element
CvPoint pt2 = cvPoint(cvRound(circle[0]), cvRound(circle[1])); //round to the nearest integer
cvCircle(Ihough_circle, pt2, cvRound(circle[2]), CV_RGB(100, 100, 100), 8); //draw the circle
}
CvSeq *result3 = cvHoughLines2(IHough, storage3, CV_HOUGH_PROBABILISTIC, 1, CV_PI / 180,1);
//input, storage, Hough method, accumulator resolution (pixels), accumulator resolution (radians),
//accumulator threshold for accepting a line
//(minimum line length, maximum gap between segments)
//For the PPHT method the returned elements are already the line endpoints
for (int i = 0; i < result3->total; i++) //iterate over the sequence elements
{
CvPoint* Line = (CvPoint*)cvGetSeqElem(result3, i);
cvLine(IHough, Line[0], Line[1], CV_RGB(100, 100, 100), 4); //draw the line
}
cvNamedWindow("Image", 1);
cvNamedWindow("cvHoughCircles", 1);
cvNamedWindow("Input1", 1);
cvNamedWindow("Ihough_line", 1);
cvNamedWindow("Ihough_circle", 1);
cvNamedWindow("Input2", 1);
cvNamedWindow("IHough", 1);
cvShowImage("Image", Image);
cvShowImage("cvHoughCircles", Ihough);
cvShowImage("Input1", Input1);
cvShowImage("Ihough_line", Ihough_line);
cvShowImage("Ihough_circle", Ihough_circle);
cvShowImage("Input2", Input2);
cvShowImage("IHough", IHough);
cvWaitKey(0);
cvReleaseImage(&Image);
cvReleaseImage(&Ihough);
cvReleaseImage(&Input1);
cvReleaseImage(&Ihough_line);
cvReleaseImage(&Ihough_circle);
cvReleaseImage(&Input2);
cvReleaseImage(&IHough);
cvDestroyWindow("Image");
cvDestroyWindow("cvHoughCircles");
cvDestroyWindow("Input1");
cvDestroyWindow("Ihough_line");
cvDestroyWindow("Ihough_circle");
cvDestroyWindow("Input2");
cvDestroyWindow("IHough");
return 0;
}
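One presentation tweak worth noting: the circles and lines above are drawn directly onto the single-channel edge images, where the gray strokes can be hard to see. The sketch below draws the detected circles in red on a 3-channel copy of the input instead. It is only a sketch that reuses Image and results from the listing above; the overlay image and window name are made up for illustration.

// Sketch only: draw the cvHoughCircles results on a color copy of the grayscale input.
IplImage *overlay = cvCreateImage(cvGetSize(Image), IPL_DEPTH_8U, 3);
cvCvtColor(Image, overlay, CV_GRAY2BGR);                 // replicate the gray channel into B, G, R
for (int i = 0; i < results->total; i++)
{
    float *p = (float*)cvGetSeqElem(results, i);
    cvCircle(overlay, cvPoint(cvRound(p[0]), cvRound(p[1])),
             cvRound(p[2]), CV_RGB(255, 0, 0), 2);       // red circle outline
}
cvNamedWindow("Circles overlay", 1);
cvShowImage("Circles overlay", overlay);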
This example does the following:
1. Use cvWarpAffine to perform an affine transform (Example 6-2);
2. Use cvWarpPerspective to perform a perspective transform (Example 6-3);
3. Use cvLogPolar to perform a log-polar transform (Example 6-4);
4. Load an image, apply a perspective transform and then a rotation in a single step, on the same image as item 2 (Chapter 6, Exercise 17); the listing below applies them separately, and a one-step composition is sketched after the code.
The code is as follows:
#include <cv.h>        // legacy OpenCV C headers; the names were stripped from the original listing and are reconstructed here
#include <cxcore.h>
#include <highgui.h>
#include <math.h>
#include <iostream>
using namespace std;
int main(int argc, char* argv[])
{
IplImage* src, *Iaffine, *Irot, *Iperspective; //source image and transformed images
IplImage*Ipolar, *Ipolar_inverse; //log-polar images
CvPoint2D32f srcTri[3], dstTri[3]; //the two point sets for the affine transform
CvPoint2D32f srcQuad[4], dstQuad[4]; //the two point sets for the perspective transform
CvMat* rot_matrix = cvCreateMat(2, 3, CV_32FC1); //affine transform matrix (rotation)
CvMat* affine_matrix = cvCreateMat(2, 3, CV_32FC1); //affine transform matrix
CvMat* perspective_matrix = cvCreateMat(3, 3, CV_32FC1); //perspective transform matrix
if (!(src = cvLoadImage("D:\\Template\\OpenCV\\Template38_Affine_Perspective_Polar\\Debug\\3.jpg")))
return -1;
Iaffine = cvCloneImage(src); //clone the image
Iaffine->origin = src->origin; //keep the same origin
cvZero(Iaffine); //clear
Irot = cvCloneImage(src); //clone the image
Irot->origin = src->origin; //keep the same origin
cvZero(Irot); //clear
Iperspective = cvCloneImage(src); //clone the image
Iperspective->origin = src->origin; //keep the same origin
cvZero(Iperspective); //clear
Ipolar = cvCloneImage(src); //clone the image
Ipolar->origin = src->origin; //keep the same origin
cvZero(Ipolar); //clear
Ipolar_inverse = cvCloneImage(src); //clone the image
Ipolar_inverse->origin = src->origin; //keep the same origin
cvZero(Ipolar_inverse); //clear
//rotation parameters
double angle = -35.0;
double scale = 0.75;
CvPoint2D32f center = cvPoint2D32f(src->width / 2, src->height / 2);
//log-polar transform parameter
double M =25; //scale factor
//affine transform points
srcTri[0].x = 0; //top left
srcTri[0].y = 0;
srcTri[1].x = src->width - 1; //top right
srcTri[1].y = 0;
srcTri[2].x = 0; //bottom left
srcTri[2].y = src->height - 1;
dstTri[0].x = src->width*0.05; //top left
dstTri[0].y = src->height*0.33;
dstTri[1].x = src->width*0.9; //top right
dstTri[1].y = src->height*0.25;
dstTri[2].x = src->width*0.2; //bottom left
dstTri[2].y = src->height*0.7;
//perspective transform points
srcQuad[0].x = 0; //top left
srcQuad[0].y = 0;
srcQuad[1].x = src->width - 1; //top right
srcQuad[1].y = 0;
srcQuad[2].x = 0; //bottom left
srcQuad[2].y = src->height - 1;
srcQuad[3].x = src->width - 1; //bottom right
srcQuad[3].y = src->height - 1;
dstQuad[0].x = src->width*0.9; //top left
dstQuad[0].y = src->height*0.25;
dstQuad[1].x = src->width*0.8; //top right
dstQuad[1].y = src->height*0.9;
dstQuad[2].x = src->width*0.05; //bottom left
dstQuad[2].y = src->height*0.33;
dstQuad[3].x = src->width*0.2; //bottom right
dstQuad[3].y = src->height*0.7;
//dstQuad[0].x = src->width*0.05; //top left
//dstQuad[0].y = src->height*0.33;
//dstQuad[1].x = src->width*0.9; //top right
//dstQuad[1].y = src->height*0.25;
//dstQuad[2].x = src->width*0.2; //bottom left
//dstQuad[2].y = src->height*0.7;
//dstQuad[3].x = src->width*0.8; //bottom right
//dstQuad[3].y = src->height*0.9;
cvGetAffineTransform(srcTri, dstTri, affine_matrix); //affine transform matrix
cv2DRotationMatrix(center,angle,scale,rot_matrix); //affine (rotation) matrix
cvGetPerspectiveTransform(srcQuad, dstQuad, perspective_matrix); //perspective transform matrix
cvWarpAffine(src, Iaffine, affine_matrix); //affine transform
cvWarpAffine(src, Irot, rot_matrix); //affine transform (rotation)
cvWarpPerspective(src, Iperspective, perspective_matrix); //perspective transform
//log-polar transform
cvLogPolar(src, Ipolar, center, M, CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS);
//inverse log-polar transform
cvLogPolar(Ipolar, Ipolar_inverse, center, M, CV_INTER_LINEAR | CV_WARP_INVERSE_MAP);
cvNamedWindow("Original", 1);
cvNamedWindow("Iaffine", 1);
cvNamedWindow("Irot", 1);
cvNamedWindow("Iperspective", 1);
cvNamedWindow("Ipolar", 1);
cvNamedWindow("Ipolar_inverse", 1);
cvShowImage("Original", src);
cvShowImage("Iaffine", Iaffine);
cvShowImage("Irot", Irot);
cvShowImage("Iperspective", Iperspective);
cvShowImage("Ipolar", Ipolar);
cvShowImage("Ipolar_inverse", Ipolar_inverse);
cvWaitKey();
cvReleaseImage(&src);
cvReleaseImage(&Iaffine);
cvReleaseImage(&Irot);
cvReleaseImage(&Iperspective);
cvReleaseImage(&Ipolar);
cvReleaseImage(&Ipolar_inverse);
cvReleaseMat(&affine_matrix);
cvReleaseMat(&rot_matrix);
cvReleaseMat(&perspective_matrix);
cvDestroyWindow("Original");
cvDestroyWindow("Iaffine");
cvDestroyWindow("Irot");
cvDestroyWindow("Iperspective");
cvDestroyWindow("Ipolar");
cvDestroyWindow("Ipolar_inverse");
}
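Task 4 (Exercise 17) asks for the perspective transform and the rotation to be applied in a single step. Once the 2x3 rotation matrix is extended with a (0, 0, 1) row, both transforms are 3x3 homographies, so they can be composed by matrix multiplication and applied with one cvWarpPerspective() call. The sketch below reuses rot_matrix, perspective_matrix and src from the listing above; the names rot3x3, combined and Icombined are introduced only for this illustration.

// Sketch only: perspective transform followed by rotation, done in one warp.
CvMat *rot3x3   = cvCreateMat(3, 3, CV_32FC1);   // 2x3 rotation promoted to a homography
CvMat *combined = cvCreateMat(3, 3, CV_32FC1);
cvZero(rot3x3);
for (int r = 0; r < 2; r++)
    for (int c = 0; c < 3; c++)
        cvmSet(rot3x3, r, c, cvmGet(rot_matrix, r, c));
cvmSet(rot3x3, 2, 2, 1.0);
// Points go through perspective_matrix first and are rotated afterwards, so the rotation multiplies on the left.
cvMatMul(rot3x3, perspective_matrix, combined);
IplImage *Icombined = cvCloneImage(src);
cvZero(Icombined);
cvWarpPerspective(src, Icombined, combined);     // one warp does both transforms
cvNamedWindow("Icombined", 1);
cvShowImage("Icombined", Icombined);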
This example does the following:
1. Use cvFilter2D() to create a filter that detects only the 60-degree lines in an image and display the result (Chapter 6, Exercise 1). (For programming convenience the author detects 45-degree lines instead of 60-degree lines; the principle is the same.)
2. Separable kernels. Build a 3*3 Gaussian kernel from the rows (1/16, 2/16, 1/16), (2/16, 4/16, 2/16), (1/16, 2/16, 1/16) with the anchor at the center, run it over an image, and display the result (Chapter 6, Exercise 2a);
3. Create two one-dimensional kernels with the anchor at the center: an "across" kernel (1/4, 2/4, 1/4) and a "down" kernel (1/4, 2/4, 1/4) transposed. Load the same image and convolve it twice with cvFilter2D(), first with the row kernel and then with the column kernel (Chapter 6, Exercise 2b). ("Across" means horizontal here and "down" means vertical.)
4. Build a separable kernel from the filter shown in Figure 6-5 and display the result (Chapter 6, Exercise 3).
The code is as follows:
#include <cv.h>        // legacy OpenCV C headers; names stripped from the original listing, reconstructed here
#include <highgui.h>
#include <iostream>
using namespace std;
CvPoint Current_Point; //current pixel that matches the target value; global so it can be updated without passing a pointer
/******************traverse the image with pointer arithmetic********************/
//bool find_point(IplImage *img, char val,CvPoint* P_point)
bool find_point(IplImage *img, char val)
{
char* ptr = NULL;
//uchar* ptr = NULL;
/********** wrong: CvMat data is uchar*, IplImage data is char* ********/
if (img->nChannels == 1)
{
ptr = img->imageData;
//ptr = (uchar*)img->imageData;
/********** wrong: CvMat data is uchar*, IplImage data is char* ********/
if (ptr != NULL)
{
for (int i = 0; i < img->height; i++) //walk the rows through the pointer
{
ptr = (img->imageData + i*(img->widthStep)); //row i, column j
//ptr = (uchar*)img->imageData + i*img->widthStep; //row index1, column index2
/********** wrong: CvMat data is uchar*, IplImage data is char* ********/
for (int j = 0; j < img->width; j++) //walk the columns through the pointer
{
//if (ptr[j] == 255) /******** wrong: ptr points at char values ********/
if (ptr[j] == val) //does this pixel hold the target value?
{
//P_point->x = j; //column **** note: x is the column coordinate; swapping row and column causes problems
//P_point->y = i; //row
Current_Point.x = j; /******** when this was a local variable it could not be updated this way ********/
Current_Point.y = i;
//cout << " j: " << j << " i: " << i << endl;
//cout << " X: " << P_point->x << " Y: " << P_point->y << endl;
//cout << " X: " << Current_Point.x << " Y: " << Current_Point.y << endl;
return true;
}
}
}
}
}
return false;
}
int main(int argc, char* argv[])
{
IplImage *Input, *Iconvolution, *Ouput; //images
IplImage *In1, *Ifilter1, *Ifilter2, *Ifilter3; //images
IplImage *In2, *Ifilter4, *Ifilter5, *Ifilter6; //images
double threshold = 254; //threshold value
int threshold_type = CV_THRESH_TOZERO; //threshold type
CvPoint Last_Point; //previous matching pixel
// CvPoint Current_Point; //current point; as a local variable it could only be updated through a pointer
int Last_Area = 0; //area of the previous region
int Current_Area = 0; //area of the current region
CvConnectedComp comp; //statistics of the flood-filled region
Last_Point = cvPoint(0, 0); //initialize the previous point
Current_Point = cvPoint(0, 0); //initialize the current point
/**************define the convolution kernel, method 1*****************/
CvMat *kernel1; //kernel that responds to 45-degree lines
kernel1 = cvCreateMat(3, 3, CV_32FC1);
cvmSet(kernel1, 0, 0, 0);
cvmSet(kernel1, 0, 1, 0);
cvmSet(kernel1, 0, 2, 1);
cvmSet(kernel1, 1, 0, 0);
cvmSet(kernel1, 1, 1, 1);
cvmSet(kernel1, 1, 2, 0);
cvmSet(kernel1, 2, 0, 1);
cvmSet(kernel1, 2, 1, 0);
cvmSet(kernel1, 2, 2, 0);
///**************define the convolution kernel, method 2*****************/
float k1[9] = { 0.0625, 0.0625 * 2, 0.0625,
0.0625 * 2, 0.0625 * 4, 0.0625 * 2,
0.0625, 0.0625 * 2, 0.0625 };
float k2[3] = { 0.25, 0.25 * 2, 0.25};
float k3[3] = { 0.25, 0.25 * 2, 0.25};
//float k4[9] = { 1, -2, 1,
// 2, -4, 2,
// 1, -2, 1 };
//float k5[3] = { 1, -2, 1 };
//float k6[3] = { 1, 2, 1 };
float k4[9] = { 1, -2, 1,
2, -4, 2,
1, -2, 1 };
float k5[3] = { 1, -2, 1 };
float k6[3] = { 1, 2, 1 };
//create matrix headers only; the data stays in the float arrays above
CvMat kernel3 = cvMat(3, 3, CV_32FC1, k1); //separable kernel 1
CvMat kernel4 = cvMat(1, 3, CV_32FC1, k2);
CvMat kernel5 = cvMat(3, 1, CV_32FC1, k3);
CvMat kernel6 = cvMat(3, 3, CV_32FC1, k4); //separable kernel 2
CvMat kernel7 = cvMat(1, 3, CV_32FC1, k5);
CvMat kernel8 = cvMat(3, 1, CV_32FC1, k6);
if ((Input = cvLoadImage("D:\\Template\\OpenCV\\Template39_Convolution\\Debug\\2.jpg", CV_LOAD_IMAGE_GRAYSCALE)) == 0)
return -1;
if ((In1 = cvLoadImage("D:\\Template\\OpenCV\\Template39_Convolution\\Debug\\3.jpg", CV_LOAD_IMAGE_GRAYSCALE)) == 0)
return -2;
if ((In2 = cvLoadImage("D:\\Template\\OpenCV\\Template39_Convolution\\Debug\\4.jpg", CV_LOAD_IMAGE_GRAYSCALE)) == 0)
return -3;
Iconvolution = cvCreateImage(cvGetSize(Input), IPL_DEPTH_8U, 1); //filtered image
Ouput = cvCreateImage(cvGetSize(Input), IPL_DEPTH_8U, 1); //output image
Ifilter1 = cvCreateImage(cvGetSize(In1), IPL_DEPTH_8U, 1); //filtered image 1
Ifilter2 = cvCreateImage(cvGetSize(In1), IPL_DEPTH_8U, 1); //filtered image 2
Ifilter3 = cvCreateImage(cvGetSize(In1), IPL_DEPTH_8U, 1); //filtered image 3
Ifilter4 = cvCreateImage(cvGetSize(In2), IPL_DEPTH_8U, 1); //filtered image 4
Ifilter5 = cvCreateImage(cvGetSize(In2), IPL_DEPTH_8U, 1); //filtered image 5
Ifilter6 = cvCreateImage(cvGetSize(In2), IPL_DEPTH_8U, 1); //filtered image 6
cvFilter2D(Input, Iconvolution, kernel1); //convolution
cvFilter2D(Iconvolution, Iconvolution, kernel1);
cvFilter2D(Iconvolution, Iconvolution, kernel1);
cvThreshold(Iconvolution, Ouput, threshold, 0, threshold_type); //threshold to zero
//(unify the pixel values so the flood fill below behaves predictably)
cvNamedWindow("Ouput", 1);
do
{
if (find_point(Ouput, 0)) //look for a pixel whose value is 0
{
cout << " X: " << Current_Point.x << " Y: " << Current_Point.y << endl;
cvFloodFill(Ouput, Current_Point, cvScalar(100), cvScalar(0), cvScalar(0),
&comp, 8 | CV_FLOODFILL_FIXED_RANGE); //flood-fill the zero-valued region with 100
Current_Area = comp.area; //area of the current region
if (Last_Area < Current_Area) //the current region is larger than the previous one: repaint the previous region
{
if (Last_Area>0)
cvFloodFill(Ouput, Last_Point, cvScalar(255), cvScalar(0), cvScalar(0),
&comp, 8 | CV_FLOODFILL_FIXED_RANGE); //fill the previous region with 255
cvShowImage("Ouput", Ouput);
cvWaitKey(500);
Last_Area = Current_Area; //the current region becomes the previous one
Last_Point = Current_Point; //the current point becomes the previous one
//memcpy(&Last_Point, &Current_Point, sizeof(CvPoint)); //wrong: copying this way broke the mask handling
}
else //the current region is smaller or equal: repaint the current region
{
if (Current_Area>0)
cvFloodFill(Ouput, Current_Point, cvScalar(255), cvScalar(0), cvScalar(0),
&comp, 8 | CV_FLOODFILL_FIXED_RANGE); //fill the current region with 255
cvShowImage("Ouput", Ouput);
cvWaitKey(500);
}
}
else //no zero-valued pixels left: keep the largest region, black background, white object
{
cvFloodFill(Ouput, cvPoint(0,0), cvScalar(0), cvScalar(0), cvScalar(0), &comp, 8 | CV_FLOODFILL_FIXED_RANGE);
cvFloodFill(Ouput, Last_Point, cvScalar(255), cvScalar(0), cvScalar(0), &comp, 8 | CV_FLOODFILL_FIXED_RANGE);
cvShowImage("Ouput", Ouput);
cvSaveImage("D:\\Template\\OpenCV\\Template39_Convolution\\Debug\\out.jpg", Ouput);
cvWaitKey(500);
break;
}
} while (true);
cvFilter2D(In1, Ifilter1, &kernel3); //convolve with the full 3*3 kernel
cvFilter2D(In1, Ifilter2, &kernel4); //1-D horizontal (row) kernel
cvFilter2D(Ifilter2, Ifilter3, &kernel5); //1-D vertical (column) kernel
cvFilter2D(In2, Ifilter4, &kernel6); //convolve with the full 3*3 kernel
cvFilter2D(In2, Ifilter5, &kernel7); //1-D horizontal (row) kernel
cvFilter2D(Ifilter5, Ifilter6, &kernel8); //1-D vertical (column) kernel
cvNamedWindow("Input", 1);
cvNamedWindow("Iconvolution", 1);
cvNamedWindow("In1", 1);
cvNamedWindow("Ifilter1", 1);
cvNamedWindow("Ifilter2", 1);
cvNamedWindow("Ifilter3", 1);
cvNamedWindow("In2", 1);
cvNamedWindow("Ifilter4", 1);
cvNamedWindow("Ifilter5", 1);
cvNamedWindow("Ifilter6", 1);
cvShowImage("Input", Input);
cvShowImage("Iconvolution", Iconvolution);
cvShowImage("Ouput", Ouput);
cvShowImage("In1", In1);
cvShowImage("Ifilter1", Ifilter1);
cvShowImage("Ifilter2", Ifilter2);
cvShowImage("Ifilter3", Ifilter3);
cvShowImage("In2", In2);
cvShowImage("Ifilter4", Ifilter4);
cvShowImage("Ifilter5", Ifilter5);
cvShowImage("Ifilter6", Ifilter6);
cvWaitKey(0);
cvReleaseMat(&kernel1);
cvReleaseImage(&Input);
cvReleaseImage(&Iconvolution);
cvReleaseImage(&Ouput);
cvReleaseImage(&In1);
cvReleaseImage(&Ifilter1);
cvReleaseImage(&Ifilter2);
cvReleaseImage(&Ifilter3);
cvReleaseImage(&In2);
cvReleaseImage(&Ifilter4);
cvReleaseImage(&Ifilter5);
cvReleaseImage(&Ifilter6);
cvDestroyWindow("Input");
cvDestroyWindow("Iconvolution");
cvDestroyWindow("Ouput");
cvDestroyWindow("In1");
cvDestroyWindow("Ifilter1");
cvDestroyWindow("Ifilter2");
cvDestroyWindow("Ifilter3");
cvDestroyWindow("In2");
cvDestroyWindow("Ifilter4");
cvDestroyWindow("Ifilter5");
cvDestroyWindow("Ifilter6");
return 0;
}
Open issue 1: the separable kernel for task 4 of this example was not constructed successfully.
float k4[9] = { 1, -2, 1,
2, -4, 2,
1, -2, 1 };
float k5[3] = { 1, -2, 1 };
float k6[3] = { 1, 2, 1 };
//create matrix headers only; the data stays in the float arrays above
CvMat kernel6 = cvMat(3, 3, CV_32FC1, k4); //separable kernel 2
CvMat kernel7 = cvMat(1, 3, CV_32FC1, k5);
CvMat kernel8 = cvMat(3, 1, CV_32FC1, k6);
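Mathematically the factorization is fine: the 3*3 kernel above is the outer product of the column (1, 2, 1) transposed and the row (1, -2, 1). A likely culprit is that the intermediate image of the first 1-D pass is 8-bit, so the negative responses of the row kernel (1, -2, 1) are clipped to zero before the column pass runs, and the chained result no longer matches the single 3*3 convolution. Below is a minimal sketch of the two-pass version with a 32-bit float intermediate, reusing In2, k5 and k6 from the listing above; the image names ending in 32 and the window name are new.

// Sketch only: run the two 1-D passes of the separable kernel in 32F so that
// negative intermediate values from the row kernel (1, -2, 1) are not clipped.
IplImage *in32  = cvCreateImage(cvGetSize(In2), IPL_DEPTH_32F, 1);
IplImage *row32 = cvCreateImage(cvGetSize(In2), IPL_DEPTH_32F, 1);
IplImage *sep32 = cvCreateImage(cvGetSize(In2), IPL_DEPTH_32F, 1);
IplImage *sep8  = cvCreateImage(cvGetSize(In2), IPL_DEPTH_8U, 1);
CvMat rowK = cvMat(1, 3, CV_32FC1, k5);      // (1, -2, 1)
CvMat colK = cvMat(3, 1, CV_32FC1, k6);      // (1, 2, 1) as a column
cvConvert(In2, in32);                        // promote the 8U input to 32F
cvFilter2D(in32,  row32, &rowK);             // horizontal pass
cvFilter2D(row32, sep32, &colK);             // vertical pass
cvConvertScaleAbs(sep32, sep8);              // take |value| and saturate to 8U only for display
cvNamedWindow("Ifilter6_separable", 1);
cvShowImage("Ifilter6_separable", sep8);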
This example does the following:
1. Draw a series of concentric circles forming a bull's-eye, plus a series of lines running into the bull's-eye;
2. With a 3*3 aperture, compute the image's x derivative and y derivative, then repeat with apertures of 5*5, 9*9 and 13*13 and display the results (Chapter 6, Exercise 4);
3. Create a new image containing only a 45-degree line, white on a black background. For a series of aperture sizes, compute the first x derivative (dx) and first y derivative (dy); together dx and dy form the gradient of the input image. Scan the image for the positions where the gradient magnitude is at or near its maximum, record the angle at those positions, average the angles, and report the average as the measured line angle. Do this with a 3*3 Sobel aperture (Chapter 6, Exercise 5a); an alternative that reads the orientation with cvCartToPolar() is sketched after the listing;
4. Repeat with a 5*5 Sobel aperture (Chapter 6, Exercise 5b);
5. Repeat with a 9*9 Sobel aperture (Chapter 6, Exercise 5c);
6. Load a frontal face image in which the eyes are open and occupy most of the frame, and write code to locate the pupils (Chapter 6, Exercise 6).
The code is as follows:
#include <cv.h>        // legacy OpenCV C headers; names stripped from the original listing, reconstructed here
#include <highgui.h>
#include <iostream>
#include <cmath>
#define PI 3.14159265
using namespace std;
/******************traverse the image with pointer arithmetic********************/
double Find_point(IplImage *img1, IplImage *img2, CvPoint* P_point)
{
bool enable = true;
char* ptr1 = NULL;
char* ptr2 = NULL;
double Last_Amplitude = 0.0; //magnitude at the previous point
double Current_Amplitude = 0.0; //current magnitude
double subtraction = 0.0; //allowed fluctuation between two successive maxima
double sum1=0.0; //sum of squared x and y first derivatives
double rad=0.0; //angle
double radian=0.0; //running sum of angles
int num=0; //number of samples
if (img1->nChannels == 1)
{
ptr1 = img1->imageData;
ptr2 = img2->imageData;
if ((ptr1 != NULL) && (ptr2 != NULL))
{
for (int i = 0; i < img1->height; i++) //walk the rows through the pointer
{
ptr1 = (img1->imageData + i*(img1->widthStep)); //x derivative, row i, column j
ptr2 = (img2->imageData + i*(img2->widthStep)); //y derivative, row i, column j
for (int j = 0; j < img1->width; j++) //walk the columns through the pointer
{
sum1 = ptr1[j] * ptr1[j] + ptr2[j] * ptr2[j];
Current_Amplitude = sqrt(sum1);
subtraction = fabs(Current_Amplitude - Last_Amplitude); //fluctuation between the two magnitudes
if (Current_Amplitude > Last_Amplitude) //is this magnitude larger than the previous maximum?
{
Last_Amplitude = Current_Amplitude; //the current magnitude becomes the running maximum
if (subtraction >=2)
{
radian = 0;
num = 0;
}
P_point->x = j; //column **** note: x is the column coordinate; swapping row and column causes problems
P_point->y = i; //row
rad = atan((double)ptr2[j] / ptr1[j]);
radian += rad;
num++;
cout << " j: " << j << " i: " << i << endl;
cout << " number of near-maximum samples: " << num << " Last_Amplitude: " << Last_Amplitude << endl;
cout << " sum of angles (rad): " << radian << endl;
enable = false; //flag prevents double-counting when the magnitude grows but the difference stays below 2
}
else if ((subtraction >0) && (subtraction < 2) && enable) //is the magnitude within the normal fluctuation range?
{
P_point->x = j; //column **** note: x is the column coordinate; swapping row and column causes problems
P_point->y = i; //row
rad = atan((double)ptr2[j] / ptr1[j]);
radian += rad;
num++;
cout << " X: " << P_point->x << " Y: " << P_point->y << endl;
cout << " number of near-maximum samples: " << num << " Current_Amplitude: " << Current_Amplitude << endl;
cout << " sum of angles (rad): " << radian << endl;
}
enable = true;
}
}
radian = radian / num;
return radian;
}
}
return 0.0;
}
int main(int argc, char* argv[])
{
IplImage *Input1, *Isobel_x_3, *Isobel_y_3; //images
IplImage *Input2, *ISobel_x, *ISobel_y; //images
IplImage *Isobel_x_5, *Isobel_y_5; //images
IplImage *Isobel_x_9, *Isobel_y_9; //images
IplImage *Isobel_x_13, *Isobel_y_13; //images
IplImage *Input3, *Ilaplace; //images
CvPoint Point; //pixel with the largest gradient magnitude
double Radian = 0.0; //angle in radians
Point = cvPoint(0, 0); //initialize the current point
if ((Input1 = cvLoadImage("D:\\Template\\OpenCV\\Template40_Sobel_Laplace\\Debug\\1.png", CV_LOAD_IMAGE_GRAYSCALE)) == 0)
return -1;
if ((Input2 = cvLoadImage("D:\\Template\\OpenCV\\Template40_Sobel_Laplace\\Debug\\out.png", CV_LOAD_IMAGE_GRAYSCALE)) == 0)
return -2;
if ((Input3 = cvLoadImage("D:\\Template\\OpenCV\\Template40_Sobel_Laplace\\Debug\\8.jpg")) == 0)
return -3;
Isobel_x_3 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //x derivative, aperture 3
Isobel_y_3 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //y derivative, aperture 3
Isobel_x_5 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //x derivative, aperture 5
Isobel_y_5 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //y derivative, aperture 5
Isobel_x_9 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //x derivative, aperture 9
Isobel_y_9 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //y derivative, aperture 9
Isobel_x_13 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //x derivative, aperture 13
Isobel_y_13 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //y derivative, aperture 13
ISobel_x = cvCreateImage(cvGetSize(Input2), IPL_DEPTH_64F, 1); //x derivative image
ISobel_y = cvCreateImage(cvGetSize(Input2), IPL_DEPTH_64F, 1); //y derivative image
Ilaplace = cvCreateImage(cvGetSize(Input3), IPL_DEPTH_16S, 3); //Laplacian image
cvSobel(Input1, Isobel_x_3, 1, 0, CV_SCHARR); //x derivative with the 3*3 Scharr kernel (CV_SCHARR)
cvSobel(Input1, Isobel_y_3, 0, 1, CV_SCHARR); //y derivative with the 3*3 Scharr kernel (CV_SCHARR)
cvSobel(Input1, Isobel_x_5, 1, 0, 5); //x derivative, Sobel aperture 5
cvSobel(Input1, Isobel_y_5, 0, 1, 5); //y derivative, Sobel aperture 5
cvSobel(Input1, Isobel_x_9, 1, 0, 9); //x derivative, Sobel aperture 9
cvSobel(Input1, Isobel_y_9, 0, 1, 9); //y derivative, Sobel aperture 9
cvSobel(Input1, Isobel_x_13, 1, 0, 13); //x derivative, Sobel aperture 13
cvSobel(Input1, Isobel_y_13, 0, 1, 13); //y derivative, Sobel aperture 13
cvSobel(Input2, ISobel_x, 1, 0, CV_SCHARR); //x derivative with the 3*3 Scharr kernel
cvSobel(Input2, ISobel_y, 0, 1, CV_SCHARR); //y derivative with the 3*3 Scharr kernel
Radian = Find_point(ISobel_x, ISobel_y, &Point); //locate the pixels with the largest gradient magnitude
Radian = (double)(Radian / PI)*180;
cout << " Sobel 3*3 (Scharr) operator, angle of the line in the image: " << Radian << endl << endl;
Radian = 0.0; //reset the angle
Point = cvPoint(0, 0); //reset the current point
cvSobel(Input2, ISobel_x, 1, 0, 5); //x derivative, Sobel aperture 5
cvSobel(Input2, ISobel_y, 0, 1, 5); //y derivative, Sobel aperture 5
Radian = Find_point(ISobel_x, ISobel_y, &Point); //locate the pixels with the largest gradient magnitude
Radian = (double)(Radian / PI) * 180;
cout << " Sobel 5*5 operator, angle of the line in the image: " << Radian << endl << endl;
Radian = 0.0; //reset the angle
Point = cvPoint(0, 0); //reset the current point
cvSobel(Input2, ISobel_x, 1, 0, 9); //x derivative, Sobel aperture 9
cvSobel(Input2, ISobel_y, 0, 1, 9); //y derivative, Sobel aperture 9
Radian = Find_point(ISobel_x, ISobel_y, &Point); //locate the pixels with the largest gradient magnitude
Radian = (double)(Radian / PI) * 180;
cout << " Sobel 9*9 operator, angle of the line in the image: " << Radian << endl << endl;
Radian = 0.0; //reset the angle
Point = cvPoint(0, 0); //reset the current point
cvSobel(Input2, ISobel_x, 1, 0, 13); //x derivative, Sobel aperture 13
cvSobel(Input2, ISobel_y, 0, 1, 13); //y derivative, Sobel aperture 13
Radian = Find_point(ISobel_x, ISobel_y, &Point); //locate the pixels with the largest gradient magnitude
Radian = (double)(Radian / PI) * 180;
cout << " Sobel 13*13 operator, angle of the line in the image: " << Radian << endl << endl;
cvLaplace(Input3, Ilaplace, 15); //Laplacian
cvNamedWindow("Input1", 1);
cvNamedWindow("Input2", 1);
cvNamedWindow("Input3", 1);
cvNamedWindow("Isobel_x_3", 1);
cvNamedWindow("Isobel_y_3", 1);
cvNamedWindow("Isobel_x_5", 1);
cvNamedWindow("Isobel_y_5", 1);
cvNamedWindow("Isobel_x_9", 1);
cvNamedWindow("Isobel_y_9", 1);
cvNamedWindow("Isobel_x_13", 1);
cvNamedWindow("Isobel_y_13", 1);
cvNamedWindow("Ilaplace", 1);
cvShowImage("Input1", Input1);
cvShowImage("Input2", Input2);
cvShowImage("Input3", Input3);
cvShowImage("Isobel_x_3", Isobel_x_3);
cvShowImage("Isobel_y_3", Isobel_y_3);
cvShowImage("Isobel_x_5", Isobel_x_5);
cvShowImage("Isobel_y_5", Isobel_y_5);
cvShowImage("Isobel_x_9", Isobel_x_9);
cvShowImage("Isobel_y_9", Isobel_y_9);
cvShowImage("Isobel_x_13", Isobel_x_13);
cvShowImage("Isobel_y_13", Isobel_y_13);
cvShowImage("Ilaplace", Ilaplace);
cvWaitKey(0);
cvReleaseImage(&Input1);
cvReleaseImage(&Input2);
cvReleaseImage(&Input3);
cvReleaseImage(&Isobel_x_3);
cvReleaseImage(&Isobel_y_3);
cvReleaseImage(&Isobel_x_5);
cvReleaseImage(&Isobel_y_5);
cvReleaseImage(&Isobel_x_9);
cvReleaseImage(&Isobel_y_9);
cvReleaseImage(&Isobel_x_13);
cvReleaseImage(&Isobel_y_13);
cvReleaseImage(&Ilaplace);
cvDestroyWindow("Input1");
cvDestroyWindow("Input2");
cvDestroyWindow("Input3");
cvDestroyWindow("Isobel_x_3");
cvDestroyWindow("Isobel_y_3");
cvDestroyWindow("Isobel_x_5");
cvDestroyWindow("Isobel_y_5");
cvDestroyWindow("Isobel_x_9");
cvDestroyWindow("Isobel_y_9");
cvDestroyWindow("Isobel_x_13");
cvDestroyWindow("Isobel_y_13");
cvDestroyWindow("Ilaplace");
return 0;
}
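As an alternative to averaging angles by hand in Find_point, the gradient orientation can be read directly from the two derivative images with cvCartToPolar() and cvMinMaxLoc(). The sketch below assumes the 64-bit float derivative images ISobel_x and ISobel_y computed above; the mag and angle images are new names used only here.

// Sketch only: read the dominant gradient orientation straight from the derivative images.
IplImage *mag   = cvCreateImage(cvGetSize(ISobel_x), IPL_DEPTH_64F, 1);
IplImage *angle = cvCreateImage(cvGetSize(ISobel_x), IPL_DEPTH_64F, 1);
cvCartToPolar(ISobel_x, ISobel_y, mag, angle, 1);        // per-pixel magnitude and angle in degrees
double minVal = 0, maxVal = 0;
CvPoint minLoc, maxLoc;
cvMinMaxLoc(mag, &minVal, &maxVal, &minLoc, &maxLoc);    // location of the strongest gradient
double gradDeg = cvGetReal2D(angle, maxLoc.y, maxLoc.x); // angle at that location (row, column order)
// The gradient points across the line, so the line itself lies at roughly gradDeg +/- 90 degrees.
cout << " strongest gradient at (" << maxLoc.x << ", " << maxLoc.y
     << "), line angle about " << gradDeg - 90.0 << " degrees" << endl;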
This example does the following:
1. Load a suitable image with linear structures and set the low and high thresholds of cvCanny(), using three different high:low ratios of 1.5:1, 2.75:1 and 4:1; display the results when the high threshold is below 50 (Chapter 6, Exercise 7a);
2. Display the results when the high threshold is between 50 and 100 (Chapter 6, Exercise 7b);
3. Display the results when the high threshold is between 100 and 150 (Chapter 6, Exercise 7c);
4. Display the results when the high threshold is between 150 and 200 (Chapter 6, Exercise 7d);
5. Display the results when the high threshold is between 200 and 250 (Chapter 6, Exercise 7e).
The code is as follows:
#include <cv.h>        // legacy OpenCV C headers; names stripped from the original listing, reconstructed here
#include <highgui.h>
#include <iostream>
using namespace std;
int main(int argc, char* argv[])
{
IplImage *Input1, *Icanny_1, *Icanny_2; //images
IplImage *Icanny_3, *Icanny_4, *Icanny_5; //images
IplImage *Icanny_6, *Icanny_7, *Icanny_8; //images
IplImage *Icanny_9, *Icanny_10, *Icanny_11; //images
IplImage *Icanny_12, *Icanny_13, *Icanny_14; //images
IplImage *Icanny_15; //image
if ((Input1 = cvLoadImage("D:\\Template\\OpenCV\\Template41_Canny\\Debug\\9.jpg", CV_LOAD_IMAGE_GRAYSCALE)) == 0)
return -1;
Icanny_1 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //edge-detection image
Icanny_2 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //edge-detection image
Icanny_3 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //edge-detection image
Icanny_4 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //edge-detection image
Icanny_5 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //edge-detection image
Icanny_6 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //edge-detection image
Icanny_7 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //edge-detection image
Icanny_8 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //edge-detection image
Icanny_9 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //edge-detection image
Icanny_10 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //edge-detection image
Icanny_11 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //edge-detection image
Icanny_12 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //edge-detection image
Icanny_13 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //edge-detection image
Icanny_14 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //edge-detection image
Icanny_15 = cvCreateImage(cvGetSize(Input1), IPL_DEPTH_8U, 1); //edge-detection image
//high threshold below 50
cvCanny(Input1, Icanny_1,10,15,3); //edge detection, 1.5 : 1
cvCanny(Input1, Icanny_2, 10, 27.5, 3); //edge detection, 2.75 : 1
cvCanny(Input1, Icanny_3, 10, 40, 3); //edge detection, 4 : 1
//high threshold between 50 and 100
cvCanny(Input1, Icanny_4, 50, 75, 3); //edge detection, 1.5 : 1
cvCanny(Input1, Icanny_5, 20, 55, 3); //edge detection, 2.75 : 1
cvCanny(Input1, Icanny_6, 20, 80, 3); //edge detection, 4 : 1
//between 100 and 150
cvCanny(Input1, Icanny_7, 75, 112.5, 3); //edge detection, 1.5 : 1
cvCanny(Input1, Icanny_8, 40, 110, 3); //edge detection, 2.75 : 1
cvCanny(Input1, Icanny_9, 35, 140, 3); //edge detection, 4 : 1
//between 150 and 200
cvCanny(Input1, Icanny_10, 120, 180, 3); //edge detection, 1.5 : 1
cvCanny(Input1, Icanny_11, 60, 165, 3); //edge detection, 2.75 : 1
cvCanny(Input1, Icanny_12, 45, 180, 3); //edge detection, 4 : 1
//between 200 and 250
cvCanny(Input1, Icanny_13, 150, 225, 3); //edge detection, 1.5 : 1
cvCanny(Input1, Icanny_14, 90, 247.5, 3); //edge detection, 2.75 : 1
cvCanny(Input1, Icanny_15, 55, 220, 3); //edge detection, 4 : 1
cvNamedWindow("Input1", 1);
cvNamedWindow("Icanny_1", 1);
cvNamedWindow("Icanny_2", 1);
cvNamedWindow("Icanny_3", 1);
cvNamedWindow("Icanny_4", 1);
cvNamedWindow("Icanny_5", 1);
cvNamedWindow("Icanny_6", 1);
cvNamedWindow("Icanny_7", 1);
cvNamedWindow("Icanny_8", 1);
cvNamedWindow("Icanny_9", 1);
cvNamedWindow("Icanny_10", 1);
cvNamedWindow("Icanny_11", 1);
cvNamedWindow("Icanny_12", 1);
cvNamedWindow("Icanny_13", 1);
cvNamedWindow("Icanny_14", 1);
cvNamedWindow("Icanny_15", 1);
cvShowImage("Input1", Input1);
cvShowImage("Icanny_1", Icanny_1);
cvShowImage("Icanny_2", Icanny_2);
cvShowImage("Icanny_3", Icanny_3);
cvShowImage("Icanny_4", Icanny_4);
cvShowImage("Icanny_5", Icanny_5);
cvShowImage("Icanny_6", Icanny_6);
cvShowImage("Icanny_7", Icanny_7);
cvShowImage("Icanny_8", Icanny_8);
cvShowImage("Icanny_9", Icanny_9);
cvShowImage("Icanny_10", Icanny_10);
cvShowImage("Icanny_11", Icanny_11);
cvShowImage("Icanny_12", Icanny_12);
cvShowImage("Icanny_13", Icanny_13);
cvShowImage("Icanny_14", Icanny_14);
cvShowImage("Icanny_15", Icanny_15);
cvWaitKey(0);
cvReleaseImage(&Input1);
cvReleaseImage(&Icanny_1);
cvReleaseImage(&Icanny_2);
cvReleaseImage(&Icanny_3);
cvReleaseImage(&Icanny_4);
cvReleaseImage(&Icanny_5);
cvReleaseImage(&Icanny_6);
cvReleaseImage(&Icanny_7);
cvReleaseImage(&Icanny_8);
cvReleaseImage(&Icanny_9);
cvReleaseImage(&Icanny_10);
cvReleaseImage(&Icanny_11);
cvReleaseImage(&Icanny_12);
cvReleaseImage(&Icanny_13);
cvReleaseImage(&Icanny_14);
cvReleaseImage(&Icanny_15);
cvDestroyWindow("Input1");
cvDestroyWindow("Icanny_1");
cvDestroyWindow("Icanny_2");
cvDestroyWindow("Icanny_3");
cvDestroyWindow("Icanny_4");
cvDestroyWindow("Icanny_5");
cvDestroyWindow("Icanny_6");
cvDestroyWindow("Icanny_7");
cvDestroyWindow("Icanny_8");
cvDestroyWindow("Icanny_9");
cvDestroyWindow("Icanny_10");
cvDestroyWindow("Icanny_11");
cvDestroyWindow("Icanny_12");
cvDestroyWindow("Icanny_13");
cvDestroyWindow("Icanny_14");
cvDestroyWindow("Icanny_15");
return 0;
}
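Instead of hard-coding fifteen threshold pairs, the high threshold can also be driven from a trackbar and the low threshold derived from the chosen ratio, which makes it easier to see how the edge map changes. The sketch below is only an illustration: gray, edges, the callback name and the window name are all made up, and the fixed 2.75:1 ratio is just one of the ratios used above.

// Sketch only: interactive Canny tuning. gray and edges are globals so the callback can see them.
IplImage *gray = NULL, *edges = NULL;
int highThresh = 100;                       // slider position, 0..250

void onHighThresh(int pos)
{
    double high = (double)pos;
    double low  = high / 2.75;              // keep a fixed 2.75:1 high:low ratio
    cvCanny(gray, edges, low, high, 3);
    cvShowImage("CannyTuner", edges);
}

// Usage (inside main, after loading the grayscale image into gray):
//   edges = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);
//   cvNamedWindow("CannyTuner", 1);
//   cvCreateTrackbar("high", "CannyTuner", &highThresh, 250, onHighThresh);
//   onHighThresh(highThresh);
//   cvWaitKey(0);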
This example does the following:
1. Use cvLogPolar() to map a square into the wavy-curve plot it produces, with the transform center at the center of the square (Chapter 6, Exercise 10);
2. Repeat with the center at a corner of the square (Chapter 6, Exercise 10a);
3. Transform a circle with the center just inside it, near the edge (Chapter 6, Exercise 10b);
4. Repeat with the center just outside the circle (Chapter 6, Exercise 10c);
5. Draw a large rectangle and a small rectangle, log-polar transform each of them, and write a two-dimensional converter that, taking the center point within the resulting log-polar range, turns the shapes into a recognizable form via the discrete Fourier transform (Chapter 6, Exercises 11 and 12).
The code is as follows:
#include <cv.h>        // legacy OpenCV C headers; the names were stripped from the original listing and are reconstructed here
#include <cxcore.h>
#include <highgui.h>
#include <math.h>
#include <iostream>
using namespace std;
int main(int argc, char* argv[])
{
IplImage* src1, *Ipolar1, *Ipolar2;
IplImage* src2, *Ipolar3, *Ipolar4;
IplImage* src3, *Ipolar5, *Ipolar6;
IplImage* Ipolar7;
if (!(src1 = cvCreateImage(cvSize(250, 250), IPL_DEPTH_32F, 1)))
return -1;
cvZero(src1); //clear
src2 = cvCloneImage(src1); //clone the image
src2->origin = src1->origin; //keep the same origin
cvZero(src2);
src3 = cvCloneImage(src1); //clone the image
src3->origin = src1->origin; //keep the same origin
cvZero(src3);
cvRectangle(src1, cvPoint(100, 100), cvPoint(200, 200), CV_RGB(255, 255, 255));
//cvSaveImage("D:\\Template\\OpenCV\\Template42_Polar\\Debug\\out.jpg", src1);
cvRectangle(src3, cvPoint(125, 125), cvPoint(175, 175), CV_RGB(255, 255, 255));
cvCircle(src2, cvPoint(150, 150), 50, CV_RGB(255, 255, 255));
Ipolar1 = cvCloneImage(src1); //clone the image
Ipolar1->origin = src1->origin; //keep the same origin
cvZero(Ipolar1); //clear
Ipolar2 = cvCloneImage(src1); //clone the image
Ipolar2->origin = src1->origin; //keep the same origin
cvZero(Ipolar2); //clear
Ipolar3 = cvCloneImage(src2); //clone the image
Ipolar3->origin = src2->origin; //keep the same origin
cvZero(Ipolar3); //clear
Ipolar4 = cvCloneImage(src2); //clone the image
Ipolar4->origin = src2->origin; //keep the same origin
cvZero(Ipolar4); //clear
Ipolar5 = cvCloneImage(src3); //clone the image
Ipolar5->origin = src3->origin; //keep the same origin
cvZero(Ipolar5); //clear
Ipolar6 = cvCloneImage(src3); //clone the image
Ipolar6->origin = src3->origin; //keep the same origin
cvZero(Ipolar6); //clear
Ipolar7 = cvCloneImage(src3); //clone the image
Ipolar7->origin = src3->origin; //keep the same origin
cvZero(Ipolar7); //clear
//log-polar transform parameter
double M =50; //scale factor
CvPoint2D32f center1 = cvPoint2D32f(150, 150);
CvPoint2D32f center2 = cvPoint2D32f(100, 100);
CvPoint2D32f center3 = cvPoint2D32f(150, 98);
CvPoint2D32f center4 = cvPoint2D32f(150, 102);
//log-polar transform
cvLogPolar(src1, Ipolar1, center1, M, CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS);
//log-polar transform
cvLogPolar(src1, Ipolar2, center2, M, CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS);
//log-polar transform
cvLogPolar(src2, Ipolar3, center3, M, CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS);
//log-polar transform
cvLogPolar(src2, Ipolar4, center4, M, CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS);
//log-polar transform
cvLogPolar(src3, Ipolar5, center1, M, CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS);
cvDFT(Ipolar5, Ipolar6, CV_DXT_FORWARD); //2-D discrete Fourier transform, for shift invariance
cvDFT(Ipolar1, Ipolar7, CV_DXT_FORWARD);
cvNamedWindow("src1", 1);
cvNamedWindow("src2", 1);
cvNamedWindow("src3", 1);
cvNamedWindow("Ipolar1", 1);
cvNamedWindow("Ipolar2", 1);
cvNamedWindow("Ipolar3", 1);
cvNamedWindow("Ipolar4", 1);
cvNamedWindow("Ipolar5", 1);
cvNamedWindow("Ipolar6", 1);
cvNamedWindow("Ipolar7", 1);
cvShowImage("src1", src1);
cvShowImage("src2", src2);
cvShowImage("src3", src3);
cvShowImage("Ipolar1", Ipolar1);
cvShowImage("Ipolar2", Ipolar2);
cvShowImage("Ipolar3", Ipolar3);
cvShowImage("Ipolar4", Ipolar4);
cvShowImage("Ipolar5", Ipolar5);
cvShowImage("Ipolar6", Ipolar6);
cvShowImage("Ipolar7", Ipolar7);
cvWaitKey();
cvReleaseImage(&src1);
cvReleaseImage(&src2);
cvReleaseImage(&src3);
cvReleaseImage(&Ipolar1);
cvReleaseImage(&Ipolar2);
cvReleaseImage(&Ipolar3);
cvReleaseImage(&Ipolar4);
cvReleaseImage(&Ipolar5);
cvReleaseImage(&Ipolar6);
cvReleaseImage(&Ipolar7);
cvDestroyWindow("src1");
cvDestroyWindow("src2");
cvDestroyWindow("src3");
cvDestroyWindow("Ipolar1");
cvDestroyWindow("Ipolar2");
cvDestroyWindow("Ipolar3");
cvDestroyWindow("Ipolar4");
cvDestroyWindow("Ipolar5");
cvDestroyWindow("Ipolar6");
cvDestroyWindow("Ipolar7");
}
Open issue 2: when the large and the small rectangle are log-polar transformed, the change in rectangle size shows up not only as a shift of the wavy curve in log-polar space but also as a change in the thickness of the curve, so the Fourier-transformed images differ and the two rectangles cannot be matched exactly.
The results are shown in the figure below (figure not reproduced here):
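One refinement that may help: the quantity that is actually invariant to the log-polar shift is the magnitude of the Fourier transform, whereas the code above displays the raw (packed, phase-dependent) output of cvDFT() on a real image. Below is a minimal sketch that computes and displays the DFT magnitude of one log-polar image; it assumes a 1-channel 32F input such as Ipolar5, and the intermediate image names are new.

// Sketch only: DFT magnitude of a 1-channel 32F log-polar image (e.g. Ipolar5).
IplImage *re   = cvCloneImage(Ipolar5);                                  // real part
IplImage *im   = cvCreateImage(cvGetSize(Ipolar5), IPL_DEPTH_32F, 1);    // imaginary part
IplImage *cplx = cvCreateImage(cvGetSize(Ipolar5), IPL_DEPTH_32F, 2);    // 2-channel complex image
IplImage *mag  = cvCreateImage(cvGetSize(Ipolar5), IPL_DEPTH_32F, 1);
cvZero(im);
cvMerge(re, im, NULL, NULL, cplx);           // pack (re, im) into a complex image
cvDFT(cplx, cplx, CV_DXT_FORWARD);           // full complex DFT (no CCS packing)
cvSplit(cplx, re, im, NULL, NULL);           // unpack the spectrum
cvCartToPolar(re, im, mag, NULL, 0);         // per-pixel magnitude
cvAddS(mag, cvRealScalar(1.0), mag);         // avoid log(0)
cvLog(mag, mag);                             // compress the dynamic range for display
cvNormalize(mag, mag, 0, 1, CV_MINMAX);      // scale into [0,1] so cvShowImage can display a 32F image
cvNamedWindow("DFT magnitude", 1);
cvShowImage("DFT magnitude", mag);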
This example does the following:
1. Take the Fourier transform of a small Gaussian kernel and of an image, multiply the two spectra, and take the inverse Fourier transform of the product (Chapter 6, Exercise 13);
2. As the filter grows, convolution in Fourier space becomes much faster than in the spatial domain; compare the running time of the DFT route against plain convolution (Chapter 6, Exercise 13).
The code is as follows:
#include <cv.h>        // legacy OpenCV C headers; names stripped from the original listing, reconstructed here
#include <highgui.h>
#include <iostream>
#include <ctime>
using namespace std;
int main(int argc, char* argv[])
{
clock_t startTime, endTime; //timing for the spatial-domain convolution
clock_t DFT_startTime, DFT_endTime; //timing for the DFT route
CvMat* A_Mat = cvCreateMat(250, 250, CV_32FC1); //image matrix
CvMat* Filter_Mat = cvCreateMat(250, 250, CV_32FC1); //filtered image matrix
cvZero(A_Mat); //cvCreateMat does not initialize the data, so clear it before drawing
CvPoint center = cvPoint(150, 150); //drawing parameters
CvPoint rec1 = cvPoint(50, 50);
CvPoint rec2 = cvPoint(100, 100);
int radius = 50;
CvScalar color = CV_RGB(255, 255, 255);
cvCircle(A_Mat, center, radius, color,4); //draw a circle
cvRectangle(A_Mat, rec1, rec2, color,4); //draw a rectangle
/**************define the convolution kernel, method 1*****************/
CvMat *kernel1;
//kernel1 = cvCreateMat(5, 5, CV_32FC1); //create the matrix and allocate its data
kernel1 = cvCreateMat(15, 15, CV_32FC1); //create the matrix and allocate its data
cvZero(kernel1); //only the top-left 5*5 entries are set below; zero the rest first
cvmSet(kernel1, 0, 0, 0.0625 * 1);
cvmSet(kernel1, 0, 1, 0.0625 * 2);
cvmSet(kernel1, 0, 2, 0.0625 * 3);
cvmSet(kernel1, 0, 3, 0.0625 * 4);
cvmSet(kernel1, 0, 4, 0.0625 * 5);
cvmSet(kernel1, 1, 0, 0.0625 * 1);
cvmSet(kernel1, 1, 1, 0.0625 * 2);
cvmSet(kernel1, 1, 2, 0.0625 * 3);
cvmSet(kernel1, 1, 3, 0.0625 * 4);
cvmSet(kernel1, 1, 4, 0.0625 * 5);
cvmSet(kernel1, 2, 0, 0.0625 * 1);
cvmSet(kernel1, 2, 1, 0.0625 * 2);
cvmSet(kernel1, 2, 2, 0.0625 * 3);
cvmSet(kernel1, 2, 3, 0.0625 * 4);
cvmSet(kernel1, 2, 4, 0.0625 * 5);
cvmSet(kernel1, 3, 0, 0.0625 * 1);
cvmSet(kernel1, 3, 1, 0.0625 * 2);
cvmSet(kernel1, 3, 2, 0.0625 * 3);
cvmSet(kernel1, 3, 3, 0.0625 * 4);
cvmSet(kernel1, 3, 4, 0.0625 * 5);
cvmSet(kernel1, 4, 0, 0.0625 * 1);
cvmSet(kernel1, 4, 1, 0.0625 * 2);
cvmSet(kernel1, 4, 2, 0.0625 * 3);
cvmSet(kernel1, 4, 3, 0.0625 * 4);
cvmSet(kernel1, 4, 4, 0.0625 * 5);
startTime = clock(); //start timing
cvFilter2D(A_Mat, Filter_Mat, kernel1); //spatial-domain convolution
endTime = clock(); //stop timing
cout << "Plain convolution, total time : " << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << endl;
DFT_startTime = clock(); //start timing
int dft_M = cvGetOptimalDFTSize(A_Mat->rows + kernel1->rows - 1); //pick DFT-friendly array sizes
int dft_N = cvGetOptimalDFTSize(A_Mat->cols + kernel1->cols - 1);
CvMat* A_DFT = cvCreateMat(dft_M, dft_N, CV_32FC1); //forward DFT of the image
CvMat* dft_kernel = cvCreateMat(dft_M, dft_N, CV_32FC1); //forward DFT of the kernel
CvMat* A_InvDFT = cvCreateMat(dft_M, dft_N, CV_32FC1); //inverse DFT of the product
CvMat temp; //matrix header
cvGetSubRect(A_DFT, &temp, cvRect(0, 0, A_Mat->cols, A_Mat->rows)); //header pointing at a sub-rectangle of the padded matrix
cvCopy(A_Mat, &temp); //copy the image into the top-left corner of the padded matrix
cvGetSubRect(A_DFT, &temp, cvRect(A_Mat->cols, 0, A_DFT->cols - A_Mat->cols, A_Mat->rows));
cvZero(&temp); //zero the rest of the copied rows (optional; the nonzero_rows parameter covers the remaining rows)
cvDFT(A_DFT, A_DFT, CV_DXT_FORWARD, A_Mat->rows); //forward DFT of the image (nonzero_rows tells cvDFT to skip the all-zero rows)
//apply the same treatment to the kernel matrix
cvGetSubRect(dft_kernel, &temp, cvRect(0, 0, kernel1->cols, kernel1->rows));
cvCopy(kernel1, &temp);
cvGetSubRect(dft_kernel, &temp, cvRect(kernel1->cols, 0, dft_kernel->cols - kernel1->cols, kernel1->rows));
cvZero(&temp);
cvDFT(dft_kernel, dft_kernel, CV_DXT_FORWARD, kernel1->rows);
cvMulSpectrums(A_DFT, dft_kernel, A_DFT, 0); //multiply the spectra, which implements the convolution
cvDFT(A_DFT, A_InvDFT, CV_DXT_INVERSE_SCALE, A_InvDFT->rows); //inverse DFT
DFT_endTime = clock(); //stop timing
cout << "DFT-based convolution, total time : " << (double)(DFT_endTime - DFT_startTime) / CLOCKS_PER_SEC << "s" << endl;
cvNamedWindow("Input", 1);
cvNamedWindow("Filter_Mat", 1);
cvNamedWindow("I_DFT", 1);
cvNamedWindow("I_InvDFT", 1);
cvShowImage("Input", A_Mat);
cvShowImage("Filter_Mat", Filter_Mat);
cvShowImage("I_DFT", A_DFT);
cvShowImage("I_InvDFT", A_InvDFT);
cvWaitKey(0);
cvReleaseMat(&A_Mat);
cvReleaseMat(&Filter_Mat);
cvReleaseMat(&kernel1);
cvReleaseMat(&A_DFT);
cvReleaseMat(&dft_kernel);
cvReleaseMat(&A_InvDFT);
cvDestroyWindow("Input");
cvDestroyWindow("Filter_Mat");
cvDestroyWindow("I_DFT");
cvDestroyWindow("I_InvDFT");
return 0;
}
The results are shown below (figures not reproduced here):
Figure 1: result with the 5*5 filter.
Figure 2: result with the 15*15 filter.
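Note that A_InvDFT is the padded dft_M x dft_N result, so it is larger than the original image and its content is shifted relative to the cvFilter2D output by wherever the kernel anchor is taken to be. A minimal sketch that views just an input-sized region of the DFT-based result, so it can be compared side by side with Filter_Mat (the ROI origin here is an assumption; adjust it for the anchor offset):

// Sketch only: view the region of the DFT-based result that corresponds to the input image.
CvMat roi;                                    // header only, no data copy
cvGetSubRect(A_InvDFT, &roi, cvRect(0, 0, A_Mat->cols, A_Mat->rows));
cvNamedWindow("I_InvDFT_cropped", 1);
cvShowImage("I_InvDFT_cropped", &roi);        // cvShowImage accepts a CvMat* through the CvArr* interface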
This example does the following:
1. Load an image, convert it to grayscale, and compute its integral image (Chapter 6, Exercise 14);
2. Use the properties of the integral image to find horizontal and vertical edges in the image, by adding and subtracting thin rectangles placed at appropriate positions (Chapter 6, Exercise 14).
The code is as follows:
#include <cv.h>        // legacy OpenCV C headers; the names were stripped from the original listing and are reconstructed here
#include <cxcore.h>
#include <highgui.h>
#include <math.h>
#include <iostream>
using namespace std;
int main(int argc, char* argv[])
{
IplImage* src1, *Iintegral;
if (!(src1 = cvLoadImage("D:\\Template\\OpenCV\\Template44_Integration\\Debug\\3.jpg", CV_LOAD_IMAGE_GRAYSCALE)))
return -1;
Iintegral = cvCreateImage(cvSize(src1->width+1,src1->height+1),
IPL_DEPTH_32F, src1->nChannels);
cvIntegral(src1, Iintegral);
cvNamedWindow("src1", 1);
cvNamedWindow("Iintegral", 1);
cvShowImage("src1", src1);
cvShowImage("Iintegral", Iintegral);
cvWaitKey();
cvReleaseImage(&src1);
cvReleaseImage(&Iintegral);
cvDestroyWindow("src1");
cvDestroyWindow("Iintegral");
}
Open issue 3: task 2 of this example was not implemented; it was unclear which property of the integral image allows thin rectangles to be added and subtracted to detect long and short edges.
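The property in question is that the sum of any axis-aligned rectangle of the source can be read from the integral image with four lookups: sum = I(x2, y2) - I(x1, y2) - I(x2, y1) + I(x1, y1), where I is the (width+1) x (height+1) integral image. Subtracting the sum of a thin strip just left of a column from the sum of an equally thin strip just right of it then gives a crude vertical-edge response (and the transposed strips give a horizontal-edge response). A minimal sketch, assuming the Iintegral image computed above; the helper name rectSum and the example coordinates are made up.

// Sketch only: rectangle sums from the integral image and a crude vertical-edge response.
// Iintegral is (width+1) x (height+1), IPL_DEPTH_32F, as created by cvIntegral above.
static double rectSum(IplImage *integ, int x1, int y1, int x2, int y2)   // hypothetical helper
{
    // Sum of source pixels in [x1, x2) x [y1, y2), via four integral-image lookups (row, column order).
    return cvGetReal2D(integ, y2, x2) - cvGetReal2D(integ, y1, x2)
         - cvGetReal2D(integ, y2, x1) + cvGetReal2D(integ, y1, x1);
}

// Usage: compare two 3-pixel-wide strips of height h on either side of column x.
// A large |right - left| indicates a vertical edge near x.
// int h = 11, x = 100, y = 50;
// double left  = rectSum(Iintegral, x - 3, y, x,     y + h);
// double right = rectSum(Iintegral, x,     y, x + 3, y + h);
// double verticalEdge = right - left;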
This example does the following:
1. Load an image, split it into its three channels, histogram-equalize each channel separately, then merge the channels and display the result (Chapter 6, Exercise 16); a luminance-only alternative is sketched after the listing;
2. Histogram equalization stretches the image's tonal range, making the colors more vivid.
The code is as follows:
#include <cv.h>        // legacy OpenCV C headers; the names were stripped from the original listing and are reconstructed here
#include <cxcore.h>
#include <highgui.h>
#include <math.h>
#include <iostream>
using namespace std;
int main(int argc, char* argv[])
{
IplImage* src1;
if (!(src1 = cvLoadImage("D:\\Template\\OpenCV\\Template44_EqualizeHist\\Debug\\3.jpg")))
return -1;
IplImage *mag_R = cvCreateImage(cvSize(src1->width, src1->height), IPL_DEPTH_8U, 1);
IplImage *mag_G = cvCreateImage(cvSize(src1->width, src1->height), IPL_DEPTH_8U, 1);
IplImage *mag_B = cvCreateImage(cvSize(src1->width, src1->height), IPL_DEPTH_8U, 1);
IplImage *IequalizeHist = cvCloneImage(src1);
cvSplit(src1, mag_B, mag_G, mag_R, NULL);
cvEqualizeHist(mag_B, mag_B);
cvEqualizeHist(mag_R, mag_R);
cvEqualizeHist(mag_G, mag_G);
cvMerge(mag_B, mag_G, mag_R, NULL, IequalizeHist);
cvNamedWindow("src1", 1);
cvNamedWindow("IequalizeHist", 1);
cvShowImage("src1", src1);
cvShowImage("IequalizeHist", IequalizeHist);
cvWaitKey();
cvReleaseImage(&src1);
cvReleaseImage(&IequalizeHist);
cvReleaseImage(&mag_B);
cvReleaseImage(&mag_R);
cvReleaseImage(&mag_G);
cvDestroyWindow("src1");
cvDestroyWindow("IequalizeHist");
}
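Equalizing the B, G and R channels independently can shift hues, because each channel is remapped with a different transfer curve. A common alternative, sketched below under the assumption that src1 is the same BGR image loaded above (OpenCV loads color images in BGR order), is to equalize only the luminance channel in YCrCb space; the image and window names are new.

// Sketch only: equalize luminance only, leaving chrominance (and therefore hue) untouched.
IplImage *ycrcb = cvCloneImage(src1);
IplImage *Y  = cvCreateImage(cvGetSize(src1), IPL_DEPTH_8U, 1);
IplImage *Cr = cvCreateImage(cvGetSize(src1), IPL_DEPTH_8U, 1);
IplImage *Cb = cvCreateImage(cvGetSize(src1), IPL_DEPTH_8U, 1);
IplImage *out = cvCloneImage(src1);
cvCvtColor(src1, ycrcb, CV_BGR2YCrCb);    // move to a luminance/chrominance space
cvSplit(ycrcb, Y, Cr, Cb, NULL);
cvEqualizeHist(Y, Y);                     // stretch only the brightness channel
cvMerge(Y, Cr, Cb, NULL, ycrcb);
cvCvtColor(ycrcb, out, CV_YCrCb2BGR);     // back to BGR for display
cvNamedWindow("IequalizeHist_Y", 1);
cvShowImage("IequalizeHist_Y", out);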