opencv学习历程3

傅里叶变换例子

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>

#include <iomanip>

using namespace std;
using namespace cv;

void fourierTransform(Mat img, Mat& result);

/**
 * Entry point: loads an image as grayscale, computes its DFT magnitude
 * spectrum via fourierTransform(), and displays the input and the spectrum.
 *
 * Usage: <program> <image-path>
 */
int main(int argc, char *argv[])
{
    // Guard against a missing command-line argument before touching argv[1].
    if(argc < 2)
    {
        cout<<"Usage: "<<argv[0]<<" <image>"<<endl;
        return -1;
    }
    // NOTE: the image must be read as single-channel grayscale, since the
    // DFT helper expects one real plane as input.
    Mat img = imread(argv[1], IMREAD_GRAYSCALE);
    Mat result;
    if(img.empty())
    {
        cout<<"can't open the image"<<endl;
        return -1;
    }
    fourierTransform(img, result);
    imshow("input image", img);
    imshow("spectrum magnitude", result);
    waitKey(0);
    return 0;
}
/**
 * Compute the log-scaled, quadrant-centered, normalized DFT magnitude
 * spectrum of a single-channel image.
 *
 * @param img     input grayscale image (single channel)
 * @param result  output magnitude spectrum, float values in [0,1],
 *                low frequencies shifted to the image center
 */
void fourierTransform(Mat img, Mat& result)
{
    // The DFT is fastest for sizes that factor into small primes (2, 3, 5),
    // so pad the image to the optimal size with zero borders first.
    int m = getOptimalDFTSize(img.rows);
    int n = getOptimalDFTSize(img.cols);
    Mat padded;
    copyMakeBorder(img, padded, 0, m-img.rows, 0, n-img.cols, BORDER_CONSTANT, Scalar::all(0) );
    // Use 32-bit floats (CV_32F) for precision in the frequency domain.
    // The transform yields a real and an imaginary part, held as two planes.
    Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
    Mat complexI;
    // Merge the planes into one 2-channel image: channel 0 = real,
    // channel 1 = imaginary.
    merge(planes, 2, complexI);
    // Forward DFT, computed in place.
    dft(complexI, complexI);
    // Magnitude = sqrt(Re^2 + Im^2).
    split(complexI, planes);
    magnitude(planes[0], planes[1], result);
    // Switch to a logarithmic scale, log(1 + magnitude): the raw spectrum's
    // dynamic range is far too large to display directly.
    result = result + Scalar::all(1);
    log(result, result);

    // Crop to an even number of rows/cols before quadrant swapping.
    // -2 is 0b...11111110, so x & -2 rounds x down to the nearest even value.
    // Rect(int _x, int _y, int _width, int _height)
    result = result(Rect(0,0, result.cols&-2, result.rows&-2));
    int cx = result.cols/2;
    int cy = result.rows/2;
    // Rearrange the quadrants so the origin (low frequencies) sits at the
    // image center.
    Mat q0(result, Rect(0, 0, cx, cy));    // top-left
    Mat q1(result, Rect(cx, 0, cx, cy));   // top-right
    Mat q2(result, Rect(0, cy, cx, cy));   // bottom-left
    Mat q3(result, Rect(cx, cy, cx, cy));  // bottom-right

    // Swap top-left with bottom-right.
    Mat tmp;
    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);

    // Swap top-right with bottom-left.
    q1.copyTo(tmp);
    q2.copyTo(q1);
    tmp.copyTo(q2);

    // The log-magnitude values still exceed [0,1]; normalize for display.
    // NORM_MINMAX replaces the legacy CV_MINMAX alias (removed in OpenCV 4)
    // and matches the normalize() usage elsewhere in this file.
    normalize(result, result, 0, 1, NORM_MINMAX);

}

图像的矩moments

图像的矩是描述图像的重要特征之一,opencv计算的包括空间矩, 中心矩, 和归一化中心矩;部分原理可以参考相关文档, 在此仅给出代码用到的部分:

  1. 对于0-1二值化的图像, $m_{00}$ 即为轮廓的面积;
  2. 质心计算公式
    $\bar{x} = \dfrac{m_{10}}{m_{00}}, \quad \bar{y} = \dfrac{m_{01}}{m_{00}}$
void thresh_callback(int, void*)
{
    Mat edge_img;
    Canny(img_gray, edge_img, thresh, 2*thresh, 3);

    //提取轮廓
    vector<vector<Point> > contours;
    findContours(edge_img, contours, RETR_TREE, CHAIN_APPROX_SIMPLE);

    //存储图像的3阶特征钜
    vector<Moments> mu(contours.size());
    for(size_t i=0; i<contours.size(); i++)
    {
        mu[i] = moments(contours[i]);
    }

    //计算质心的位置 (m10/m00, m01/m00)
    vector<Point2f> mc(contours.size());
    for(size_t i=0; i<contours.size() ;i++)
    {
        mc[i] = Point2f(static_cast<float>(mu[i].m10/(mu[i].m00+1e-5)),
                            static_cast<float>(mu[i].m01/(mu[i].m00+1e-5)));
        cout<<"mc["<<i<<"]"<<mc[i]<<endl;
    }
        Mat drawing = Mat::zeros( edge_img.size(), CV_8UC3 );


    for( size_t i = 0; i< contours.size(); i++ )
    {
        Scalar color = Scalar( rng.uniform(0, 256), rng.uniform(0,256), rng.uniform(0,256) );
        drawContours( drawing, contours, (int)i, color, 2 );
        circle( drawing, mc[i], 4, color, -1 );
    }

    imshow( "Contours", drawing );
    cout << "\t Info: Area and Contour Length \n";

    // 对于二值化得图像 m00即为轮廓的面积
    for( size_t i = 0; i < contours.size(); i++ )
    {
        cout << " * Contour[" << i << "] - Area (M_00) = " << std::fixed << std::setprecision(2) << mu[i].m00
             << " - Area OpenCV: " << contourArea(contours[i]) << " - Length: " << arcLength( contours[i], true ) << endl;
    }
}

Point Polygon Test

//检测一个点是否在多边形内部
double cv::pointPolygonTest ( InputArray  contour,
		Point2f  	pt,
		bool  	measureDist 
	) 	
// 当measureDist=false, 点位于轮廓内部,返回+1, 位于轮廓外部返回-1, 位于轮廓上返回0
// 当measureDist=true, 返回带符号的距离

基于距离变换和分水岭算法的图像分割 (Image Segmentation with Distance Transform and Watershed Algorithm)

简单的来说,分水岭算法就是寻找图像的局部最小点开始注水, 直至到达图像的局部最大点,即是所说的
分水岭,如此便可将图像进行分割,但是这种方法存在的局限就是,由于存在噪点,会导致部分局部最小值和最大值是无意义的,因此opencv中提供的是带有marker的方法,在注水后未被标记的点会被淹没,从而减少了部分噪点带来的影响。
参考链接1
参考链接2
具体代码解释:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;
/**
 * Image segmentation with the distance transform and the watershed
 * algorithm: sharpen, binarize, find peak markers via distance transform,
 * run watershed, and color the resulting regions.
 *
 * Usage: <program> [image]  (default: ../data/cards.png)
 */
int main(int argc, char *argv[])
{
    // Load the input image (default path supplied by the parser).
    CommandLineParser parser( argc, argv, "{@input | ../data/cards.png | input image}" );
    Mat src = imread( parser.get<String>( "@input" ) );
    if( src.empty() )
    {
        cout << "Could not open or find the image!\n" << endl;
        // The "<Input image>" placeholder was lost in the original paste;
        // restored so the usage line is meaningful.
        cout << "Usage: " << argv[0] << " <Input image>" << endl;
        return -1;
    }
    imshow("Source Image", src);

    // Turn the white background black so the foreground is easier to
    // distinguish during the distance transform.
    for ( int i = 0; i < src.rows; i++ ) {
        for ( int j = 0; j < src.cols; j++ ) {
            if ( src.at<Vec3b>(i, j) == Vec3b(255,255,255) )
            {
                src.at<Vec3b>(i, j)[0] = 0;
                src.at<Vec3b>(i, j)[1] = 0;
                src.at<Vec3b>(i, j)[2] = 0;
            }
        }
    }
    // Show the image after the background swap.
    imshow("Black Background Image", src);

    // Sharpen edges with a Laplacian. The Laplacian response can be
    // negative, so a type deeper than CV_8U is needed for the intermediate.
    Mat kernel = (Mat_<float>(3,3) <<
                  1,  1, 1,
                  1, -8, 1,
                  1,  1, 1); // an approximation of second derivative, a quite strong kernel
    Mat imgLaplacian;
    filter2D(src, imgLaplacian, CV_32F, kernel);
    Mat sharp;
    src.convertTo(sharp, CV_32F);
    Mat imgResult = sharp - imgLaplacian;

    // Convert back from CV_32F to CV_8U for display and further processing.
    imgResult.convertTo(imgResult, CV_8UC3);
    imgLaplacian.convertTo(imgLaplacian, CV_8UC3);
    imshow( "New Sharped Image", imgResult );

    // Binarize so the distance transform can be applied.
    Mat bw;
    cvtColor(imgResult, bw, COLOR_BGR2GRAY);
    threshold(bw, bw, 40, 255, THRESH_BINARY | THRESH_OTSU);
    imshow("Binary Image", bw);

    // Distance transform (distance of each nonzero pixel to the nearest
    // zero pixel), normalized to [0,1] for visualization.
    Mat dist;
    distanceTransform(bw, dist, DIST_L2, 3);
    normalize(dist, dist, 0, 1.0, NORM_MINMAX);
    imshow("Distance Transform Image", dist);

    // Threshold the distance map to keep only the peaks (seed regions),
    // then dilate so each marker becomes a solid, separated blob.
    // (The original comment said "erode"; the code actually dilates.)
    threshold(dist, dist, 0.4, 1.0, THRESH_BINARY);
    Mat kernel1 = Mat::ones(3, 3, CV_8U);
    dilate(dist, dist, kernel1);
    imshow("Peaks", dist);

    // Convert dist to CV_8U so findContours can locate the markers/seeds.
    Mat dist_8u;
    dist.convertTo(dist_8u, CV_8U);
    vector<vector<Point> > contours;
    findContours(dist_8u, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
    Mat markers = Mat::zeros(dist.size(), CV_32S);
    // Draw the foreground markers with labels 1..contours.size().
    for (size_t i = 0; i < contours.size(); i++)
    {
        drawContours(markers, contours, static_cast<int>(i), Scalar(static_cast<int>(i)+1), -1);
    }
    // Draw the background marker.
    circle(markers, Point(5,5), 3, Scalar(255), -1);
    imshow("Markers", markers*10000);

    // Run watershed. Afterwards markers holds -1 on watershed boundaries,
    // 0 for unlabeled pixels, and 1..contours.size() for the regions.
    watershed(imgResult, markers);
    Mat mark;
    markers.convertTo(mark, CV_8U);

    // Colorize the result.
    bitwise_not(mark, mark);
    //    imshow("Markers_v2", mark); // uncomment this if you want to see how the mark

    // Generate one random color per marker.
    vector<Vec3b> colors;
    for (size_t i = 0; i < contours.size(); i++)
    {
        int b = theRNG().uniform(0, 256);
        int g = theRNG().uniform(0, 256);
        int r = theRNG().uniform(0, 256);
        colors.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
    }
    // Create the result image and fill labeled objects with random colors.
    Mat dst = Mat::zeros(markers.size(), CV_8UC3);
    for (int i = 0; i < markers.rows; i++)
    {
        for (int j = 0; j < markers.cols; j++)
        {
            int index = markers.at<int>(i,j);
            // index == -1 marks the watershed boundary, 0 is unlabeled.
            if (index > 0 && index <= static_cast<int>(contours.size()))
            {
                dst.at<Vec3b>(i,j) = colors[index-1];
            }
        }
    }
    // Visualize the final segmentation.
    imshow("Final Result", dst);
    waitKey();
    return 0;
}

Out-of-focus Deblur Filter

官网给的例子是采用Wiener Filter进行频域滤波,这里只记录下频域滤波的函数, 代码含义和上面傅里叶变换例子类似, 除此之外官方还给出了motion image deblur的例子: motion image deblur。

void filter2DFreq(const Mat& inputImg, Mat& outputImg, const Mat& H)
{
    Mat planes[2] = { Mat_<float>(inputImg.clone()), Mat::zeros(inputImg.size(), CV_32F) };
    Mat complexI;
    merge(planes, 2, complexI);
    dft(complexI, complexI, DFT_SCALE);
    Mat planesH[2] = { Mat_<float>(H.clone()), Mat::zeros(H.size(), CV_32F) };
    Mat complexH;
    merge(planesH, 2, complexH);
    Mat complexIH;
    mulSpectrums(complexI, complexH, complexIH, 0);
    idft(complexIH, complexIH);
    split(complexIH, planes);
    outputImg = planes[0];
}

你可能感兴趣的:(opencv)