Image filtering: implementing a coherence-enhancing shock filter

1: Source code

https://github.com/Itseez/opencv/blob/master/samples/python2/coherence.py
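
In outline, each iteration of coherence_filter estimates the local structure tensor with cv2.cornerEigenValsAndVecs, evaluates the second directional derivative gvv of the gray image along one of the tensor's eigenvectors, and then applies a shock step: the pixel takes the dilated value where gvv < 0 and the eroded value elsewhere, and the result is blended back into the input with the blend weight.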

import numpy as np
import cv2


def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
    h, w = img.shape[:2]

    for i in range(iter_n):
        print(i, end=' ')

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # structure tensor eigen-decomposition: 6 channels per pixel
        # (lambda1, lambda2, x1, y1, x2, y2)
        eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        # eigenvector (x1, y1) associated with the first eigenvalue
        x, y = eigen[:,:,1,0], eigen[:,:,1,1]

        # second-order derivatives of the gray image
        gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
        # second directional derivative along (x, y)
        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
        m = gvv < 0

        # shock step: dilate where gvv < 0, erode elsewhere, then blend
        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img*(1.0 - blend) + img1*blend)
    print('done')
    return img
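
A minimal usage sketch (the file name input.jpg is just a placeholder; the original sample instead wraps coherence_filter in an interactive demo):

if __name__ == '__main__':
    src = cv2.imread('input.jpg')   # placeholder path
    if src is None:
        raise SystemExit('Failed to load input.jpg')
    dst = coherence_filter(src, sigma=11, str_sigma=11, blend=0.5, iter_n=4)
    cv2.imshow('src', src)
    cv2.imshow('coherence', dst)
    cv2.waitKey()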


2: Note on reshape order

OpenCV's Mat::reshape (like C and NumPy) uses row-major order, whereas MATLAB's reshape is column-major; the channel regrouping in the code above relies on row-major layout.
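
A quick NumPy sketch of that row-major regrouping for a single pixel of the 6-channel cornerEigenValsAndVecs output (channel order lambda1, lambda2, x1, y1, x2, y2):

import numpy as np

px = np.arange(6, dtype=np.float32)   # stand-in for one pixel's six channels
eigen = px.reshape(1, 1, 6)           # shape as returned for a 1x1 image: (h, w, 6)
eigen = eigen.reshape(1, 1, 3, 2)     # row-major regrouping: [[l1, l2], [x1, y1], [x2, y2]]
print(eigen[0, 0, 0])                 # eigenvalues        -> [0. 1.]
print(eigen[0, 0, 1])                 # first eigenvector  -> [2. 3.]
print(eigen[0, 0, 2])                 # second eigenvector -> [4. 5.]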

3: Texture flow with cornerEigenValsAndVecs

http://www.rosoo.net/a/201004/9157.html#CornerEigenValsAndVecs
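
For reference, cv2.cornerEigenValsAndVecs computes, for every pixel, the eigenvalues and eigenvectors of the local gradient covariation matrix and stores them as a 6-channel float image (lambda1, lambda2, x1, y1, x2, y2). In the texture-flow sample below, eigen[:,:,2] after the reshape picks out the second eigenvector (x2, y2), which serves as the local texture direction; the C++ port further down reads the same values from channels 4 and 5 after cv::split.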



#!/usr/bin/env python

'''
Texture flow direction estimation.

Sample shows how cv2.cornerEigenValsAndVecs function can be used
to estimate image texture flow direction.

Usage:
    texture_flow.py [<image>]
'''

import numpy as np
import cv2

if __name__ == '__main__':
    import sys
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = 'data/starry_night.jpg'

    img = cv2.imread(fn)
    if img is None:
        print('Failed to load image file:', fn)
        sys.exit(1)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    h, w = img.shape[:2]

    eigen = cv2.cornerEigenValsAndVecs(gray, 15, 3)
    eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
    flow = eigen[:,:,2]

    vis = img.copy()
    vis[:] = (192 + np.uint32(vis)) // 2   # brighten the copy for display
    d = 12
    # regular grid of sample points with step d, starting at d//2
    points = np.dstack( np.mgrid[d//2:w:d, d//2:h:d] ).reshape(-1, 2)
    for x, y in points:
        vx, vy = np.int32(flow[y, x]*d)
        cv2.line(vis, (x-vx, y-vy), (x+vx, y+vy), (0, 0, 0), 1, cv2.LINE_AA)
    cv2.imshow('input', img)
    cv2.imshow('flow', vis)
    cv2.waitKey()
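
The same texture-flow visualization can be ported to C++; in the version below the comments map each step back to the NumPy lines above.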

#include <opencv2/opencv.hpp>
#include <iostream>

int main (int argc, char** argv)
{
    cv::TickMeter tm;
    tm.start();
    // fall back to the same default image as the Python sample
    cv::Mat img = cv::imread(argc > 1 ? argv[1] : "data/starry_night.jpg");
    if (img.empty())
    {
        std::cout << "Failed to load image file" << std::endl;
        return 1;
    }
    cv::Mat gray;
    cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
    // to preserve the original image
    cv::Mat flow = gray.clone();
    int width = img.cols;
    int height = img.rows;
    int graySize = width * height;
    // "brighten" the flow image 
    // C++ version of:
    // vis[:] = (192 + np.uint32(vis)) / 2
    for (int i = 0; i < graySize; ++i)
    {
         flow.data[i] = (uchar)((192 + (int)flow.data[i]) / 2);
    }
    cv::Mat eigen = cv::Mat(height, width, CV_32FC(6));
    cv::cornerEigenValsAndVecs(gray, eigen, 15, 3);
    // this is the equivalent to all the numpy's reshaping etc. to 
    // generate the flow arrays
    // simply use channel 4 and 5 as the actual flow array in C++
    std::vector<cv::Mat> channels;
    cv::split(eigen, channels);

    int d = 12;
    cv::Scalar col(0, 0, 0);
    // C++ version of:
    // points =  np.dstack( np.mgrid[d/2:w:d, d/2:h:d] ).reshape(-1, 2)
    // including the actual line drawing part
    for (int y = d/2; y < flow.rows; y += d)
    {
         for (int x = d/2; x < flow.cols; x += d)
         {
             if (x < flow.cols && y < flow.rows)
             {
                 cv::Point p(x, y);
                 float dx = channels[4].at<float>(p) * (d/2);
                 float dy = channels[5].at<float>(p) * (d/2);
                 cv::Point p0(p.x - dx, p.y - dy);
                 cv::Point p1(p.x + dx, p.y + dy);
                 cv::line(flow, p0, p1, col, 1);
              }
         }
    }
    tm.stop();
    std::cout<<"Flow image generated in "<<tm.getTimeMilli()<<" ms."<<std::endl;
    cv::imshow("FLOW", flow);
    cv::waitKey();
    return 0;
}
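
To build the C++ version, link against OpenCV, for example (assuming pkg-config can locate your install and a hypothetical file name texture_flow.cpp; the module is named opencv4 on OpenCV 4.x and opencv on older releases):

g++ texture_flow.cpp -o texture_flow $(pkg-config --cflags --libs opencv4)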

