OpenCV Source Code Analysis (2): Algorithms

Contents

I. Histogram Equalization

1. Histogram statistics

2. Gray-level transform

3. Histogram equalization

II. Separable Filters

1. The separable-filter factory

2. ocvSepFilter and sepFilter2D

3. Sobel

III. Phase Correlation: phaseCorrelate

1. phaseCorrelate

2. Hanning window

IV. Matchers

1. The abstract base class DescriptorMatcher

2. The subclass FlannBasedMatcher

3. The knnMatch algorithm


I. Histogram Equalization

The code below is from opencv-4.2.0\modules\imgproc\src\histogram.cpp:

1. Histogram statistics

class EqualizeHistCalcHist_Invoker : public cv::ParallelLoopBody
{
public:
    enum {HIST_SZ = 256};

    EqualizeHistCalcHist_Invoker(cv::Mat& src, int* histogram, cv::Mutex* histogramLock)
        : src_(src), globalHistogram_(histogram), histogramLock_(histogramLock)
    { }

    void operator()( const cv::Range& rowRange ) const CV_OVERRIDE
    {
        int localHistogram[HIST_SZ] = {0, };

        const size_t sstep = src_.step;

        int width = src_.cols;
        int height = rowRange.end - rowRange.start;

        if (src_.isContinuous())
        {
            width *= height;
            height = 1;
        }

        for (const uchar* ptr = src_.ptr(rowRange.start); height--; ptr += sstep)
        {
            int x = 0;
            for (; x <= width - 4; x += 4)
            {
                int t0 = ptr[x], t1 = ptr[x+1];
                localHistogram[t0]++; localHistogram[t1]++;
                t0 = ptr[x+2]; t1 = ptr[x+3];
                localHistogram[t0]++; localHistogram[t1]++;
            }

            for (; x < width; ++x)
                localHistogram[ptr[x]]++;
        }

        cv::AutoLock lock(*histogramLock_);

        for( int i = 0; i < HIST_SZ; i++ )
            globalHistogram_[i] += localHistogram[i];
    }

    static bool isWorthParallel( const cv::Mat& src )
    {
        return ( src.total() >= 640*480 );
    }

private:
    EqualizeHistCalcHist_Invoker& operator=(const EqualizeHistCalcHist_Invoker&);

    cv::Mat& src_;
    int* globalHistogram_;
    cv::Mutex* histogramLock_;
};

The class inherits from cv::ParallelLoopBody, so it can be executed in parallel by cv::parallel_for_.

The number of gray levels is HIST_SZ = 256.

The constructor simply stores its three arguments.

The function-call operator accumulates a thread-local histogram over its row range and then merges it into the global histogram under the mutex.

isWorthParallel decides whether the image is large enough (at least 640*480 pixels) for parallel execution to pay off.
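
As a rough sketch (not from the OpenCV sources; RowSumBody is a hypothetical class) of how such an invoker is driven: cv::parallel_for_ splits the full row range into sub-ranges and calls operator() on each of them, possibly from different threads, which is why EqualizeHistCalcHist_Invoker accumulates into a local histogram first and only merges under the mutex.

#include <opencv2/core.hpp>

// Hypothetical example, not part of the OpenCV sources.
class RowSumBody : public cv::ParallelLoopBody
{
public:
    RowSumBody(const cv::Mat& src, double* sums) : src_(src), sums_(sums) {}

    void operator()(const cv::Range& rowRange) const CV_OVERRIDE
    {
        // Each worker thread receives a disjoint sub-range of rows,
        // so writing to sums_[r] needs no locking.
        for (int r = rowRange.start; r < rowRange.end; ++r)
            sums_[r] = cv::sum(src_.row(r))[0];
    }

private:
    const cv::Mat& src_;
    double* sums_;
};

// usage: cv::parallel_for_(cv::Range(0, src.rows), RowSumBody(src, sums.data()));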

2. Gray-level transform

class EqualizeHistLut_Invoker : public cv::ParallelLoopBody
{
public:
    EqualizeHistLut_Invoker( cv::Mat& src, cv::Mat& dst, int* lut )
        : src_(src),
          dst_(dst),
          lut_(lut)
    { }

    void operator()( const cv::Range& rowRange ) const CV_OVERRIDE
    {
        const size_t sstep = src_.step;
        const size_t dstep = dst_.step;

        int width = src_.cols;
        int height = rowRange.end - rowRange.start;
        int* lut = lut_;

        if (src_.isContinuous() && dst_.isContinuous())
        {
            width *= height;
            height = 1;
        }

        const uchar* sptr = src_.ptr(rowRange.start);
        uchar* dptr = dst_.ptr(rowRange.start);

        for (; height--; sptr += sstep, dptr += dstep)
        {
            int x = 0;
            for (; x <= width - 4; x += 4)
            {
                int v0 = sptr[x];
                int v1 = sptr[x+1];
                int x0 = lut[v0];
                int x1 = lut[v1];
                dptr[x] = (uchar)x0;
                dptr[x+1] = (uchar)x1;

                v0 = sptr[x+2];
                v1 = sptr[x+3];
                x0 = lut[v0];
                x1 = lut[v1];
                dptr[x+2] = (uchar)x0;
                dptr[x+3] = (uchar)x1;
            }

            for (; x < width; ++x)
                dptr[x] = (uchar)lut[sptr[x]];
        }
    }

    static bool isWorthParallel( const cv::Mat& src )
    {
        return ( src.total() >= 640*480 );
    }

private:
    EqualizeHistLut_Invoker& operator=(const EqualizeHistLut_Invoker&);

    cv::Mat& src_;
    cv::Mat& dst_;
    int* lut_;
};

The constructor stores its three arguments.

The function-call operator remaps the source image into the destination image through the gray-level lookup table lut.
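
The same per-pixel remapping is also available through the public cv::LUT function; a small sketch (the helper name is ours, not from the source):

#include <opencv2/core.hpp>

// Hypothetical helper: apply a 256-entry 8-bit lookup table to a CV_8U image.
void applyGrayLut(const cv::Mat& src8u, const cv::Mat& lut8u /* 1x256, CV_8U */, cv::Mat& dst)
{
    cv::LUT(src8u, lut8u, dst);   // dst(x, y) = lut(src(x, y)) for every pixel
}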

3. Histogram equalization

void cv::equalizeHist( InputArray _src, OutputArray _dst )
{
    CV_INSTRUMENT_REGION();

    CV_Assert( _src.type() == CV_8UC1 );

    if (_src.empty())
        return;

    CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
               ocl_equalizeHist(_src, _dst))

    Mat src = _src.getMat();
    _dst.create( src.size(), src.type() );
    Mat dst = _dst.getMat();

    CV_OVX_RUN(!ovx::skipSmallImages(src.cols, src.rows),
               openvx_equalize_hist(src, dst))

    Mutex histogramLockInstance;

    const int hist_sz = EqualizeHistCalcHist_Invoker::HIST_SZ;
    int hist[hist_sz] = {0,};
    int lut[hist_sz];

    EqualizeHistCalcHist_Invoker calcBody(src, hist, &histogramLockInstance);
    EqualizeHistLut_Invoker      lutBody(src, dst, lut);
    cv::Range heightRange(0, src.rows);

    if(EqualizeHistCalcHist_Invoker::isWorthParallel(src))
        parallel_for_(heightRange, calcBody);
    else
        calcBody(heightRange);

    int i = 0;
    while (!hist[i]) ++i;

    int total = (int)src.total();
    if (hist[i] == total)
    {
        dst.setTo(i);
        return;
    }

    float scale = (hist_sz - 1.f)/(total - hist[i]);
    int sum = 0;

    for (lut[i++] = 0; i < hist_sz; ++i)
    {
        sum += hist[i];
        lut[i] = saturate_cast<uchar>(sum * scale);
    }

    if(EqualizeHistLut_Invoker::isWorthParallel(src))
        parallel_for_(heightRange, lutBody);
    else
        lutBody(heightRange);
}

The flow is: first compute the histogram; then handle the special case of a single-intensity image (equalization leaves it unchanged, so the destination is simply filled with that value); then build the gray-level lookup table lut from the cumulative histogram; finally map the source image to the destination image through the table.
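
For reference, a hedged usage sketch (file names are placeholders, not from the source); equalizeHist only accepts single-channel 8-bit images:

#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    // Load as grayscale so the input is CV_8UC1, as required by the assertion above.
    cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
    if (gray.empty()) return 1;

    cv::Mat equalized;
    cv::equalizeHist(gray, equalized);   // histogram statistics + LUT mapping, as analyzed above
    cv::imwrite("equalized.png", equalized);
    return 0;
}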

II. Separable Filters

1. The separable-filter factory

Ptr<FilterEngine> createSeparableLinearFilter(
        int _srcType, int _dstType,
        InputArray __rowKernel, InputArray __columnKernel,
        Point _anchor, double _delta,
        int _rowBorderType, int _columnBorderType,
        const Scalar& _borderValue)
{
    Mat _rowKernel = __rowKernel.getMat(), _columnKernel = __columnKernel.getMat();
    _srcType = CV_MAT_TYPE(_srcType);
    _dstType = CV_MAT_TYPE(_dstType);
    int sdepth = CV_MAT_DEPTH(_srcType), ddepth = CV_MAT_DEPTH(_dstType);
    int cn = CV_MAT_CN(_srcType);
    CV_Assert( cn == CV_MAT_CN(_dstType) );
    int rsize = _rowKernel.rows + _rowKernel.cols - 1;
    int csize = _columnKernel.rows + _columnKernel.cols - 1;
    if( _anchor.x < 0 )
        _anchor.x = rsize/2;
    if( _anchor.y < 0 )
        _anchor.y = csize/2;
    int rtype = getKernelType(_rowKernel,
        _rowKernel.rows == 1 ? Point(_anchor.x, 0) : Point(0, _anchor.x));
    int ctype = getKernelType(_columnKernel,
        _columnKernel.rows == 1 ? Point(_anchor.y, 0) : Point(0, _anchor.y));
    Mat rowKernel, columnKernel;

    bool isBitExactMode = false;
    int bdepth = std::max(CV_32F,std::max(sdepth, ddepth));
    int bits = 0;

    if( sdepth == CV_8U &&
        ((rtype == KERNEL_SMOOTH+KERNEL_SYMMETRICAL &&
          ctype == KERNEL_SMOOTH+KERNEL_SYMMETRICAL &&
          ddepth == CV_8U) ||
         ((rtype & (KERNEL_SYMMETRICAL+KERNEL_ASYMMETRICAL)) &&
          (ctype & (KERNEL_SYMMETRICAL+KERNEL_ASYMMETRICAL)) &&
          (rtype & ctype & KERNEL_INTEGER) &&
          ddepth == CV_16S)) )
    {
        int bits_ = ddepth == CV_8U ? 8 : 0;
        bool isValidBitExactRowKernel = createBitExactKernel_32S(_rowKernel, rowKernel, bits_);
        bool isValidBitExactColumnKernel = createBitExactKernel_32S(_columnKernel, columnKernel, bits_);
        if (!isValidBitExactRowKernel)
        {
            CV_LOG_DEBUG(NULL, "createSeparableLinearFilter: bit-exact row-kernel can't be applied: ksize=" << _rowKernel.total());
        }
        else if (!isValidBitExactColumnKernel)
        {
            CV_LOG_DEBUG(NULL, "createSeparableLinearFilter: bit-exact column-kernel can't be applied: ksize=" << _columnKernel.total());
        }
        else
        {
            bdepth = CV_32S;
            bits = bits_;
            bits *= 2;
            _delta *= (1 << bits);
            isBitExactMode = true;
        }
    }
    if (!isBitExactMode)
    {
        if( _rowKernel.type() != bdepth )
            _rowKernel.convertTo( rowKernel, bdepth );
        else
            rowKernel = _rowKernel;
        if( _columnKernel.type() != bdepth )
            _columnKernel.convertTo( columnKernel, bdepth );
        else
            columnKernel = _columnKernel;
    }

    int _bufType = CV_MAKETYPE(bdepth, cn);
    Ptr<BaseRowFilter> _rowFilter = getLinearRowFilter(
        _srcType, _bufType, rowKernel, _anchor.x, rtype);
    Ptr<BaseColumnFilter> _columnFilter = getLinearColumnFilter(
        _bufType, _dstType, columnKernel, _anchor.y, ctype, _delta, bits );

    return Ptr<FilterEngine>( new FilterEngine(Ptr<BaseFilter>(), _rowFilter, _columnFilter,
        _srcType, _dstType, _bufType, _rowBorderType, _columnBorderType, _borderValue ));
}

The first two parameters are the source and destination image types; the next two are the row vector and column vector into which the 2-D kernel has been separated; the remaining parameters give the anchor, delta, border types and border value.

The function returns a FilterEngine object, which stores the row filter, the column filter and the other information needed to apply the filter.
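
To illustrate what separability buys, a small sketch (assumed example, not from the source): a separable kernel is the outer product of a column vector and a row vector, so filtering with the full 2-D kernel is equivalent to a row pass followed by a column pass. cv::sepFilter2D reaches this factory through ocvSepFilter, shown next.

#include <opencv2/imgproc.hpp>

// Hypothetical example: 3x3 box blur expressed as a separable filter.
// The equivalent 2-D kernel would be k * k.t(), a 3x3 matrix of 1/9 entries.
void boxBlurSeparable(const cv::Mat& src, cv::Mat& dst)
{
    cv::Mat k = (cv::Mat_<float>(3, 1) << 1.f/3, 1.f/3, 1.f/3);
    cv::sepFilter2D(src, dst, -1 /* same depth as src */,
                    k.t() /* row kernel */, k /* column kernel */);
}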

2. ocvSepFilter and sepFilter2D

static void ocvSepFilter(int stype, int dtype, int ktype,
                         uchar* src_data, size_t src_step, uchar* dst_data, size_t dst_step,
                         int width, int height, int full_width, int full_height,
                         int offset_x, int offset_y,
                         uchar * kernelx_data, int kernelx_len,
                         uchar * kernely_data, int kernely_len,
                         int anchor_x, int anchor_y, double delta, int borderType)
{
    Mat kernelX(Size(kernelx_len, 1), ktype, kernelx_data);
    Mat kernelY(Size(kernely_len, 1), ktype, kernely_data);
    Ptr<FilterEngine> f = createSeparableLinearFilter(stype, dtype, kernelX, kernelY,
                                                      Point(anchor_x, anchor_y),
                                                      delta, borderType & ~BORDER_ISOLATED);
    Mat src(Size(width, height), stype, src_data, src_step);
    Mat dst(Size(width, height), dtype, dst_data, dst_step);
    f->apply(src, dst, Size(full_width, full_height), Point(offset_x, offset_y));
};

It first creates a FilterEngine object and then calls its apply method to perform the filtering.

void sepFilter2D(int stype, int dtype, int ktype,
                 uchar* src_data, size_t src_step, uchar* dst_data, size_t dst_step,
                 int width, int height, int full_width, int full_height,
                 int offset_x, int offset_y,
                 uchar * kernelx_data, int kernelx_len,
                 uchar * kernely_data, int kernely_len,
                 int anchor_x, int anchor_y, double delta, int borderType)
{

    bool res = replacementSepFilter(stype, dtype, ktype,
                                    src_data, src_step, dst_data, dst_step,
                                    width, height, full_width, full_height,
                                    offset_x, offset_y,
                                    kernelx_data, kernelx_len,
                                    kernely_data, kernely_len,
                                    anchor_x, anchor_y, delta, borderType);
    if (res)
        return;
    ocvSepFilter(stype, dtype, ktype,
                 src_data, src_step, dst_data, dst_step,
                 width, height, full_width, full_height,
                 offset_x, offset_y,
                 kernelx_data, kernelx_len,
                 kernely_data, kernely_len,
                 anchor_x, anchor_y, delta, borderType);
}

sepFilter2D first tries replacementSepFilter (the HAL hook); only if that returns false does it fall back to ocvSepFilter.

3. Sobel

void cv::Sobel( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
                int ksize, double scale, double delta, int borderType )
{
    CV_INSTRUMENT_REGION();

    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
    if (ddepth < 0)
        ddepth = sdepth;
    int dtype = CV_MAKE_TYPE(ddepth, cn);
    _dst.create( _src.size(), dtype );

    int ktype = std::max(CV_32F, std::max(ddepth, sdepth));

    Mat kx, ky;
    getDerivKernels( kx, ky, dx, dy, ksize, false, ktype );
    if( scale != 1 )
    {
        // usually the smoothing part is the slowest to compute,
        // so try to scale it instead of the faster differentiating part
        if( dx == 0 )
            kx *= scale;
        else
            ky *= scale;
    }

    CV_OCL_RUN(ocl::isOpenCLActivated() && _dst.isUMat() && _src.dims() <= 2 && ksize == 3 &&
               (size_t)_src.rows() > ky.total() && (size_t)_src.cols() > kx.total(),
               ocl_sepFilter3x3_8UC1(_src, _dst, ddepth, kx, ky, delta, borderType));

    CV_OCL_RUN(ocl::isOpenCLActivated() && _dst.isUMat() && _src.dims() <= 2 && (size_t)_src.rows() > kx.total() && (size_t)_src.cols() > kx.total(),
               ocl_sepFilter2D(_src, _dst, ddepth, kx, ky, Point(-1, -1), delta, borderType))

    Mat src = _src.getMat();
    Mat dst = _dst.getMat();

    Point ofs;
    Size wsz(src.cols, src.rows);
    if(!(borderType & BORDER_ISOLATED))
        src.locateROI( wsz, ofs );

    CALL_HAL(sobel, cv_hal_sobel, src.ptr(), src.step, dst.ptr(), dst.step, src.cols, src.rows, sdepth, ddepth, cn,
             ofs.x, ofs.y, wsz.width - src.cols - ofs.x, wsz.height - src.rows - ofs.y, dx, dy, ksize, scale, delta, borderType&~BORDER_ISOLATED);

    CV_OVX_RUN(true,
               openvx_sobel(src, dst, dx, dy, ksize, scale, delta, borderType))

    //CV_IPP_RUN_FAST(ipp_Deriv(src, dst, dx, dy, ksize, scale, delta, borderType));

    sepFilter2D(src, dst, ddepth, kx, ky, Point(-1, -1), delta, borderType );
}

The first three parameters are the input image, the output image and the output depth; the next two are the derivative orders in x and y.
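
A hedged usage sketch (the helper name is ours, not from the source) computing first-order derivatives of an 8-bit image, written to CV_16S to avoid overflow:

#include <opencv2/imgproc.hpp>

// Hypothetical helper: x and y gradients with a 3x3 Sobel kernel.
void sobelGradients(const cv::Mat& gray, cv::Mat& gx, cv::Mat& gy)
{
    cv::Sobel(gray, gx, CV_16S, 1, 0, 3);   // dx = 1, dy = 0
    cv::Sobel(gray, gy, CV_16S, 0, 1, 3);   // dx = 0, dy = 1
}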

III. Phase Correlation: phaseCorrelate

The phaseCorrelate function uses the phase correlation method to register two images in the frequency domain, i.e. to estimate the translational shift between them.

1. phaseCorrelate

The code below is from modules\imgproc\src\phasecorr.cpp:

cv::Point2d cv::phaseCorrelate(InputArray _src1, InputArray _src2, InputArray _window, double* response)
{
    CV_INSTRUMENT_REGION();

    Mat src1 = _src1.getMat();
    Mat src2 = _src2.getMat();
    Mat window = _window.getMat();

    CV_Assert( src1.type() == src2.type());
    CV_Assert( src1.type() == CV_32FC1 || src1.type() == CV_64FC1 );
    CV_Assert( src1.size == src2.size);

    if(!window.empty())
    {
        CV_Assert( src1.type() == window.type());
        CV_Assert( src1.size == window.size);
    }

    int M = getOptimalDFTSize(src1.rows);
    int N = getOptimalDFTSize(src1.cols);

    Mat padded1, padded2, paddedWin;

    if(M != src1.rows || N != src1.cols)
    {
        copyMakeBorder(src1, padded1, 0, M - src1.rows, 0, N - src1.cols, BORDER_CONSTANT, Scalar::all(0));
        copyMakeBorder(src2, padded2, 0, M - src2.rows, 0, N - src2.cols, BORDER_CONSTANT, Scalar::all(0));

        if(!window.empty())
        {
            copyMakeBorder(window, paddedWin, 0, M - window.rows, 0, N - window.cols, BORDER_CONSTANT, Scalar::all(0));
        }
    }
    else
    {
        padded1 = src1;
        padded2 = src2;
        paddedWin = window;
    }

    Mat FFT1, FFT2, P, Pm, C;

    // perform window multiplication if available
    if(!paddedWin.empty())
    {
        // apply window to both images before proceeding...
        multiply(paddedWin, padded1, padded1);
        multiply(paddedWin, padded2, padded2);
    }

    // execute phase correlation equation
    // Reference: http://en.wikipedia.org/wiki/Phase_correlation
    dft(padded1, FFT1, DFT_REAL_OUTPUT);
    dft(padded2, FFT2, DFT_REAL_OUTPUT);

    mulSpectrums(FFT1, FFT2, P, 0, true);

    magSpectrums(P, Pm);
    divSpectrums(P, Pm, C, 0, false); // FF* / |FF*| (phase correlation equation completed here...)

    idft(C, C); // gives us the nice peak shift location...

    fftShift(C); // shift the energy to the center of the frame.

    // locate the highest peak
    Point peakLoc;
    minMaxLoc(C, NULL, NULL, NULL, &peakLoc);

    // get the phase shift with sub-pixel accuracy, 5x5 window seems about right here...
    Point2d t;
    t = weightedCentroid(C, peakLoc, Size(5, 5), response);

    // max response is M*N (not exactly, might be slightly larger due to rounding errors)
    if(response)
        *response /= M*N;

    // adjust shift relative to image center...
    Point2d center((double)padded1.cols / 2.0, (double)padded1.rows / 2.0);

    return (center - t);
}

The first two parameters are the two images to register; the third is a window function applied to suppress edge effects in the images, and the documentation recommends a Hanning window. The optional response pointer receives the normalized strength of the correlation peak.
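
A hedged usage sketch (the helper name is ours, not from the source) combining createHanningWindow with phaseCorrelate, assuming single-channel inputs:

#include <opencv2/imgproc.hpp>

// Hypothetical helper: estimate the translation between two images.
cv::Point2d estimateShift(const cv::Mat& a8u, const cv::Mat& b8u)
{
    cv::Mat a, b, win;
    a8u.convertTo(a, CV_32FC1);                 // phaseCorrelate requires CV_32FC1 or CV_64FC1
    b8u.convertTo(b, CV_32FC1);
    cv::createHanningWindow(win, a.size(), CV_32FC1);

    double response = 0.0;
    cv::Point2d shift = cv::phaseCorrelate(a, b, win, &response);
    return shift;                               // sub-pixel (dx, dy) of b relative to a
}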

2. Hanning window

void cv::createHanningWindow(OutputArray _dst, cv::Size winSize, int type)
{
    CV_INSTRUMENT_REGION();

    CV_Assert( type == CV_32FC1 || type == CV_64FC1 );
    CV_Assert( winSize.width > 1 && winSize.height > 1 );

    _dst.create(winSize, type);
    Mat dst = _dst.getMat();

    int rows = dst.rows, cols = dst.cols;

    AutoBuffer<double> _wc(cols);
    double* const wc = _wc.data();

    double coeff0 = 2.0 * CV_PI / (double)(cols - 1), coeff1 = 2.0f * CV_PI / (double)(rows - 1);
    for(int j = 0; j < cols; j++)
        wc[j] = 0.5 * (1.0 - cos(coeff0 * j));

    if(dst.depth() == CV_32F)
    {
        for(int i = 0; i < rows; i++)
        {
            float* dstData = dst.ptr<float>(i);
            double wr = 0.5 * (1.0 - cos(coeff1 * i));
            for(int j = 0; j < cols; j++)
                dstData[j] = (float)(wr * wc[j]);
        }
    }
    else
    {
        for(int i = 0; i < rows; i++)
        {
            double* dstData = dst.ptr<double>(i);
            double wr = 0.5 * (1.0 - cos(coeff1 * i));
            for(int j = 0; j < cols; j++)
                dstData[j] = wr * wc[j];
        }
    }

    // perform batch sqrt for SSE performance gains
    cv::sqrt(dst, dst);
}
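
Note that the final cv::sqrt is applied to the whole matrix, so each element ends up as the square root of the product of the row and column Hann factors. A small self-check sketch (not from the source) of that relation:

#include <opencv2/imgproc.hpp>
#include <cmath>
#include <cassert>

int main()
{
    cv::Mat w;
    cv::createHanningWindow(w, cv::Size(64, 32), CV_64FC1);

    // w(i, j) should equal sqrt(hann_row(i) * hann_col(j)).
    int i = 10, j = 20;
    double wr = 0.5 * (1.0 - std::cos(2.0 * CV_PI * i / (w.rows - 1)));
    double wc = 0.5 * (1.0 - std::cos(2.0 * CV_PI * j / (w.cols - 1)));
    assert(std::fabs(w.at<double>(i, j) - std::sqrt(wr * wc)) < 1e-12);
    return 0;
}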

IV. Matchers

The code below is from opencv-4.2.0\modules\features2d\src\matchers.cpp:

1. The abstract base class DescriptorMatcher

It contains three matching algorithms:

class CV_EXPORTS_W DescriptorMatcher : public Algorithm
{
public:
    CV_WRAP void match( InputArray queryDescriptors, InputArray trainDescriptors,
                        CV_OUT std::vector<DMatch>& matches, InputArray mask=noArray() ) const;
    CV_WRAP void knnMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                           CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
                           InputArray mask=noArray(), bool compactResult=false ) const;
    CV_WRAP void radiusMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                              CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
                              InputArray mask=noArray(), bool compactResult=false ) const;
    CV_WRAP void match( InputArray queryDescriptors, CV_OUT std::vector<DMatch>& matches,
                        InputArrayOfArrays masks=noArray() );
    CV_WRAP void knnMatch( InputArray queryDescriptors, CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
                           InputArrayOfArrays masks=noArray(), bool compactResult=false );
    CV_WRAP void radiusMatch( InputArray queryDescriptors, CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
                              InputArrayOfArrays masks=noArray(), bool compactResult=false );
    ......
};

DescriptorMatcher also declares the pure virtual function clone().

match simply calls knnMatch (with k = 1), so in practice there are only two algorithms: knnMatch and radiusMatch.

2. The subclass FlannBasedMatcher

It inherits from DescriptorMatcher:

class CV_EXPORTS_W FlannBasedMatcher : public DescriptorMatcher
{
public:
    CV_WRAP FlannBasedMatcher( const Ptr<flann::IndexParams>& indexParams=makePtr<flann::KDTreeIndexParams>(),
                               const Ptr<flann::SearchParams>& searchParams=makePtr<flann::SearchParams>() );
......
};

(1) clone

It creates a new instance.

(2) The matching algorithms

The matching algorithms are neither overloaded nor overridden here; the parent-class functions are used directly.

3. The knnMatch algorithm

void DescriptorMatcher::knnMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                                  std::vector<std::vector<DMatch> >& matches, int knn,
                                  InputArray mask, bool compactResult ) const
{
    CV_INSTRUMENT_REGION();

    Ptr<DescriptorMatcher> tempMatcher = clone(true);
    tempMatcher->add(trainDescriptors);
    tempMatcher->knnMatch( queryDescriptors, matches, knn, std::vector<Mat>(1, mask.getMat()), compactResult );
}

void DescriptorMatcher::knnMatch( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int knn,
                                  InputArrayOfArrays masks, bool compactResult )
{
    CV_INSTRUMENT_REGION();

    if( empty() || queryDescriptors.empty() )
        return;

    CV_Assert( knn > 0 );

    checkMasks( masks, queryDescriptors.size().height );

    train();
    knnMatchImpl( queryDescriptors, matches, knn, masks, compactResult );
}

The core functionality is kept in knnMatchImpl (the impl pattern): the public knnMatch validates its arguments, trains the matcher, and then delegates to knnMatchImpl.
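
A hedged usage sketch (the helper name and the 0.75 ratio threshold are our choices, not from the source): knnMatch with FlannBasedMatcher followed by Lowe's ratio test, the most common way these two classes are used together:

#include <opencv2/features2d.hpp>
#include <vector>

// Hypothetical helper: keep only distinctive matches between two float descriptor sets (e.g. SIFT).
std::vector<cv::DMatch> ratioTestMatch(const cv::Mat& queryDesc, const cv::Mat& trainDesc)
{
    cv::FlannBasedMatcher matcher;                      // default KD-tree index for float descriptors
    std::vector<std::vector<cv::DMatch> > knn;
    matcher.knnMatch(queryDesc, trainDesc, knn, 2);     // 2 nearest neighbours per query descriptor

    std::vector<cv::DMatch> good;
    for (size_t i = 0; i < knn.size(); ++i)
        if (knn[i].size() == 2 && knn[i][0].distance < 0.75f * knn[i][1].distance)
            good.push_back(knn[i][0]);
    return good;
}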
