Converting between cv::Mat and NumPy Arrays

The program below lets cv::Mat data from C++ OpenCV and numpy arrays from Python be exchanged seamlessly.

  • Main file
/**
 * example.cpp
 */
 
#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include "ndarray_converter.h"

namespace py = pybind11;

void show_image(cv::Mat image)
{
    cv::imshow("image_from_Cpp", image);
    cv::waitKey(0);
}

cv::Mat read_image(std::string image_name)
{
    cv::Mat image = cv::imread(image_name, 1);
    return image;
}

cv::Mat passthru(cv::Mat image)
{
    return image;
}

cv::Mat cloneimg(cv::Mat image)
{
    return image.clone();
}

cv::Mat gaussian_blur_demo(cv::Mat& image) {
    cv::Mat dst;
    cv::GaussianBlur(image, dst, cv::Size(7, 7),1.5,1.5);
    return dst;
}
 
cv::Mat image_filter(cv::Mat& image, cv::Mat& kernel){
    cv::Mat dst;
    cv::filter2D(image, dst, -1, kernel);
    return dst;
}

PYBIND11_MODULE(example,m) 
{
    NDArrayConverter::init_numpy();
    
    m.def("read_image", &read_image, "A function that read an image", 
        py::arg("image"));
    m.def("show_image", &show_image, "A function that show an image", 
        py::arg("image"));
    m.def("passthru", &passthru, "Passthru function", py::arg("image"));
    m.def("clone", &cloneimg, "Clone function", py::arg("image"));
    m.def("gaussian_blur_demo", &gaussian_blur_demo);
    m.def("image_filter", &image_filter);
}

  • Source file
// borrowed in spirit from https://github.com/yati-sagade/opencv-ndarray-conversion
// MIT License
 
/**
 * ndarray_converter.cpp
 */
 
#include "ndarray_converter.h"
 
#define NPY_NO_DEPRECATED_API NPY_1_15_API_VERSION
#include <numpy/ndarrayobject.h>
 
#if PY_VERSION_HEX >= 0x03000000
    #define PyInt_Check PyLong_Check
    #define PyInt_AsLong PyLong_AsLong
#endif
 
struct Tmp {
    const char * name;
 
    Tmp(const char * name ) : name(name) {}
};
 
Tmp info("return value");
 
bool NDArrayConverter::init_numpy() {
    // this has to be in this file, since PyArray_API is defined as static
    import_array1(false);
    return true;
}
 
/*
 * The following conversion functions are taken/adapted from OpenCV's cv2.cpp file
 * inside modules/python/src2 folder (OpenCV 3.1.0)
 */
 
static PyObject* opencv_error = 0;
 
static int failmsg(const char *fmt, ...)
{
    char str[1000];
 
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(str, sizeof(str), fmt, ap);
    va_end(ap);
 
    PyErr_SetString(PyExc_TypeError, str);
    return 0;
}
 
class PyAllowThreads
{
public:
    PyAllowThreads() : _state(PyEval_SaveThread()) {}
    ~PyAllowThreads()
    {
        PyEval_RestoreThread(_state);
    }
private:
    PyThreadState* _state;
};
 
class PyEnsureGIL
{
public:
    PyEnsureGIL() : _state(PyGILState_Ensure()) {}
    ~PyEnsureGIL()
    {
        PyGILState_Release(_state);
    }
private:
    PyGILState_STATE _state;
};
 
#define ERRWRAP2(expr) \
try \
{ \
    PyAllowThreads allowThreads; \
    expr; \
} \
catch (const cv::Exception &e) \
{ \
    PyErr_SetString(opencv_error, e.what()); \
    return 0; \
}
 
using namespace cv;
 
class NumpyAllocator : public MatAllocator
{
public:
    NumpyAllocator() { stdAllocator = Mat::getStdAllocator(); }
    ~NumpyAllocator() {}
 
    UMatData* allocate(PyObject* o, int dims, const int* sizes, int type, size_t* step) const
    {
        UMatData* u = new UMatData(this);
        u->data = u->origdata = (uchar*)PyArray_DATA((PyArrayObject*) o);
        npy_intp* _strides = PyArray_STRIDES((PyArrayObject*) o);
        for( int i = 0; i < dims - 1; i++ )
            step[i] = (size_t)_strides[i];
        step[dims-1] = CV_ELEM_SIZE(type);
        u->size = sizes[0]*step[0];
        u->userdata = o;
        return u;
    }
 
    UMatData* allocate(int dims0, const int* sizes, int type, void* data, size_t* step, int flags, UMatUsageFlags usageFlags) const
    {
        if( data != 0 )
        {
            CV_Error(Error::StsAssert, "The data should normally be NULL!");
            // probably this is safe to do in such extreme case
            return stdAllocator->allocate(dims0, sizes, type, data, step, flags, usageFlags);
        }
        PyEnsureGIL gil;
 
        int depth = CV_MAT_DEPTH(type);
        int cn = CV_MAT_CN(type);
        const int f = (int)(sizeof(size_t)/8);
        int typenum = depth == CV_8U ? NPY_UBYTE : depth == CV_8S ? NPY_BYTE :
        depth == CV_16U ? NPY_USHORT : depth == CV_16S ? NPY_SHORT :
        depth == CV_32S ? NPY_INT : depth == CV_32F ? NPY_FLOAT :
        depth == CV_64F ? NPY_DOUBLE : f*NPY_ULONGLONG + (f^1)*NPY_UINT;
        int i, dims = dims0;
        cv::AutoBuffer<npy_intp> _sizes(dims + 1);
        for( i = 0; i < dims; i++ )
            _sizes[i] = sizes[i];
        if( cn > 1 )
            _sizes[dims++] = cn;
        PyObject* o = PyArray_SimpleNew(dims, _sizes, typenum);
        if(!o)
            CV_Error_(Error::StsError, ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
        return allocate(o, dims0, sizes, type, step);
    }
 
    bool allocate(UMatData* u, int accessFlags, UMatUsageFlags usageFlags) const
    {
        return stdAllocator->allocate(u, accessFlags, usageFlags);
    }
 
    void deallocate(UMatData* u) const
    {
        if(!u)
            return;
        PyEnsureGIL gil;
        CV_Assert(u->urefcount >= 0);
        CV_Assert(u->refcount >= 0);
        if(u->refcount == 0)
        {
            PyObject* o = (PyObject*)u->userdata;
            Py_XDECREF(o);
            delete u;
        }
    }
 
    const MatAllocator* stdAllocator;
};
 
NumpyAllocator g_numpyAllocator;
 
bool NDArrayConverter::toMat(PyObject *o, Mat &m)
{
    bool allowND = true;
    if(!o || o == Py_None)
    {
        if( !m.data )
            m.allocator = &g_numpyAllocator;
        return true;
    }
 
    if( PyInt_Check(o) )
    {
        double v[] = {static_cast<double>(PyInt_AsLong((PyObject*)o)), 0., 0., 0.};
        m = Mat(4, 1, CV_64F, v).clone();
        return true;
    }
    if( PyFloat_Check(o) )
    {
        double v[] = {PyFloat_AsDouble((PyObject*)o), 0., 0., 0.};
        m = Mat(4, 1, CV_64F, v).clone();
        return true;
    }
    if( PyTuple_Check(o) )
    {
        int i, sz = (int)PyTuple_Size((PyObject*)o);
        m = Mat(sz, 1, CV_64F);
        for( i = 0; i < sz; i++ )
        {
            PyObject* oi = PyTuple_GET_ITEM(o, i);
            if( PyInt_Check(oi) )
                m.at<double>(i) = (double)PyInt_AsLong(oi);
            else if( PyFloat_Check(oi) )
                m.at<double>(i) = (double)PyFloat_AsDouble(oi);
            else
            {
                failmsg("%s is not a numerical tuple", info.name);
                m.release();
                return false;
            }
        }
        return true;
    }
 
    if( !PyArray_Check(o) )
    {
        failmsg("%s is not a numpy array, neither a scalar", info.name);
        return false;
    }
 
    PyArrayObject* oarr = (PyArrayObject*) o;
 
    bool needcopy = false, needcast = false;
    int typenum = PyArray_TYPE(oarr), new_typenum = typenum;
    int type = typenum == NPY_UBYTE ? CV_8U :
               typenum == NPY_BYTE ? CV_8S :
               typenum == NPY_USHORT ? CV_16U :
               typenum == NPY_SHORT ? CV_16S :
               typenum == NPY_INT ? CV_32S :
               typenum == NPY_INT32 ? CV_32S :
               typenum == NPY_FLOAT ? CV_32F :
               typenum == NPY_DOUBLE ? CV_64F : -1;
 
    if( type < 0 )
    {
        if( typenum == NPY_INT64 || typenum == NPY_UINT64 || typenum == NPY_LONG )
        {
            needcopy = needcast = true;
            new_typenum = NPY_INT;
            type = CV_32S;
        }
        else
        {
            failmsg("%s data type = %d is not supported", info.name, typenum);
            return false;
        }
    }
 
#ifndef CV_MAX_DIM
    const int CV_MAX_DIM = 32;
#endif
 
    int ndims = PyArray_NDIM(oarr);
    if(ndims >= CV_MAX_DIM)
    {
        failmsg("%s dimensionality (=%d) is too high", info.name, ndims);
        return false;
    }
 
    int size[CV_MAX_DIM+1];
    size_t step[CV_MAX_DIM+1];
    size_t elemsize = CV_ELEM_SIZE1(type);
    const npy_intp* _sizes = PyArray_DIMS(oarr);
    const npy_intp* _strides = PyArray_STRIDES(oarr);
    bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;
 
    for( int i = ndims-1; i >= 0 && !needcopy; i-- )
    {
        // these checks handle cases of
        //  a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
        //  b) transposed arrays, where _strides[] elements go in non-descending order
        //  c) flipped arrays, where some of _strides[] elements are negative
        // the _sizes[i] > 1 is needed to avoid spurious copies when NPY_RELAXED_STRIDES is set
        if( (i == ndims-1 && _sizes[i] > 1 && (size_t)_strides[i] != elemsize) ||
            (i < ndims-1 && _sizes[i] > 1 && _strides[i] < _strides[i+1]) )
            needcopy = true;
    }
 
    if( ismultichannel && _strides[1] != (npy_intp)elemsize*_sizes[2] )
        needcopy = true;
 
    if (needcopy)
    {
        //if (info.outputarg)
        //{
        //    failmsg("Layout of the output array %s is incompatible with cv::Mat (step[ndims-1] != elemsize or step[1] != elemsize*nchannels)", info.name);
        //    return false;
        //}
 
        if( needcast ) {
            o = PyArray_Cast(oarr, new_typenum);
            oarr = (PyArrayObject*) o;
        }
        else {
            oarr = PyArray_GETCONTIGUOUS(oarr);
            o = (PyObject*) oarr;
        }
 
        _strides = PyArray_STRIDES(oarr);
    }
 
    // Normalize strides in case NPY_RELAXED_STRIDES is set
    size_t default_step = elemsize;
    for ( int i = ndims - 1; i >= 0; --i )
    {
        size[i] = (int)_sizes[i];
        if ( size[i] > 1 )
        {
            step[i] = (size_t)_strides[i];
            default_step = step[i] * size[i];
        }
        else
        {
            step[i] = default_step;
            default_step *= size[i];
        }
    }
 
    // handle degenerate case
    if( ndims == 0) {
        size[ndims] = 1;
        step[ndims] = elemsize;
        ndims++;
    }
 
    if( ismultichannel )
    {
        ndims--;
        type |= CV_MAKETYPE(0, size[2]);
    }
 
    if( ndims > 2 && !allowND )
    {
        failmsg("%s has more than 2 dimensions", info.name);
        return false;
    }
 
    m = Mat(ndims, size, type, PyArray_DATA(oarr), step);
    m.u = g_numpyAllocator.allocate(o, ndims, size, type, step);
    m.addref();
 
    if( !needcopy )
    {
        Py_INCREF(o);
    }
    m.allocator = &g_numpyAllocator;
 
    return true;
}
 
PyObject* NDArrayConverter::toNDArray(const cv::Mat& m)
{
    if( !m.data )
        Py_RETURN_NONE;
    Mat temp, *p = (Mat*)&m;
    if(!p->u || p->allocator != &g_numpyAllocator)
    {
        temp.allocator = &g_numpyAllocator;
        ERRWRAP2(m.copyTo(temp));
        p = &temp;
    }
    PyObject* o = (PyObject*)p->u->userdata;
    Py_INCREF(o);
    return o;
}
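
The allocator and converter above are written so that, where possible, no pixel data is copied: toMat wraps the ndarray's buffer directly, and toNDArray hands back the ndarray stored in UMatData::userdata. A quick way to observe this from Python, once the example module from the main file is built, is the following sketch (passthru and clone are the bindings from example.cpp):

import numpy as np
import example

img = np.zeros((4, 4, 3), dtype=np.uint8)

# passthru goes ndarray -> cv::Mat -> ndarray; with the NumpyAllocator it
# should come back as the very same object, with no copy of the buffer.
same = example.passthru(img)
print(same is img)                  # expected: True
print(np.shares_memory(same, img))  # expected: True

# clone() forces cv::Mat::clone(), so toNDArray has to build a new array.
copy = example.clone(img)
print(copy is img)                  # expected: False
print(np.shares_memory(copy, img))  # expected: False
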
  • Header file
/**
 * ndarray_converter.h
 */
 
# ifndef __NDARRAY_CONVERTER_H__
# define __NDARRAY_CONVERTER_H__
 
#include <Python.h>
#include <opencv2/core/core.hpp>
 
 
class NDArrayConverter {
public:
    // must call this first, or the other routines don't work!
    static bool init_numpy();
     
    static bool toMat(PyObject* o, cv::Mat &m);
    static PyObject* toNDArray(const cv::Mat& mat);
};
 
//
// Define the type converter
//
 
#include <pybind11/pybind11.h>
 
namespace pybind11 { namespace detail {
     
template <> struct type_caster<cv::Mat> {
public:
     
    PYBIND11_TYPE_CASTER(cv::Mat, _("numpy.ndarray"));
     
    bool load(handle src, bool) {
        return NDArrayConverter::toMat(src.ptr(), value);
    }
     
    static handle cast(const cv::Mat &m, return_value_policy, handle defval) {
        return handle(NDArrayConverter::toNDArray(m));
    }
};
     
     
}} // namespace pybind11::detail
 
# endif
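
With this header included in the binding translation unit, pybind11 converts cv::Mat arguments and return values automatically; when toMat rejects an argument, the caster's load() fails and the call is reported as a TypeError on the Python side. A small sanity check one could run (a sketch, assuming the example module is built):

import example

try:
    example.show_image("not-an-image")   # a str is neither an ndarray nor a scalar
except TypeError as err:
    print("rejected:", err)
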
  • CMakeLists.txt
cmake_minimum_required(VERSION 3.14)
 
project(cv_mat2)
 
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
 
find_package(OpenCV REQUIRED) 
find_package(pybind11 REQUIRED)
set(PYTHON_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS} /anaconda3/lib/python3.7/site-packages/numpy/core/include)
 
include_directories(${OpenCV_INCLUDE_DIRS})
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
include_directories(${pybind11_INCLUDE_DIR})
include_directories(${PYTHON_INCLUDE_DIRS})
 
SET(SOURCES
  ${CMAKE_CURRENT_SOURCE_DIR}/example.cpp
  ${CMAKE_CURRENT_SOURCE_DIR}/ndarray_converter.cpp
)
 
pybind11_add_module(example ${SOURCES})
target_link_libraries(example PRIVATE ${OpenCV_LIBS})

Building on macOS produces the following shared library:

example.cpython-37m-darwin.so

Testing it:

$ipython
Python 3.7.3 (default, Mar 27 2019, 16:54:48)
Type 'copyright', 'credits' or 'license' for more information
IPython 7.8.0 -- An enhanced Interactive Python. Type '?' for help.

In [1]: import example

In [2]: dir(example)
Out[2]:
['__doc__',
 '__file__',
 '__loader__',
 '__name__',
 '__package__',
 '__spec__',
 'clone',
 'gaussian_blur_demo',
 'image_filter',
 'passthru',
 'read_image',
 'show_image']

In [3]: img = example.read_image('../lady.png')

In [4]: img.shape
Out[4]: (800, 600, 3)

In [5]: type(img)
Out[5]: numpy.ndarray

In [6]: img.dtype
Out[6]: dtype('uint8')
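
The filtering bindings can be exercised the same way; a minimal sketch that continues from the image loaded above (the 3x3 sharpening kernel is just an illustration):

import numpy as np

# Gaussian blur computed on the C++ side, returned as a numpy array.
blurred = example.gaussian_blur_demo(img)

# cv::filter2D with a kernel built in numpy and passed to C++ as cv::Mat.
kernel = np.array([[ 0, -1,  0],
                   [-1,  5, -1],
                   [ 0, -1,  0]], dtype=np.float32)
filtered = example.image_filter(img, kernel)

# Both results keep the input shape and dtype: (800, 600, 3), uint8.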

Troubleshooting

  1. error: no matching member function for call to 'allocate'
    This error came up when building against OpenCV 4.1. It was fixed by following this answer; the culprit appears to be the updated allocator API, whose flags parameters are now typed as cv::AccessFlag:
diff --git a/src/core/python/utility/pyboost_cv_mat_converter.cpp b/src/core/python/utility/pyboost_cv_mat_converter.cpp
index 8eab46e..dc034ab 100644
--- a/src/core/python/utility/pyboost_cv_mat_converter.cpp
+++ b/src/core/python/utility/pyboost_cv_mat_converter.cpp
@@ -84,7 +84,7 @@ public:
     }
  
     cv::UMatData* allocate(int dims0, const int* sizes, int type, void* data, size_t* step,
-                          int flags, cv::UMatUsageFlags usageFlags) const
+                          cv::AccessFlag flags, cv::UMatUsageFlags usageFlags) const
     {
         if (data != 0) {
             CV_Error(cv::Error::StsAssert, "The data should normally be NULL!");
@@ -117,7 +117,7 @@ public:
         return allocate(o, dims0, sizes, type, step);
     }
  
-    bool allocate(cv::UMatData* u, int accessFlags, cv::UMatUsageFlags usageFlags) const
+    bool allocate(cv::UMatData* u, cv::AccessFlag accessFlags, cv::UMatUsageFlags usageFlags) const
     {
         return stdAllocator->allocate(u, accessFlags, usageFlags);
     }
  2. fatal error: 'numpy/ndarrayobject.h' file not found
    The cause is that the numpy include directory was not added to CMakeLists.txt. The numpy header location can be determined as follows:
$ipython
In [1]: import numpy
In [2]: numpy.__path__
Out[2]: ['/anaconda3/lib/python3.7/site-packages/numpy']

The additional include directory that needs to be added is therefore:

include_directories(${PYTHON_INCLUDE_DIRS} /anaconda3/lib/python3.7/site-packages/numpy/core/include)
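
Rather than hard-coding that path, the directory can also be queried from the interpreter itself with numpy.get_include(), which returns the header location for whichever numpy is installed:

import numpy
print(numpy.get_include())   # e.g. .../site-packages/numpy/core/include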
