ImportError: torch.utils.ffi is deprecated. Please use cpp extensions instead.

Cause of the error

PyTorch version mismatch.

torch.utils.ffi is a module from PyTorch 0.4;
it was removed in later releases.

If your code only uses create_extension, it can be salvaged. Change

from torch.utils.ffi import create_extension
to
from torch.utils.cpp_extension import BuildExtension

and then change the call
xxx = create_extension(...)
to
xxx = BuildExtension(...)
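
For reference, here is what that migration might look like in a build script. This is a minimal sketch, not a drop-in swap: BuildExtension is a setuptools build_ext subclass passed as a cmdclass alongside CppExtension or CUDAExtension, so the argument list differs from create_extension. The names my_lib, src/my_lib.c and src/my_lib.cpp are hypothetical placeholders.

# Old style (PyTorch <= 0.4):
from torch.utils.ffi import create_extension
ffi = create_extension('_ext.my_lib',
                       headers=['src/my_lib.h'],
                       sources=['src/my_lib.c'],
                       with_cuda=False)
ffi.build()

# New style (PyTorch >= 1.0), in setup.py:
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension

setup(
    name='my_lib',
    ext_modules=[CppExtension('my_lib', ['src/my_lib.cpp'])],
    cmdclass={'build_ext': BuildExtension},
)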

If your code uses _wrap_function, the new cpp_extension module does not seem to provide a replacement for it, but you can work around it as follows.
Create a file named ffiext.py in your project root, copy the code below into it, and then change from torch.utils.ffi import _wrap_function to from ffiext import _wrap_function.
The code below is the old torch.utils.ffi source:

import os
import glob
import tempfile
import shutil
from functools import wraps, reduce
from string import Template
import torch
import torch.cuda
from torch._utils import _accumulate

try:
	import cffi
except ImportError:
	raise ImportError("torch.utils.ffi requires the cffi package")


if cffi.__version_info__ < (1, 4, 0):
	raise ImportError("torch.utils.ffi requires cffi version >= 1.4, but "
					  "got " + '.'.join(map(str, cffi.__version_info__)))


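# Generate C typedefs for every TH/THC tensor and storage struct and fill in
# the lookup tables that map cffi struct names to torch Python class names.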
def _generate_typedefs():
	typedefs = []
	for t in ['Double', 'Float', 'Long', 'Int', 'Short', 'Char', 'Byte']:
		for lib in ['TH', 'THCuda']:
			for kind in ['Tensor', 'Storage']:
				python_name = t + kind
				if t == 'Float' and lib == 'THCuda':
					th_name = 'THCuda' + kind
				else:
					th_name = lib + t + kind
				th_struct = 'struct ' + th_name

				typedefs += ['typedef {} {};'.format(th_struct, th_name)]
				# We have to assemble a string here, because we're going to
				# do this lookup based on tensor.type(), which returns a
				# string (not a type object, as this code was before)
				python_module = 'torch.cuda' if lib == 'THCuda' else 'torch'
				python_class = python_module + '.' + python_name
				_cffi_to_torch[th_struct] = python_class
				_torch_to_cffi[python_class] = th_struct
	return '\n'.join(typedefs) + '\n'
_cffi_to_torch = {}
_torch_to_cffi = {}
_typedefs = _generate_typedefs()


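# Template for the generated package __init__.py: it re-exports every symbol
# from the compiled cffi library, wrapping callables with _wrap_function.
# NOTE: on a new PyTorch the generated "from torch.utils.ffi import" line will
# fail; if you build through this shim you may need to point it at ffiext.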
PY_MODULE_TEMPLATE = Template("""
from torch.utils.ffi import _wrap_function
from .$cffi_wrapper_name import lib as _lib, ffi as _ffi

__all__ = []
def _import_symbols(locals):
	for symbol in dir(_lib):
		fn = getattr(_lib, symbol)
		if callable(fn):
			locals[symbol] = _wrap_function(fn, _ffi)
		else:
			locals[symbol] = fn
		__all__.append(symbol)

_import_symbols(locals())
""")


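# Build the C wrapper source (the TH/THC includes) and collect the include
# directories needed to compile against the TH headers and, optionally, CUDA.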
def _setup_wrapper(with_cuda):
	here = os.path.abspath(os.path.dirname(__file__))
	lib_dir = os.path.join(here, '..', '..', 'lib')
	include_dirs = [
		os.path.join(lib_dir, 'include'),
		os.path.join(lib_dir, 'include', 'TH'),
	]

	wrapper_source = '#include <TH/TH.h>\n'
	if with_cuda:
		import torch.cuda
		wrapper_source += '#include <THC/THC.h>\n'
		if os.sys.platform == 'win32':
			cuda_include_dirs = glob.glob(os.getenv('CUDA_PATH', '') + '/include')
			cuda_include_dirs += glob.glob(os.getenv('NVTOOLSEXT_PATH', '') + '/include')
		else:
			cuda_include_dirs = glob.glob('/usr/local/cuda/include')
			cuda_include_dirs += glob.glob('/Developer/NVIDIA/CUDA-*/include')
		include_dirs.append(os.path.join(lib_dir, 'include', 'THC'))
		include_dirs.extend(cuda_include_dirs)
	return wrapper_source, include_dirs


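# Create the (possibly nested) package directory for a dotted module name and
# touch an __init__.py at every level so the result is importable.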
def _create_module_dir(base_path, fullname):
	module, _, name = fullname.rpartition('.')
	if not module:
		target_dir = name
	else:
		target_dir = reduce(os.path.join, fullname.split('.'))
	target_dir = os.path.join(base_path, target_dir)
	try:
		os.makedirs(target_dir)
	except os.error:
		pass
	for dirname in _accumulate(fullname.split('.'), os.path.join):
		init_file = os.path.join(base_path, dirname, '__init__.py')
		open(init_file, 'a').close()  # Create file if it doesn't exist yet
	return name, target_dir


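# Compile the cffi extension in a temporary directory and copy the resulting
# .so/.pyd file next to the generated Python wrapper package.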
def _build_extension(ffi, cffi_wrapper_name, target_dir, verbose):
	try:
		tmpdir = tempfile.mkdtemp()
		ext_suf = '.pyd' if os.sys.platform == 'win32' else '.so'
		libname = cffi_wrapper_name + ext_suf
		outfile = ffi.compile(tmpdir=tmpdir, verbose=verbose, target=libname)
		shutil.copy(outfile, os.path.join(target_dir, libname))
	finally:
		shutil.rmtree(tmpdir)


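# Write the package __init__.py from PY_MODULE_TEMPLATE.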
def _make_python_wrapper(name, cffi_wrapper_name, target_dir):
	py_source = PY_MODULE_TEMPLATE.substitute(name=name,
											  cffi_wrapper_name=cffi_wrapper_name)
	with open(os.path.join(target_dir, '__init__.py'), 'w') as f:
		f.write(py_source)


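# Public entry point, mirroring the old torch.utils.ffi.create_extension:
# gather headers and sources, set up include/library paths, declare the TH
# typedefs to cffi, and return an ffi object whose .build() compiles it all.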
def create_extension(name, headers, sources, verbose=True, with_cuda=False,
					 package=False, relative_to='.', **kwargs):
	base_path = os.path.abspath(os.path.dirname(relative_to))
	name_suffix, target_dir = _create_module_dir(base_path, name)
	if not package:
		cffi_wrapper_name = '_' + name_suffix
	else:
		cffi_wrapper_name = (name.rpartition('.')[0] +
							 '.{0}._{0}'.format(name_suffix))

	wrapper_source, include_dirs = _setup_wrapper(with_cuda)
	include_dirs.extend(kwargs.pop('include_dirs', []))

	if os.sys.platform == 'win32':
		library_dirs = glob.glob(os.getenv('CUDA_PATH', '') + '/lib/x64')
		library_dirs += glob.glob(os.getenv('NVTOOLSEXT_PATH', '') + '/lib/x64')

		here = os.path.abspath(os.path.dirname(__file__))
		lib_dir = os.path.join(here, '..', '..', 'lib')

		library_dirs.append(os.path.join(lib_dir))
	else:
		library_dirs = []
	library_dirs.extend(kwargs.pop('library_dirs', []))

	if isinstance(headers, str):
		headers = [headers]
	all_headers_source = ''
	for header in headers:
		with open(os.path.join(base_path, header), 'r') as f:
			all_headers_source += f.read() + '\n\n'

	ffi = cffi.FFI()
	sources = [os.path.join(base_path, src) for src in sources]
	# NB: TH headers are C99 now
	kwargs['extra_compile_args'] = ['-std=c99'] + kwargs.get('extra_compile_args', [])
	ffi.set_source(cffi_wrapper_name, wrapper_source + all_headers_source,
				   sources=sources,
				   include_dirs=include_dirs,
				   library_dirs=library_dirs, **kwargs)
	ffi.cdef(_typedefs + all_headers_source)

	_make_python_wrapper(name_suffix, '_' + name_suffix, target_dir)

	def build():
		_build_extension(ffi, cffi_wrapper_name, target_dir, verbose)
	ffi.build = build
	return ffi

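# Wrap a raw cffi function: cast tensor/storage arguments to pointers to the
# matching TH structs, dispatch through torch._C._safe_call, and convert any
# returned TH pointer back into the corresponding torch tensor/storage class.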
def _wrap_function(function, ffi):
	@wraps(function)
	def safe_call(*args, **kwargs):
		args = tuple(ffi.cast(_torch_to_cffi.get(arg.type(), 'void') + '*', arg._cdata)
					 if isinstance(arg, torch.Tensor) or torch.is_storage(arg)
					 else arg
					 for arg in args)
		args = (function,) + args
		result = torch._C._safe_call(*args, **kwargs)
		if isinstance(result, ffi.CData):
			typeof = ffi.typeof(result)
			if typeof.kind == 'pointer':
				cdata = int(ffi.cast('uintptr_t', result))
				cname = typeof.item.cname
				if cname in _cffi_to_torch:
					# TODO: Maybe there is a less janky way to eval
					# off of this
					return eval(_cffi_to_torch[cname])(cdata=cdata)
		return result
	return safe_call
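
For completeness, a typical build script driving this shim's create_extension might look like the following. This is a minimal sketch; the module name _ext.my_lib and the files src/my_lib.h and src/my_lib.c are hypothetical placeholders.

from ffiext import create_extension

ffi = create_extension(
    '_ext.my_lib',             # dotted name of the generated package
    headers=['src/my_lib.h'],  # C declarations, also passed to ffi.cdef
    sources=['src/my_lib.c'],
    relative_to=__file__,
    with_cuda=False,
)

if __name__ == '__main__':
    ffi.build()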

Reference

I solved it by adding the source code of torch.utils.ffi to my project without changing my PyTorch version (I use torch 1.4.0).
For example, save the following code to ffiext.py, then you can replace from torch.utils.ffi import _wrap_function with from ffiext import _wrap_function.
You can also find the following source code in older PyTorch releases, where it lived at torch/utils/ffi/__init__.py.
