Problem description: torch.utils.ffi was deprecated and then removed in newer versions of torch, and the newer versions do not ship a rewritten _wrap_function for direct use. The most robust fix is to downgrade torch to 0.4.0, but that also forces a CUDA version change, which is a hassle. Try the workaround below first; if it does not work, fall back to creating a conda virtual environment with the downgraded versions.
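For completeness, the downgrade fallback would look roughly like this (a sketch: the cuda90 metapackage name comes from the 0.4.x-era install instructions and must be swapped for cuda80/cuda91 to match your driver):

conda create -n torch040 python=3.6
conda activate torch040
conda install pytorch=0.4.0 cuda90 -c pytorch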
Solution: I solved it by adding the source code of torch.utils.ffi to my project, without changing my PyTorch version (I use torch 1.4.0). For example, save the following code to ffiext.py; then you can replace
from torch.utils.ffi import _wrap_function
with
from ffiext import _wrap_function
You can also find this source code at https://s0pytorch0org.icopy.site/docs/0.4.1/_modules/torch/utils/ffi.html (a mirror of the torch 0.4.1 docs).
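Concretely, the change in an affected project is a one-line import swap. A minimal sketch, assuming a generated cffi wrapper module of the kind old projects shipped (the _ext/roi_pooling path is hypothetical):

# _ext/roi_pooling/__init__.py -- hypothetical generated wrapper
# Before: on torch >= 1.0 this raises ImportError ("torch.utils.ffi is deprecated")
#   from torch.utils.ffi import _wrap_function
# After: ffiext.py sits somewhere on the Python path, e.g. the project root
from ffiext import _wrap_function

The full ffiext.py source follows: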
import os
import glob
import tempfile
import shutil
from functools import wraps, reduce
from string import Template
import torch
import torch.cuda
from torch._utils import _accumulate

try:
    import cffi
except ImportError:
    raise ImportError("torch.utils.ffi requires the cffi package")

if cffi.__version_info__ < (1, 4, 0):
    raise ImportError("torch.utils.ffi requires cffi version >= 1.4, but "
                      "got " + '.'.join(map(str, cffi.__version_info__)))


def _generate_typedefs():
    typedefs = []
    for t in ['Double', 'Float', 'Long', 'Int', 'Short', 'Char', 'Byte']:
        for lib in ['TH', 'THCuda']:
            for kind in ['Tensor', 'Storage']:
                python_name = t + kind
                if t == 'Float' and lib == 'THCuda':
                    th_name = 'THCuda' + kind
                else:
                    th_name = lib + t + kind
                th_struct = 'struct ' + th_name

                typedefs += ['typedef {} {};'.format(th_struct, th_name)]
                # We have to assemble a string here, because we're going to
                # do this lookup based on tensor.type(), which returns a
                # string (not a type object, as this code was before)
                python_module = 'torch.cuda' if lib == 'THCuda' else 'torch'
                python_class = python_module + '.' + python_name
                _cffi_to_torch[th_struct] = python_class
                _torch_to_cffi[python_class] = th_struct
    return '\n'.join(typedefs) + '\n'


_cffi_to_torch = {}
_torch_to_cffi = {}
_typedefs = _generate_typedefs()


PY_MODULE_TEMPLATE = Template("""
from torch.utils.ffi import _wrap_function
from .$cffi_wrapper_name import lib as _lib, ffi as _ffi

__all__ = []
def _import_symbols(locals):
    for symbol in dir(_lib):
        fn = getattr(_lib, symbol)
        if callable(fn):
            locals[symbol] = _wrap_function(fn, _ffi)
        else:
            locals[symbol] = fn
        __all__.append(symbol)

_import_symbols(locals())
""")


def _setup_wrapper(with_cuda):
    here = os.path.abspath(os.path.dirname(__file__))
    lib_dir = os.path.join(here, '..', '..', 'lib')
    include_dirs = [
        os.path.join(lib_dir, 'include'),
        os.path.join(lib_dir, 'include', 'TH'),
    ]

    wrapper_source = '#include <TH/TH.h>\n'
    if with_cuda:
        import torch.cuda
        wrapper_source += '#include <THC/THC.h>\n'
        if os.sys.platform == 'win32':
            cuda_include_dirs = glob.glob(os.getenv('CUDA_PATH', '') + '/include')
            cuda_include_dirs += glob.glob(os.getenv('NVTOOLSEXT_PATH', '') + '/include')
        else:
            cuda_include_dirs = glob.glob('/usr/local/cuda/include')
            cuda_include_dirs += glob.glob('/Developer/NVIDIA/CUDA-*/include')
        include_dirs.append(os.path.join(lib_dir, 'include', 'THC'))
        include_dirs.extend(cuda_include_dirs)
    return wrapper_source, include_dirs


def _create_module_dir(base_path, fullname):
    module, _, name = fullname.rpartition('.')
    if not module:
        target_dir = name
    else:
        target_dir = reduce(os.path.join, fullname.split('.'))
    target_dir = os.path.join(base_path, target_dir)
    try:
        os.makedirs(target_dir)
    except os.error:
        pass
    for dirname in _accumulate(fullname.split('.'), os.path.join):
        init_file = os.path.join(base_path, dirname, '__init__.py')
        open(init_file, 'a').close()  # Create file if it doesn't exist yet
    return name, target_dir


def _build_extension(ffi, cffi_wrapper_name, target_dir, verbose):
    try:
        tmpdir = tempfile.mkdtemp()
        ext_suf = '.pyd' if os.sys.platform == 'win32' else '.so'
        libname = cffi_wrapper_name + ext_suf
        outfile = ffi.compile(tmpdir=tmpdir, verbose=verbose, target=libname)
        shutil.copy(outfile, os.path.join(target_dir, libname))
    finally:
        shutil.rmtree(tmpdir)


def _make_python_wrapper(name, cffi_wrapper_name, target_dir):
    py_source = PY_MODULE_TEMPLATE.substitute(name=name,
                                              cffi_wrapper_name=cffi_wrapper_name)
    with open(os.path.join(target_dir, '__init__.py'), 'w') as f:
        f.write(py_source)


def create_extension(name, headers, sources, verbose=True, with_cuda=False,
                     package=False, relative_to='.', **kwargs):
    base_path = os.path.abspath(os.path.dirname(relative_to))
    name_suffix, target_dir = _create_module_dir(base_path, name)
    if not package:
        cffi_wrapper_name = '_' + name_suffix
    else:
        cffi_wrapper_name = (name.rpartition('.')[0] +
                             '.{0}._{0}'.format(name_suffix))

    wrapper_source, include_dirs = _setup_wrapper(with_cuda)
    include_dirs.extend(kwargs.pop('include_dirs', []))

    if os.sys.platform == 'win32':
        library_dirs = glob.glob(os.getenv('CUDA_PATH', '') + '/lib/x64')
        library_dirs += glob.glob(os.getenv('NVTOOLSEXT_PATH', '') + '/lib/x64')

        here = os.path.abspath(os.path.dirname(__file__))
        lib_dir = os.path.join(here, '..', '..', 'lib')

        library_dirs.append(os.path.join(lib_dir))
    else:
        library_dirs = []
    library_dirs.extend(kwargs.pop('library_dirs', []))

    if isinstance(headers, str):
        headers = [headers]
    all_headers_source = ''
    for header in headers:
        with open(os.path.join(base_path, header), 'r') as f:
            all_headers_source += f.read() + '\n\n'

    ffi = cffi.FFI()
    sources = [os.path.join(base_path, src) for src in sources]
    # NB: TH headers are C99 now
    kwargs['extra_compile_args'] = ['-std=c99'] + kwargs.get('extra_compile_args', [])
    ffi.set_source(cffi_wrapper_name, wrapper_source + all_headers_source,
                   sources=sources,
                   include_dirs=include_dirs,
                   library_dirs=library_dirs, **kwargs)
    ffi.cdef(_typedefs + all_headers_source)

    _make_python_wrapper(name_suffix, '_' + name_suffix, target_dir)

    def build():
        _build_extension(ffi, cffi_wrapper_name, target_dir, verbose)
    ffi.build = build

    return ffi


def _wrap_function(function, ffi):
    @wraps(function)
    def safe_call(*args, **kwargs):
        args = tuple(ffi.cast(_torch_to_cffi.get(arg.type(), 'void') + '*',
                              arg._cdata)
                     if isinstance(arg, torch.Tensor) or torch.is_storage(arg)
                     else arg
                     for arg in args)
        args = (function,) + args
        result = torch._C._safe_call(*args, **kwargs)
        if isinstance(result, ffi.CData):
            typeof = ffi.typeof(result)
            if typeof.kind == 'pointer':
                cdata = int(ffi.cast('uintptr_t', result))
                cname = typeof.item.cname
                if cname in _cffi_to_torch:
                    # TODO: Maybe there is a less janky way to eval
                    # off of this
                    return eval(_cffi_to_torch[cname])(cdata=cdata)
        return result
    return safe_call
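For reference, this is how the wrapper modules generated from PY_MODULE_TEMPLATE above consume _wrap_function; a minimal sketch, in which the compiled cffi module name ._roi_pooling is an assumed stand-in for whatever your project actually built:

# hypothetical generated __init__.py of a cffi extension package
from ffiext import _wrap_function  # was: from torch.utils.ffi import _wrap_function
from ._roi_pooling import lib as _lib, ffi as _ffi  # compiled cffi wrapper (assumed name)

__all__ = []

def _import_symbols(locals):
    # Wrap every callable exported by the compiled library so that tensor and
    # storage arguments are cast to the matching TH*/THCuda* pointers per call.
    for symbol in dir(_lib):
        fn = getattr(_lib, symbol)
        if callable(fn):
            locals[symbol] = _wrap_function(fn, _ffi)
        else:
            locals[symbol] = fn
        __all__.append(symbol)

_import_symbols(locals())

Note that this shim only fixes the import: the wrapped calls still dispatch through torch._C._safe_call at runtime, so whether a given old extension actually runs under torch 1.4.0 still depends on the project.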
Reference: https://github.com/jwyang/faster-rcnn.pytorch/issues/412