import os
import glob
import tempfile
import shutil
from functools import wraps, reduce
from string import Template
import torch
import torch.cuda
from torch._utils import _accumulate

try:
    import cffi
except ImportError:
    raise ImportError("torch.utils.ffi requires the cffi package")

if cffi.__version_info__ < (1, 4, 0):
    raise ImportError("torch.utils.ffi requires cffi version >= 1.4, but "
                      "got " + '.'.join(map(str, cffi.__version_info__)))


def _generate_typedefs():
    """Build C typedef declarations for every TH/THCuda Tensor/Storage struct.

    As a side effect, fills the module-level ``_cffi_to_torch`` /
    ``_torch_to_cffi`` translation tables.

    Returns:
        All typedefs joined into one newline-terminated string, suitable for
        prepending to a cffi ``cdef``.
    """
    typedefs = []
    for t in ['Double', 'Float', 'Long', 'Int', 'Short', 'Char', 'Byte']:
        for lib in ['TH', 'THCuda']:
            for kind in ['Tensor', 'Storage']:
                python_name = t + kind
                # THCudaTensor/THCudaStorage are the *float* CUDA types; every
                # other CUDA type carries the scalar name (THCudaDouble...).
                if t == 'Float' and lib == 'THCuda':
                    th_name = 'THCuda' + kind
                else:
                    th_name = lib + t + kind
                th_struct = 'struct ' + th_name

                typedefs += ['typedef {} {};'.format(th_struct, th_name)]
                # We have to assemble a string here, because we're going to
                # do this lookup based on tensor.type(), which returns a
                # string (not a type object, as this code was before)
                python_module = 'torch.cuda' if lib == 'THCuda' else 'torch'
                python_class = python_module + '.' + python_name
                _cffi_to_torch[th_struct] = python_class
                _torch_to_cffi[python_class] = th_struct
    return '\n'.join(typedefs) + '\n'


# Bidirectional maps between cffi struct names and torch class-name strings,
# populated as a side effect of _generate_typedefs().
_cffi_to_torch = {}
_torch_to_cffi = {}
_typedefs = _generate_typedefs()
PY_MODULE_TEMPLATE = Template("""
from torch.utils.ffi import _wrap_function
from .$cffi_wrapper_name import lib as _lib, ffi as _ffi
__all__ = []
def _import_symbols(locals):
for symbol in dir(_lib):
fn = getattr(_lib, symbol)
if callable(fn):
locals[symbol] = _wrap_function(fn, _ffi)
else:
locals[symbol] = fn
__all__.append(symbol)
_import_symbols(locals())
""")def_setup_wrapper(with_cuda):
here = os.path.abspath(os.path.dirname(__file__))
lib_dir = os.path.join(here,'..','..','lib')
include_dirs =[
os.path.join(lib_dir,'include'),
os.path.join(lib_dir,'include','TH'),]
wrapper_source ='#include
\n'
if with_cuda:import torch.cuda
wrapper_source +='#include \n'if os.sys.platform =='win32':
cuda_include_dirs = glob.glob(os.getenv('CUDA_PATH','')+'/include')
cuda_include_dirs += glob.glob(os.getenv('NVTOOLSEXT_PATH','')+'/include')else:
cuda_include_dirs = glob.glob('/usr/local/cuda/include')
cuda_include_dirs += glob.glob('/Developer/NVIDIA/CUDA-*/include')
include_dirs.append(os.path.join(lib_dir,'include','THC'))
include_dirs.extend(cuda_include_dirs)return wrapper_source, include_dirs
def_create_module_dir(base_path, fullname):
module, _, name = fullname.rpartition('.')ifnot module:
target_dir = name
else:
target_dir =reduce(os.path.join, fullname.split('.'))
target_dir = os.path.join(base_path, target_dir)try:
os.makedirs(target_dir)except os.error:passfor dirname in _accumulate(fullname.split('.'), os.path.join):
init_file = os.path.join(base_path, dirname,'__init__.py')open(init_file,'a').close()# Create file if it doesn't exist yetreturn name, target_dir
def_build_extension(ffi, cffi_wrapper_name, target_dir, verbose):try:
tmpdir = tempfile.mkdtemp()
ext_suf ='.pyd'if os.sys.platform =='win32'else'.so'
libname = cffi_wrapper_name + ext_suf
outfile = ffi.compile(tmpdir=tmpdir, verbose=verbose, target=libname)
shutil.copy(outfile, os.path.join(target_dir, libname))finally:
shutil.rmtree(tmpdir)def_make_python_wrapper(name, cffi_wrapper_name, target_dir):
py_source = PY_MODULE_TEMPLATE.substitute(name=name,
cffi_wrapper_name=cffi_wrapper_name)withopen(os.path.join(target_dir,'__init__.py'),'w')as f:
def create_extension(name, headers, sources, verbose=True, with_cuda=False,
                     package=False, relative_to='.', **kwargs):
    """Create a cffi-based torch C extension builder.

    Args:
        name: dotted package name for the extension (e.g. ``'my_pkg._ext'``).
        headers: header file, or list of header files, declaring the
            exported functions.
        sources: list of C source files to compile.
        verbose: print build output.
        with_cuda: also compile against the THC (CUDA) headers.
        package: build as a proper Python package rather than a flat module.
        relative_to: file that header/source paths are resolved against
            (pass ``__file__`` from the build script).
        **kwargs: forwarded to ``ffi.set_source`` (``include_dirs``,
            ``library_dirs``, ``extra_compile_args``, ...).

    Returns:
        An ``ffi`` object carrying a ``build()`` method that performs the
        actual compilation.
    """
    base_path = os.path.abspath(os.path.dirname(relative_to))
    name_suffix, target_dir = _create_module_dir(base_path, name)
    if not package:
        cffi_wrapper_name = '_' + name_suffix
    else:
        # e.g. name 'a.b.ext' -> wrapper module 'a.b.ext._ext'
        cffi_wrapper_name = (name.rpartition('.')[0] +
                             '.{0}._{0}'.format(name_suffix))

    wrapper_source, include_dirs = _setup_wrapper(with_cuda)
    include_dirs.extend(kwargs.pop('include_dirs', []))

    if os.sys.platform == 'win32':
        library_dirs = glob.glob(os.getenv('CUDA_PATH', '') + '/lib/x64')
        library_dirs += glob.glob(os.getenv('NVTOOLSEXT_PATH', '') + '/lib/x64')
        here = os.path.abspath(os.path.dirname(__file__))
        lib_dir = os.path.join(here, '..', '..', 'lib')
        library_dirs.append(os.path.join(lib_dir))
    else:
        library_dirs = []
    library_dirs.extend(kwargs.pop('library_dirs', []))

    if isinstance(headers, str):
        headers = [headers]
    all_headers_source = ''
    for header in headers:
        with open(os.path.join(base_path, header), 'r') as f:
            all_headers_source += f.read() + '\n\n'

    ffi = cffi.FFI()
    sources = [os.path.join(base_path, src) for src in sources]
    # NB: TH headers are C99 now
    kwargs['extra_compile_args'] = ['-std=c99'] + kwargs.get('extra_compile_args', [])
    ffi.set_source(cffi_wrapper_name, wrapper_source + all_headers_source,
                   sources=sources,
                   include_dirs=include_dirs,
                   library_dirs=library_dirs, **kwargs)
    ffi.cdef(_typedefs + all_headers_source)

    _make_python_wrapper(name_suffix, '_' + name_suffix, target_dir)

    def build():
        _build_extension(ffi, cffi_wrapper_name, target_dir, verbose)
    ffi.build = build

    return ffi
def_wrap_function(function, ffi):@wraps(function)defsafe_call(*args,**kwargs):
args =tuple(ffi.cast(_torch_to_cffi.get(arg.type(),'void')+'*', arg._cdata)ifisinstance(arg, torch.Tensor)or torch.is_storage(arg)else arg
for arg in args)
args =(function,)+ args
result = torch._C._safe_call(*args,**kwargs)ifisinstance(result, ffi.CData):
typeof = ffi.typeof(result)if typeof.kind =='pointer':
cdata =int(ffi.cast('uintptr_t', result))
cname = typeof.item.cname
if cname in _cffi_to_torch:# TODO: Maybe there is a less janky way to eval# off of thisreturneval(_cffi_to_torch[cname])(cdata=cdata)return result
return safe_call
参考
I solved it by adding the source code of torch.utils.ffi to my project, without changing my PyTorch version (I use torch 1.4.0).
For example, save the following code to ffiext.py; then you can replace `from torch.utils.ffi import _wrap_function` with `from ffiext import _wrap_function`.
You can also find the following source code from
当某个数据库用户在数据库中插入、更新、删除一个表的数据,或者增加一个表的主键时或者表的索引时,常常会出现ora-00054:resource busy and acquire with nowait specified这样的错误。主要是因为有事务正在执行(或者事务已经被锁),所有导致执行不成功。
1.下面的语句
insert提示IGNORE_ROW_ON_DUPKEY_INDEX
转自:http://space.itpub.net/18922393/viewspace-752123
在 insert into tablea ...select * from tableb中,如果存在唯一约束,会导致整个insert操作失败。使用IGNORE_ROW_ON_DUPKEY_INDEX提示,会忽略唯一
1.记录慢查询配置
show variables where variable_name like 'slow%' ; --查看默认日志路径
查询结果:--不用的机器可能不同
slow_query_log_file=/var/lib/mysql/centos-slow.log
修改mysqld配置文件:/usr /my.cnf[一般在/etc/my.cnf,本机在/user/my.cn
@ControllerAdvice,是spring3.2提供的新注解,从名字上可以看出大体意思是控制器增强。让我们先看看@ControllerAdvice的实现:
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Component
public @interface Co
BW Element
OLAP Universe Element
Cube Dimension
Class
Charateristic
A class with dimension and detail objects (Detail objects for key and desription)
Hi