Code repository: https://github.com/endernewton/tf-faster-rcnn
Windows build issue: https://github.com/endernewton/tf-faster-rcnn/issues/335
Note: if any strange error occurs, I suggest deleting all the generated .c and .cpp files (lib/nms/cpu_nms.c, lib/nms/gpu_nms.cpp, lib/utils/bbox.c) and everything in lib/build, then rebuilding; a minimal cleanup sketch follows.
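A sketch of that cleanup (assuming it is run from the repository root; adjust the paths if your layout differs):

import os, shutil
for f in ['lib/nms/cpu_nms.c', 'lib/nms/gpu_nms.cpp', 'lib/utils/bbox.c']:
    if os.path.exists(f):
        os.remove(f)                                 # remove Cython-generated sources
shutil.rmtree('lib/build', ignore_errors=True)       # remove previous build output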
1. File lib/nms/cpu_nms.pyx:
change (line 25)
cdef np.ndarray[np.int_t, ndim=1] order = scores.argsort()[::-1]
to
cdef np.ndarray[np.int64_t, ndim=1] order = scores.argsort()[::-1]
2. File lib/nms/gpu_nms.pyx:
change (line 25)
cdef np.ndarray[np.int_t, ndim=1] \
to
cdef np.ndarray[np.int64_t, ndim=1] \
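Both changes are needed for the same reason: on 64-bit Windows, np.int_t (numpy's np.int_) is a 32-bit C long, while argsort() returns int64 indices, so the declared buffer dtype no longer matches the array. A quick check you can run in a Windows Python session:

import numpy as np
print(np.dtype(np.int_))              # int32 on 64-bit Windows (C long is 32 bits)
print(np.zeros(3).argsort().dtype)    # int64, hence the np.int64_t declaration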
3. File lib/datasets/pascal_voc.py:
change (line 226)
'{:s}.xml')
to
'{0:s}.xml')
4. File lib/datasets/voc_eval.py:
change (line 121)
with open(cachefile, 'w') as f:
to
with open(cachefile, 'wb') as f:
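The binary mode matters because voc_eval.py writes this cache with pickle, and in Python 3 pickle.dump requires a file opened in binary mode. A minimal illustration:

import pickle
try:
    with open('cache.pkl', 'w') as f:      # text mode, as in the original code
        pickle.dump({'recs': 1}, f)
except TypeError as e:
    print(e)                               # write() argument must be str, not bytes
with open('cache.pkl', 'wb') as f:         # binary mode, as in the fix
    pickle.dump({'recs': 1}, f)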
5. File lib/setup.py (almost completely rewritten; the new code is as follows):
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
def find_in_path(name, path):
    "Find a file in a search path"
    # adapted from http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    for dir in path.split(os.pathsep):
        binpath = pjoin(dir, name)
        if os.path.exists(binpath):
            return os.path.abspath(binpath)
    return None


def locate_cuda():
    """Locate the CUDA environment on the system

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    Starts by looking for the CUDAHOME env variable. If not found, everything
    is based on finding 'nvcc' in the PATH.
    """
    # first check if the CUDAHOME env variable is in use
    if 'CUDAHOME' in os.environ:
        home = os.environ['CUDAHOME']
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        # otherwise, search the PATH for NVCC
        nvcc = find_in_path('nvcc.exe', os.environ['PATH'])
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be '
                                   'located in your $PATH. Either add it to your path, or set $CUDAHOME')
        home = os.path.dirname(os.path.dirname(nvcc))

    cudaconfig = {'home': home, 'nvcc': nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib', 'x64')}
    for k, v in iter(cudaconfig.items()):
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
    return cudaconfig


CUDA = locate_cuda()

# Obtain the numpy include directory. This logic works across numpy versions.
try:
    numpy_include = np.get_include()
except AttributeError:
    numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
    # _msvccompiler.py imports:
    import os
    import shutil
    import stat
    import subprocess
    import winreg

    from distutils.errors import DistutilsExecError, DistutilsPlatformError, \
        CompileError, LibError, LinkError
    from distutils.ccompiler import CCompiler, gen_lib_options
    from distutils import log
    from distutils.util import get_platform
    from itertools import count

    super = self.compile
    self.src_extensions.append('.cu')

    # find python include
    import sys
    py_dir = sys.executable.replace('\\', '/').split('/')[:-1]
    py_include = pjoin('/'.join(py_dir), 'include')

    # override method in _msvccompiler.py, starts from line 340
    def compile(sources,
                output_dir=None, macros=None, include_dirs=None, debug=0,
                extra_preargs=None, extra_postargs=None, depends=None):

        if not self.initialized:
            self.initialize()
        compile_info = self._setup_compile(output_dir, macros, include_dirs,
                                           sources, depends, extra_postargs)
        macros, objects, extra_postargs, pp_opts, build = compile_info

        compile_opts = extra_preargs or []
        compile_opts.append('/c')
        if debug:
            compile_opts.extend(self.compile_options_debug)
        else:
            compile_opts.extend(self.compile_options)

        add_cpp_opts = False

        for obj in objects:
            try:
                src, ext = build[obj]
            except KeyError:
                continue
            if debug:
                # pass the full pathname to MSVC in debug mode,
                # this allows the debugger to find the source file
                # without asking the user to browse for it
                src = os.path.abspath(src)

            if ext in self._c_extensions:
                input_opt = "/Tc" + src
            elif ext in self._cpp_extensions:
                input_opt = "/Tp" + src
                add_cpp_opts = True
            elif ext in self._rc_extensions:
                # compile .RC to .RES file
                input_opt = src
                output_opt = "/fo" + obj
                try:
                    self.spawn([self.rc] + pp_opts + [output_opt, input_opt])
                except DistutilsExecError as msg:
                    raise CompileError(msg)
                continue
            elif ext in self._mc_extensions:
                # Compile .MC to .RC file to .RES file.
                #   * '-h dir' specifies the directory for the
                #     generated include file
                #   * '-r dir' specifies the target directory of the
                #     generated RC file and the binary message resource
                #     it includes
                #
                # For now (since there are no options to change this),
                # we use the source-directory for the include file and
                # the build directory for the RC file and message
                # resources. This works at least for win32all.
                h_dir = os.path.dirname(src)
                rc_dir = os.path.dirname(obj)
                try:
                    # first compile .MC to .RC and .H file
                    self.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src])
                    base, _ = os.path.splitext(os.path.basename(src))
                    rc_file = os.path.join(rc_dir, base + '.rc')
                    # then compile .RC to .RES file
                    self.spawn([self.rc, "/fo" + obj, rc_file])
                except DistutilsExecError as msg:
                    raise CompileError(msg)
                continue
            elif ext == '.cu':
                # a trigger for cu compile
                try:
                    # use the cuda for .cu files
                    # self.set_executable('compiler_so', CUDA['nvcc'])
                    # use only a subset of the extra_postargs, which are 1-1 translated
                    # from the extra_compile_args in the Extension class
                    postargs = extra_postargs['nvcc']
                    arg = [CUDA['nvcc']] + sources + ['-odir', pjoin(output_dir, 'nms')]
                    for include_dir in include_dirs:
                        arg.append('-I')
                        arg.append(include_dir)
                    arg += ['-I', py_include]
                    # arg += ['-lib', CUDA['lib64']]
                    arg += ['-Xcompiler', '/EHsc,/W3,/nologo,/Ox,/MD']
                    arg += postargs
                    self.spawn(arg)
                    continue
                except DistutilsExecError as msg:
                    # raise CompileError(msg)
                    continue
            else:
                # how to handle this file?
                raise CompileError("Don't know how to compile {} to {}"
                                   .format(src, obj))

            args = [self.cc] + compile_opts + pp_opts
            if add_cpp_opts:
                args.append('/EHsc')
            args.append(input_opt)
            args.append("/Fo" + obj)
            args.extend(extra_postargs)

            try:
                self.spawn(args)
            except DistutilsExecError as msg:
                raise CompileError(msg)

        return objects

    self.compile = compile
# run the customize_compiler
class custom_build_ext(build_ext):
    def build_extensions(self):
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)


ext_modules = [
    Extension(
        "utils.cython_bbox",
        ["utils/bbox.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs=[numpy_include]
    ),
    Extension(
        "nms.cpu_nms",
        ["nms/cpu_nms.pyx"],
        include_dirs=[numpy_include]
    ),
    Extension('nms.gpu_nms',
        ['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
        library_dirs=[CUDA['lib64']],
        libraries=['cudart'],
        language='c++',
        # this syntax is specific to this build system
        # we're only going to use certain compiler args with nvcc and not with gcc
        # the implementation of this trick is in customize_compiler_for_nvcc() above
        extra_compile_args={'gcc': ["-Wno-unused-function"],
                            'nvcc': ['-arch=sm_52',
                                     '--ptxas-options=-v',
                                     '-c']},
        include_dirs=[numpy_include, CUDA['include']]
    )
]

setup(
    name='tf_faster_rcnn',
    ext_modules=ext_modules,
    # inject our custom trigger
    cmdclass={'build_ext': custom_build_ext},
)
Note: change the -arch=sm_52 above to match your graphics card (for example, -arch=sm_61 for a GTX 1070/1080), otherwise gpu_nms will produce corrupted results. You can follow the original installation guide for the value that matches your GPU.
6. Now switch to the lib directory and run python setup.py build_ext. It may report an error:
nms\gpu_nms.cpp(2075): error C2664: "void _nms(int *,int *,const float *,int,int,float,int)": cannot convert parameter 1 from '__pyx_t_5numpy_int32_t *' to 'int *'
If it does, modify one line in the generated file lib/nms/gpu_nms.cpp: search for the keyword _nms and find the following line (the mismatch apparently arises because numpy's npy_int32 is typedef'd to long on Windows, which MSVC will not implicitly convert to int * in C++).
change (line 2075 in my generated cpp file)
_nms((&(*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int32_t *, __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf, __pyx_t_10, ...
to
_nms((&(*__Pyx_BufPtrStrided1d(int *, __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf, __pyx_t_10, ...
Finally, run python setup.py build_ext again and copy everything from lib/build/lib.win-amd64-3.6 into lib.
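If you prefer to script the copy step, a small sketch using distutils (run from the lib directory; the build folder name depends on your Python version):

from distutils.dir_util import copy_tree
copy_tree('build/lib.win-amd64-3.6', '.')   # overlay the compiled extensions onto lib/

With the extensions built, training can be driven by the following script, adapted from tools/trainval_net.py with the argparse arguments hard-coded in an args class (the G:\cai_op\... paths are local and should be changed to your own):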
from lib.model.train_val import get_training_roidb, train_net
from lib.model.config import cfg, cfg_from_file, cfg_from_list, get_output_dir, get_output_tb_dir
from lib.datasets.factory import get_imdb
import lib.datasets.imdb
import argparse
import pprint
import numpy as np
import sys
import tensorflow as tf
from lib.nets.vgg16 import vgg16
from lib.nets.resnet_v1 import resnetv1
class args:
    """
    Input arguments, hard-coded instead of parsed from the command line
    """
    cfg_file = r'G:\cai_op\tf-faster-rcnn\experiments\cfgs\res101.yml'
    weight = r'G:\cai_op\tf-faster-rcnn\data\imagenet_weights\res101.ckpt'
    imdb_name = 'voc_2007_trainval'
    imdbval_name = 'voc_2007_test'
    max_iters = 100000
    tag = None
    net = 'res101'  # or 'vgg16'
    set_cfgs = ['ANCHOR_SCALES', '[8,16,32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'TRAIN.STEPSIZE', '[50000]']


def combined_roidb(imdb_names):
    """
    Combine multiple roidbs
    """
    def get_roidb(imdb_name):
        imdb = get_imdb(imdb_name)
        print('Loaded dataset `{:s}` for training'.format(imdb.name))
        imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
        print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
        roidb = get_training_roidb(imdb)
        return roidb

    roidbs = [get_roidb(s) for s in imdb_names.split('+')]
    roidb = roidbs[0]
    if len(roidbs) > 1:
        for r in roidbs[1:]:
            roidb.extend(r)
        tmp = get_imdb(imdb_names.split('+')[1])
        imdb = lib.datasets.imdb.imdb(imdb_names, tmp.classes)
    else:
        imdb = get_imdb(imdb_names)
    return imdb, roidb


if __name__ == '__main__':
    # args = parse_args()
    print('Called with args:')
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)

    np.random.seed(cfg.RNG_SEED)

    # train set
    imdb, roidb = combined_roidb(args.imdb_name)
    print('{:d} roidb entries'.format(len(roidb)))

    # output directory where the models are saved
    output_dir = get_output_dir(imdb, args.tag)
    print('Output will be saved to `{:s}`'.format(output_dir))

    # tensorboard directory where the summaries are saved during training
    tb_dir = get_output_tb_dir(imdb, args.tag)
    print('TensorFlow summaries will be saved to `{:s}`'.format(tb_dir))

    # also add the validation set, but with no flipping images
    orgflip = cfg.TRAIN.USE_FLIPPED
    cfg.TRAIN.USE_FLIPPED = False
    _, valroidb = combined_roidb(args.imdbval_name)
    print('{:d} validation roidb entries'.format(len(valroidb)))
    cfg.TRAIN.USE_FLIPPED = orgflip

    # load network
    if args.net == 'vgg16':
        # net = vgg16(batch_size=cfg.TRAIN.IMS_PER_BATCH)
        net = vgg16()
    elif args.net == 'res101':
        net = resnetv1(num_layers=101)
    else:
        raise NotImplementedError

    train_net(net, imdb, roidb, valroidb, output_dir, tb_dir,
              pretrained_model=args.weight,
              max_iters=args.max_iters)
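After training (or with a downloaded pretrained model), detection can be run with the following demo script, adapted from tools/demo.py; again, the hard-coded G:\cai_op\... paths should be replaced with your own: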
from lib.model.config import cfg
from lib.model.test import im_detect
from lib.model.nms_wrapper import nms
from lib.utils.timer import Timer
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os, cv2
import argparse
from lib.nets.vgg16 import vgg16
from lib.nets.resnet_v1 import resnetv1
from PIL import Image,ImageDraw,ImageFont
CLASSES = ('__background__',
           'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair',
           'cow', 'diningtable', 'dog', 'horse',
           'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor')

NETS = {'vgg16': ('vgg16_faster_rcnn_iter_110000.ckpt',),
        'res101': ('res101_faster_rcnn_iter_110000.ckpt',)}
DATASETS = {'pascal_voc': ('voc_2007_trainval',),
            'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',)}


def vis_detections(image_name, class_name, dets, ax, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
        )
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name, thresh),
                 fontsize=14)
    # save the visualization as a .png under data/result
    image_name = image_name.split('/')[-1].replace('jpg', 'png')
    plt.savefig(r'G:\cai_op\tf-faster-rcnn\data\result/' + image_name)
    print("save image to data/result/{}".format(image_name))


def demo(sess, net, image_name):
    im = cv2.imread(image_name)
    # print(im.shape)
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    # print(scores)
    # print(boxes)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        vis_detections(image_name, cls, dets, ax, thresh=CONF_THRESH)

    plt.axis('off')
    plt.tight_layout()
    plt.draw()


def parse_args():
    parser = argparse.ArgumentParser(description='Tensorflow Faster R-CNN demo')
    parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16 res101]',
                        choices=NETS.keys(), default='vgg16')
    parser.add_argument('--dataset', dest='dataset', help='Trained dataset [pascal_voc pascal_voc_0712]',
                        choices=DATASETS.keys(), default='pascal_voc_0712')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals
    args = parse_args()

    # model path
    demonet = args.demo_net
    dataset = args.dataset
    tfmodel = os.path.join(r'G:\cai_op\tf-faster-rcnn', 'output', demonet, DATASETS[dataset][0], 'default',
                           NETS[demonet][0])
    # print(tfmodel)
    # tfmodel = r"/home/ycya/cxcode/tf-faster-rcnn/output/res101/voc_2007_trainval/default/res101_faster_rcnn_iter_70000.ckpt"
    if not os.path.isfile(tfmodel + '.meta'):
        raise IOError(('{:s} not found.\nDid you download the proper networks from '
                       'our server and place them properly?').format(tfmodel + '.meta'))

    # set config
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True

    # init session
    sess = tf.Session(config=tfconfig)

    # load network
    if demonet == 'vgg16':
        net = vgg16()
    elif demonet == 'res101':
        net = resnetv1(num_layers=101)
    else:
        raise NotImplementedError
    net.create_architecture("TEST", 21,
                            tag='default', anchor_scales=[8, 16, 32])
    saver = tf.train.Saver()
    saver.restore(sess, tfmodel)
    print('Loaded network {:s}'.format(tfmodel))

    path = r"G:\cai_op\tf-faster-rcnn\data\demo/"
    for im_name in os.listdir(path):
        im_file = path + im_name
        print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
        print('Demo for data/demo/{}'.format(im_name))
        demo(sess, net, im_file)
    plt.show()
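Assuming the demo script is saved as demo.py, it can be run as, for example, python demo.py --net res101 --dataset pascal_voc. It loads the checkpoint from output/<net>/<dataset>/default under the repository root and writes the visualizations to data/result.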