ubuntu配置SSD

1.通过前面对caffe的配置,基本说明之前安装的软件以及依赖库没有问题,接下来将配置SSD。

git clone https://github.com/weiliu89/caffe.git caffe-ssd

cd caffe-ssd

git checkout ssd

如下图即成功了:

ubuntu配置SSD_第1张图片

之后同样需要修改Makefile.config文件:

cp Makefile.config.example Makefile.config

下面不再详细介绍如何修改,具体可以看我上一篇caffe的编译博客,这里我直接贴出来:

## Refer to http://caffe.berkeleyvision.org/installation.html

# Contributions simplifying and improving our build system are welcome!

# cuDNN acceleration switch (uncomment to build with cuDNN).

# NOTE(review): enabled by this guide -- requires a cuDNN release that
# matches the CUDA toolkit found under CUDA_DIR below.
USE_CUDNN := 1

# CPU-only switch (uncomment to build without GPU support).

# CPU_ONLY := 1

# uncomment to disable IO dependencies and corresponding data layers

# USE_OPENCV := 0

# USE_LEVELDB := 0

# USE_LMDB := 0

# uncomment to allow MDB_NOLOCK when reading LMDB files (only if necessary)

# You should not set this flag if you will be reading LMDBs with any

# possibility of simultaneous read and write

# ALLOW_LMDB_NOLOCK := 1

# Uncomment if you're using OpenCV 3

OPENCV_VERSION := 3

# To customize your choice of compiler, uncomment and set the following.

# N.B. the default for Linux is g++ and the default for OSX is clang++

# CUSTOM_CXX := g++

# CUDA directory contains bin/ and lib/ directories that we need.

CUDA_DIR := /usr/local/cuda

# On Ubuntu 14.04, if cuda tools are installed via

# "sudo apt-get install nvidia-cuda-toolkit" then use this instead:

# CUDA_DIR := /usr

# CUDA architecture setting: going with all of them.

# For CUDA < 6.0, comment the lines after *_35 for compatibility.

# NOTE(review): the blank lines between the backslash-continued lines below
# look like a copy/paste artifact of the blog formatting; in a real
# Makefile.config each "\" must be followed directly by the next line --
# confirm against the file in the repository before copying this verbatim.
CUDA_ARCH := -gencode arch=compute_35,code=sm_35 \

            -gencode arch=compute_50,code=sm_50 \

            -gencode arch=compute_52,code=sm_52 \

            -gencode arch=compute_61,code=sm_61

# BLAS choice:

# atlas for ATLAS (default)

# mkl for MKL

# open for OpenBlas

# BLAS := atlas

# NOTE(review): OpenBLAS chosen instead of the ATLAS default -- needs the
# OpenBLAS development package (e.g. libopenblas-dev) installed.
BLAS := open

# Custom (MKL/ATLAS/OpenBLAS) include and lib directories.

# Leave commented to accept the defaults for your choice of BLAS

# (which should work)!

# BLAS_INCLUDE := /path/to/your/blas

# BLAS_LIB := /path/to/your/blas

# Homebrew puts openblas in a directory that is not on the standard search path

# BLAS_INCLUDE := $(shell brew --prefix openblas)/include

# BLAS_LIB := $(shell brew --prefix openblas)/lib

# This is required only if you will compile the matlab interface.

# MATLAB directory should contain the mex binary in /bin.

# MATLAB_DIR := /usr/local

# MATLAB_DIR := /Applications/MATLAB_R2012b.app

# NOTE: this is required only if you will compile the python interface.

# We need to be able to find Python.h and numpy/arrayobject.h.

# NOTE(review): same copy/paste caveat as CUDA_ARCH above -- the continued
# numpy include path must follow the "\" directly, with no blank line.
PYTHON_INCLUDE := /usr/include/python2.7 \

/usr/lib/python2.7/dist-packages/numpy/core/include

# Anaconda Python distribution is quite popular. Include path:

# Verify anaconda location, sometimes it's in root.

# ANACONDA_HOME := $(HOME)/anaconda2

# PYTHON_INCLUDE := $(ANACONDA_HOME)/include \

$(ANACONDA_HOME)/include/python2.7 \

$(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include \

# Uncomment to use Python 3 (default is Python 2)

# PYTHON_LIBRARIES := boost_python3 python3.5m

# PYTHON_INCLUDE := /usr/include/python3.5m \

#                /usr/lib/python3.5/dist-packages/numpy/core/include

# We need to be able to find libpythonX.X.so or .dylib.

PYTHON_LIB := /usr/lib

# PYTHON_LIB := $(ANACONDA_HOME)/lib

# Homebrew installs numpy in a non standard path (keg only)

# PYTHON_INCLUDE += $(dir $(shell python -c 'import numpy.core; print(numpy.core.__file__)'))/include

# PYTHON_LIB += $(shell brew --prefix numpy)/lib

# Uncomment to support layers written in Python (will link against Python libs)

# NOTE(review): enabled here so layers implemented in Python can be loaded.
WITH_PYTHON_LAYER := 1

# Whatever else you find you need goes here.

# NOTE(review): the extra hdf5/serial include/lib paths match Ubuntu >= 16.04,
# where libhdf5 is installed under .../hdf5/serial -- verify for your distro.
INCLUDE_DIRS := $(PYTHON_INCLUDE) /usr/local/include /usr/include/hdf5/serial

LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib /usr/lib/x86_64-linux-gnu /usr/lib/x86_64-linux-gnu/hdf5/serial

# If Homebrew is installed at a non standard location (for example your home directory) and you use it for general dependencies

# INCLUDE_DIRS += $(shell brew --prefix)/include

# LIBRARY_DIRS += $(shell brew --prefix)/lib

# Uncomment to use `pkg-config` to specify OpenCV library paths.

# (Usually not necessary -- OpenCV libraries are normally installed in one of the above $LIBRARY_DIRS.)

# USE_PKG_CONFIG := 1

# N.B. both build and distribute dirs are cleared on `make clean`

BUILD_DIR := build

DISTRIBUTE_DIR := distribute

# Uncomment for debugging. Does not work on OSX due to https://github.com/BVLC/caffe/issues/171

# DEBUG := 1

# The ID of the GPU that 'make runtest' will use to run unit tests.

TEST_GPUID := 0

# enable pretty build (comment to see full commands)

Q ?= @

上面是Makefile.config文件,下面是Makefile文件:

PROJECT := caffe

CONFIG_FILE := Makefile.config

# Explicitly check for the config file, otherwise make -k will proceed anyway.

ifeq ($(wildcard $(CONFIG_FILE)),)

$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.)

endif

include $(CONFIG_FILE)

BUILD_DIR_LINK := $(BUILD_DIR)

ifeq ($(RELEASE_BUILD_DIR),)

RELEASE_BUILD_DIR := .$(BUILD_DIR)_release

endif

ifeq ($(DEBUG_BUILD_DIR),)

DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug

endif

DEBUG ?= 0

ifeq ($(DEBUG), 1)

BUILD_DIR := $(DEBUG_BUILD_DIR)

OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR)

else

BUILD_DIR := $(RELEASE_BUILD_DIR)

OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR)

endif

# All of the directories containing code.

SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \

\( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print)

# The target shared library name

LIBRARY_NAME := $(PROJECT)

LIB_BUILD_DIR := $(BUILD_DIR)/lib

STATIC_NAME := $(LIB_BUILD_DIR)/lib$(LIBRARY_NAME).a

DYNAMIC_VERSION_MAJOR := 1

DYNAMIC_VERSION_MINOR := 0

DYNAMIC_VERSION_REVISION := 0-rc3

DYNAMIC_NAME_SHORT := lib$(LIBRARY_NAME).so

#DYNAMIC_SONAME_SHORT := $(DYNAMIC_NAME_SHORT).$(DYNAMIC_VERSION_MAJOR)

DYNAMIC_VERSIONED_NAME_SHORT := $(DYNAMIC_NAME_SHORT).$(DYNAMIC_VERSION_MAJOR).$(DYNAMIC_VERSION_MINOR).$(DYNAMIC_VERSION_REVISION)

DYNAMIC_NAME := $(LIB_BUILD_DIR)/$(DYNAMIC_VERSIONED_NAME_SHORT)

COMMON_FLAGS += -DCAFFE_VERSION=$(DYNAMIC_VERSION_MAJOR).$(DYNAMIC_VERSION_MINOR).$(DYNAMIC_VERSION_REVISION)

##############################

# Get all source files

##############################

# CXX_SRCS are the source files excluding the test ones.

CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp")

# CU_SRCS are the cuda source files

CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu")

# TEST_SRCS are the test source files

TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp

TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp")

TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS))

TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu")

GTEST_SRC := src/gtest/gtest-all.cpp

# TOOL_SRCS are the source files for the tool binaries

TOOL_SRCS := $(shell find tools -name "*.cpp")

# EXAMPLE_SRCS are the source files for the example binaries

EXAMPLE_SRCS := $(shell find examples -name "*.cpp")

# BUILD_INCLUDE_DIR contains any generated header files we want to include.

BUILD_INCLUDE_DIR := $(BUILD_DIR)/src

# PROTO_SRCS are the protocol buffer definitions

PROTO_SRC_DIR := src/$(PROJECT)/proto

PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto)

# PROTO_BUILD_DIR will contain the .cc and obj files generated from

# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files

PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR)

PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto

# NONGEN_CXX_SRCS includes all source/header files except those generated

# automatically (e.g., by proto).

NONGEN_CXX_SRCS := $(shell find \

src/$(PROJECT) \

include/$(PROJECT) \

python/$(PROJECT) \

matlab/+$(PROJECT)/private \

examples \

tools \

-name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh")

LINT_SCRIPT := scripts/cpp_lint.py

LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint

LINT_EXT := lint.txt

LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS)))

EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT)

NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT)

# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT)

PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp

PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so

PY$(PROJECT)_HXX := include/$(PROJECT)/layers/python_layer.hpp

# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT)

MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp

ifneq ($(MATLAB_DIR),)

MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext)

endif

MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT)

##############################

# Derive generated files

##############################

# The generated files for protocol buffers

PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \

$(notdir ${PROTO_SRCS:.proto=.pb.h}))

PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \

$(notdir ${PROTO_SRCS:.proto=.pb.h}))

PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc})

PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto

PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py

PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \

$(PY_PROTO_BUILD_DIR)/$(notdir $(file)))

# The objects corresponding to the source files

# These objects will be linked into the final shared library, so we

# exclude the tool, example, and test objects.

CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o})

CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o})

PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o}

OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS)

# tool, example, and test objects

TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o})

TOOL_BUILD_DIR := $(BUILD_DIR)/tools

TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test

TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test

TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o})

TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o})

TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS)

GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o})

EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o})

# Output files for automatic dependency generation

DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \

${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}

# tool, example, and test bins

TOOL_BINS := ${TOOL_OBJS:.o=.bin}

EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin}

# symlinks to tool bins without the ".bin" extension

TOOL_BIN_LINKS := ${TOOL_BINS:.bin=}

# Put the test binaries in build/test for convenience.

TEST_BIN_DIR := $(BUILD_DIR)/test

TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \

$(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj))))))

TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \

$(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj))))))

TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS)

# TEST_ALL_BIN is the test binary that links caffe dynamically.

TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin

##############################

# Derive compiler warning dump locations

##############################

WARNS_EXT := warnings.txt

CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)})

CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)})

TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)})

EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)})

TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)})

TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)})

ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS)

ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS)

ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS)

EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT)

NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT)

##############################

# Derive include and lib directories

##############################

CUDA_INCLUDE_DIR := $(CUDA_DIR)/include

CUDA_LIB_DIR :=

# add /lib64 only if it exists

ifneq ("$(wildcard $(CUDA_DIR)/lib64)","")

CUDA_LIB_DIR += $(CUDA_DIR)/lib64

endif

CUDA_LIB_DIR += $(CUDA_DIR)/lib

INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include

ifneq ($(CPU_ONLY), 1)

INCLUDE_DIRS += $(CUDA_INCLUDE_DIR)

LIBRARY_DIRS += $(CUDA_LIB_DIR)

LIBRARIES := cudart cublas curand

endif

LIBRARIES += glog gflags protobuf boost_system boost_filesystem m hdf5_serial_hl hdf5_serial

# handle IO dependencies

USE_LEVELDB ?= 1

USE_LMDB ?= 1

USE_OPENCV ?= 1

ifeq ($(USE_LEVELDB), 1)

LIBRARIES += leveldb snappy

endif

ifeq ($(USE_LMDB), 1)

LIBRARIES += lmdb

endif

ifeq ($(USE_OPENCV), 1)

LIBRARIES += opencv_core opencv_highgui opencv_imgproc

ifeq ($(OPENCV_VERSION), 3)

LIBRARIES += opencv_imgcodecs opencv_videoio

endif

endif

PYTHON_LIBRARIES ?= boost_python python2.7

WARNINGS := -Wall -Wno-sign-compare

##############################

# Set build directories

##############################

DISTRIBUTE_DIR ?= distribute

DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib

DIST_ALIASES := dist

ifneq ($(strip $(DISTRIBUTE_DIR)),distribute)

DIST_ALIASES += distribute

endif

ALL_BUILD_DIRS := $(sort $(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \

$(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \

$(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \

$(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR))

##############################

# Set directory for Doxygen-generated documentation

##############################

DOXYGEN_CONFIG_FILE ?= ./.Doxyfile

# should be the same as OUTPUT_DIRECTORY in the .Doxyfile

DOXYGEN_OUTPUT_DIR ?= ./doxygen

DOXYGEN_COMMAND ?= doxygen

# All the files that might have Doxygen documentation.

DOXYGEN_SOURCES := $(shell find \

src/$(PROJECT) \

include/$(PROJECT) \

python/ \

matlab/ \

examples \

tools \

-name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \

        -name "*.py" -or -name "*.m")

DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE)

##############################

# Configure build

##############################

# Determine platform

UNAME := $(shell uname -s)

ifeq ($(UNAME), Linux)

LINUX := 1

else ifeq ($(UNAME), Darwin)

OSX := 1

OSX_MAJOR_VERSION := $(shell sw_vers -productVersion | cut -f 1 -d .)

OSX_MINOR_VERSION := $(shell sw_vers -productVersion | cut -f 2 -d .)

endif

# Linux

ifeq ($(LINUX), 1)

CXX ?= /usr/bin/g++

GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.)

# older versions of gcc are too dumb to build boost with -Wuninitalized

ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1)

WARNINGS += -Wno-uninitialized

endif

# boost::thread is reasonably called boost_thread (compare OS X)

# We will also explicitly add stdc++ to the link target.

LIBRARIES += boost_thread stdc++ boost_regex

VERSIONFLAGS += -Wl,-soname,$(DYNAMIC_VERSIONED_NAME_SHORT) -Wl,-rpath,$(ORIGIN)/../lib

endif

# OS X:

# clang++ instead of g++

# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0

ifeq ($(OSX), 1)

CXX := /usr/bin/clang++

ifneq ($(CPU_ONLY), 1)

CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release [0-9.]*' | tr -d '[a-z ]')

ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1)

CXXFLAGS += -stdlib=libstdc++

LINKFLAGS += -stdlib=libstdc++

endif

# clang throws this warning for cuda headers

WARNINGS += -Wno-unneeded-internal-declaration

# 10.11 strips DYLD_* env vars so link CUDA (rpath is available on 10.5+)

OSX_10_OR_LATER  := $(shell [ $(OSX_MAJOR_VERSION) -ge 10 ] && echo true)

OSX_10_5_OR_LATER := $(shell [ $(OSX_MINOR_VERSION) -ge 5 ] && echo true)

ifeq ($(OSX_10_OR_LATER),true)

ifeq ($(OSX_10_5_OR_LATER),true)

LDFLAGS += -Wl,-rpath,$(CUDA_LIB_DIR)

endif

endif

endif

# gtest needs to use its own tuple to not conflict with clang

COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1

# boost::thread is called boost_thread-mt to mark multithreading on OS X

LIBRARIES += boost_thread-mt

# we need to explicitly ask for the rpath to be obeyed

ORIGIN := @loader_path

VERSIONFLAGS += -Wl,-install_name,@rpath/$(DYNAMIC_VERSIONED_NAME_SHORT) -Wl,-rpath,$(ORIGIN)/../../build/lib

else

ORIGIN := \$$ORIGIN

endif

# Custom compiler

ifdef CUSTOM_CXX

CXX := $(CUSTOM_CXX)

endif

# Static linking

ifneq (,$(findstring clang++,$(CXX)))

STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME)

else ifneq (,$(findstring g++,$(CXX)))

STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive

else

  # The following line must not be indented with a tab, since we are not inside a target

  $(error Cannot static link with the $(CXX) compiler)

endif

# Debugging

ifeq ($(DEBUG), 1)

COMMON_FLAGS += -DDEBUG -g -O0

NVCCFLAGS += -G

else

COMMON_FLAGS += -DNDEBUG -O2

endif

# cuDNN acceleration configuration.

ifeq ($(USE_CUDNN), 1)

LIBRARIES += cudnn

COMMON_FLAGS += -DUSE_CUDNN

endif

# configure IO libraries

ifeq ($(USE_OPENCV), 1)

COMMON_FLAGS += -DUSE_OPENCV

endif

ifeq ($(USE_LEVELDB), 1)

COMMON_FLAGS += -DUSE_LEVELDB

endif

ifeq ($(USE_LMDB), 1)

COMMON_FLAGS += -DUSE_LMDB

ifeq ($(ALLOW_LMDB_NOLOCK), 1)

COMMON_FLAGS += -DALLOW_LMDB_NOLOCK

endif

endif

# CPU-only configuration

ifeq ($(CPU_ONLY), 1)

OBJS := $(PROTO_OBJS) $(CXX_OBJS)

TEST_OBJS := $(TEST_CXX_OBJS)

TEST_BINS := $(TEST_CXX_BINS)

ALL_WARNS := $(ALL_CXX_WARNS)

TEST_FILTER := --gtest_filter="-*GPU*"

COMMON_FLAGS += -DCPU_ONLY

endif

# Python layer support

ifeq ($(WITH_PYTHON_LAYER), 1)

COMMON_FLAGS += -DWITH_PYTHON_LAYER

LIBRARIES += $(PYTHON_LIBRARIES)

endif

# BLAS configuration (default = ATLAS)

BLAS ?= atlas

ifeq ($(BLAS), mkl)

# MKL

LIBRARIES += mkl_rt

COMMON_FLAGS += -DUSE_MKL

MKLROOT ?= /opt/intel/mkl

BLAS_INCLUDE ?= $(MKLROOT)/include

BLAS_LIB ?= $(MKLROOT)/lib $(MKLROOT)/lib/intel64

else ifeq ($(BLAS), open)

# OpenBLAS

LIBRARIES += openblas

else

# ATLAS

ifeq ($(LINUX), 1)

ifeq ($(BLAS), atlas)

# Linux simply has cblas and atlas

LIBRARIES += cblas atlas

endif

else ifeq ($(OSX), 1)

# OS X packages atlas as the vecLib framework

LIBRARIES += cblas

# 10.10 has accelerate while 10.9 has veclib

XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep 'version' | sed 's/[^0-9]*\([0-9]\).*/\1/')

XCODE_CLT_GEQ_7 := $(shell [ $(XCODE_CLT_VER) -gt 6 ] && echo 1)

XCODE_CLT_GEQ_6 := $(shell [ $(XCODE_CLT_VER) -gt 5 ] && echo 1)

ifeq ($(XCODE_CLT_GEQ_7), 1)

BLAS_INCLUDE ?= /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/$(shell ls /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/ | sort | tail -1)/System/Library/Frameworks/Accelerate.framework/Versions/A/Frameworks/vecLib.framework/Versions/A/Headers

else ifeq ($(XCODE_CLT_GEQ_6), 1)

BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/

LDFLAGS += -framework Accelerate

else

BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/

LDFLAGS += -framework vecLib

endif

endif

endif

INCLUDE_DIRS += $(BLAS_INCLUDE)

LIBRARY_DIRS += $(BLAS_LIB)

LIBRARY_DIRS += $(LIB_BUILD_DIR)

# Automatic dependency generation (nvcc is handled separately)

CXXFLAGS += -MMD -MP

# Complete build flags.

COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I $(includedir))

CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)

NVCCFLAGS += -D_FORCE_INLINES -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS)

# mex may invoke an older gcc that is too liberal with -Wuninitalized

MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized

LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)

USE_PKG_CONFIG ?= 0

ifeq ($(USE_PKG_CONFIG), 1)

PKG_CONFIG := $(shell pkg-config opencv --libs)

else

PKG_CONFIG :=

endif

LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \

$(foreach library,$(LIBRARIES),-l$(library))

PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library))

# 'superclean' target recursively* deletes all files ending with an extension

# in $(SUPERCLEAN_EXTS) below.  This may be useful if you've built older

# versions of Caffe that do not place all generated files in a location known

# to the 'clean' target.

#

# 'supercleanlist' will list the files to be deleted by make superclean.

#

# * Recursive with the exception that symbolic links are never followed, per the

# default behavior of 'find'.

SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo

# Set the sub-targets of the 'everything' target.

EVERYTHING_TARGETS := all py$(PROJECT) test warn lint

# Only build matcaffe as part of "everything" if MATLAB_DIR is specified.

ifneq ($(MATLAB_DIR),)

EVERYTHING_TARGETS += mat$(PROJECT)

endif

##############################

# Define build targets

##############################

.PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \

py mat py$(PROJECT) mat$(PROJECT) proto runtest \

superclean supercleanlist supercleanfiles warn everything

all: lib tools examples

lib: $(STATIC_NAME) $(DYNAMIC_NAME)

everything: $(EVERYTHING_TARGETS)

linecount:

cloc --read-lang-def=$(PROJECT).cloc \

src/$(PROJECT) include/$(PROJECT) tools examples \

python matlab

lint: $(EMPTY_LINT_REPORT)

lintclean:

@ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT)

docs: $(DOXYGEN_OUTPUT_DIR)

@ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen

$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES)

$(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE)

$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR)

@ cat $(LINT_OUTPUTS) > $@

@ if [ -s "$@" ]; then \

cat $@; \

mv $@ $(NONEMPTY_LINT_REPORT); \

echo "Found one or more lint errors."; \

exit 1; \

  fi; \

  $(RM) $(NONEMPTY_LINT_REPORT); \

  echo "No lint errors!";

$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR)

@ mkdir -p $(dir $@)

@ python $(LINT_SCRIPT) $< 2>&1 \

| grep -v "^Done processing " \

| grep -v "^Total errors found: 0" \

> $@ \

|| true

test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS)

tools: $(TOOL_BINS) $(TOOL_BIN_LINKS)

examples: $(EXAMPLE_BINS)

py$(PROJECT): py

py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY)

$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME)

@ echo CXX/LD -o $@ $<

$(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \

-o $@ $(LINKFLAGS) -l$(LIBRARY_NAME) $(PYTHON_LDFLAGS) \

-Wl,-rpath,$(ORIGIN)/../../build/lib

mat$(PROJECT): mat

mat: $(MAT$(PROJECT)_SO)

$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME)

@ if [ -z "$(MATLAB_DIR)" ]; then \

echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \

"to build mat$(PROJECT)."; \

exit 1; \

fi

@ echo MEX $<

$(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \

CXX="$(CXX)" \

CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \

CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@

@ if [ -f "$(PROJECT)_.d" ]; then \

mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \

fi

runtest: $(TEST_ALL_BIN)

$(TOOL_BUILD_DIR)/caffe

$(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER)

pytest: py

cd python; python -m unittest discover -s caffe/test

mattest: mat

cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()'

warn: $(EMPTY_WARN_REPORT)

$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR)

@ cat $(ALL_WARNS) > $@

@ if [ -s "$@" ]; then \

cat $@; \

mv $@ $(NONEMPTY_WARN_REPORT); \

echo "Compiler produced one or more warnings."; \

exit 1; \

  fi; \

  $(RM) $(NONEMPTY_WARN_REPORT); \

  echo "No compiler warnings!";

$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o

$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked

# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link

# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it

# exists and $(DEBUG) is toggled later.

$(BUILD_DIR)/.linked:

@ mkdir -p $(BUILD_DIR)

@ $(RM) $(OTHER_BUILD_DIR)/.linked

@ $(RM) -r $(BUILD_DIR_LINK)

@ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK)

@ touch $@

$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK)

@ mkdir -p $@

$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR)

@ echo LD -o $@

$(Q)$(CXX) -shared -o $@ $(OBJS) $(VERSIONFLAGS) $(LINKFLAGS) $(LDFLAGS)

@ cd $(BUILD_DIR)/lib; rm -f $(DYNAMIC_NAME_SHORT);  ln -s $(DYNAMIC_VERSIONED_NAME_SHORT) $(DYNAMIC_NAME_SHORT)

$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR)

@ echo AR -o $@

$(Q)ar rcs $@ $(OBJS)

$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS)

@ echo CXX $<

$(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \

|| (cat $@.$(WARNS_EXT); exit 1)

@ cat $@.$(WARNS_EXT)

$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \

| $(PROTO_BUILD_DIR)

@ echo CXX $<

$(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \

|| (cat $@.$(WARNS_EXT); exit 1)

@ cat $@.$(WARNS_EXT)

$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS)

@ echo NVCC $<

$(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \

-odir $(@D)

$(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \

|| (cat $@.$(WARNS_EXT); exit 1)

@ cat $@.$(WARNS_EXT)

$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \

| $(DYNAMIC_NAME) $(TEST_BIN_DIR)

@ echo CXX/LD -o $@ $<

$(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \

-o $@ $(LINKFLAGS) $(LDFLAGS) -l$(LIBRARY_NAME) -Wl,-rpath,$(ORIGIN)/../lib

$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \

$(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR)

@ echo LD $<

$(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \

-o $@ $(LINKFLAGS) $(LDFLAGS) -l$(LIBRARY_NAME) -Wl,-rpath,$(ORIGIN)/../lib

$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \

$(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR)

@ echo LD $<

$(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \

-o $@ $(LINKFLAGS) $(LDFLAGS) -l$(LIBRARY_NAME) -Wl,-rpath,$(ORIGIN)/../lib

# Target for extension-less symlinks to tool binaries with extension '*.bin'.

$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR)

@ $(RM) $@

@ ln -s $(notdir $<) $@

$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME)

@ echo CXX/LD -o $@

$(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(LIBRARY_NAME) $(LDFLAGS) \

-Wl,-rpath,$(ORIGIN)/../lib

$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME)

@ echo CXX/LD -o $@

$(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(LIBRARY_NAME) $(LDFLAGS) \

-Wl,-rpath,$(ORIGIN)/../../lib

proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER)

$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \

$(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR)

@ echo PROTOC $<

$(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $<

$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \

$(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR)

@ echo PROTOC \(python\) $<

$(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $<

$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR)

touch $(PY_PROTO_INIT)

clean:

@- $(RM) -rf $(ALL_BUILD_DIRS)

@- $(RM) -rf $(OTHER_BUILD_DIR)

@- $(RM) -rf $(BUILD_DIR_LINK)

@- $(RM) -rf $(DISTRIBUTE_DIR)

@- $(RM) $(PY$(PROJECT)_SO)

@- $(RM) $(MAT$(PROJECT)_SO)

supercleanfiles:

$(eval SUPERCLEAN_FILES := $(strip \

$(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . -name '*$(ext)' \

-not -path './data/*'))))

supercleanlist: supercleanfiles

@ \

if [ -z "$(SUPERCLEAN_FILES)" ]; then \

echo "No generated files found."; \

else \

echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \

fi

superclean: clean supercleanfiles

@ \

if [ -z "$(SUPERCLEAN_FILES)" ]; then \

echo "No generated files found."; \

else \

echo "Deleting the following generated files:"; \

echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \

$(RM) $(SUPERCLEAN_FILES); \

fi

$(DIST_ALIASES): $(DISTRIBUTE_DIR)

$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS)

# add proto

cp -r src/caffe/proto $(DISTRIBUTE_DIR)/

# add include

cp -r include $(DISTRIBUTE_DIR)/

mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto

cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto

# add tool and example binaries

cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin

cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin

# add libraries

cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib

install -m 644 $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib

cd $(DISTRIBUTE_DIR)/lib; rm -f $(DYNAMIC_NAME_SHORT);  ln -s $(DYNAMIC_VERSIONED_NAME_SHORT) $(DYNAMIC_NAME_SHORT)

# add python - it's not the standard way, indeed...

cp -r python $(DISTRIBUTE_DIR)/python

-include $(DEPS)

修改完成后开始编译caffe-ssd,编译命令如下;

make all -j10

make test -j10

make runtest -j10

make pycaffe

如果编译过程中出现CUDNN相关的提醒,如下:

ubuntu配置SSD_第2张图片

表示caffe-ssd中cuda版本不匹配,这个的解决方法就是使用之前编译过的caffe里面的文件来替代掉caffe-ssd里面的文件,具体的为:

使用caffe/include/caffe/util/cudnn.hpp替代掉caffe-ssd/include/caffe/util/cudnn.hpp

使用caffe/include/caffe/layers/下面cudnn_开头的所有文件替代掉caffe-ssd/include/caffe/layers/下面cudnn_开头的所有文件

使用caffe/src/caffe/util/cudnn.cpp替代掉caffe-ssd/src/caffe/util/cudnn.cpp

使用caffe/src/caffe/layers/ 下面cudnn_开头的所有文件替代掉caffe-ssd/src/caffe/layers/ 下面cudnn_开头的所有文件

完成上述步骤之后输入 make clean,然后重新开始编译

如果出现math_functions.h的提醒的话需要做的修改如下:

ubuntu配置SSD_第3张图片

在src/caffe/util/math_functions.cu,做出如下修改:

修改之后继续make clean 然后重新编译

如下图:

ubuntu配置SSD_第4张图片

表示已经编译成功。

将其添加进环境变量:

vim ~/.bashrc

在文档最后面添加:

export PYTHONPATH=/root/caffe-ssd/python:$PYTHONPATH

之后使环境变量生效:source ~/.bashrc

2.下载预训练模型

在models文件夹下新建一个文件夹VGGNet,将下载下来的模型存储在里面。

vgg16网络的基础模型位置:

链接:https://pan.baidu.com/s/14QRHqN90N6tMFLXykqp0Ng

提取码:we2w

这些处理好之后可以准备数据集了,下一步将介绍数据集的问题。

3.数据集

你可以选择使用voc格式数据集进行测试,也可以使用自己制作的voc格式的数据集进行训练,使用LabelImage标注工具标注后,得到自己的数据集。具体如何制作VOC格式数据集请自行搜索,或者我后面有时间的话再更新此博客。

在个人的根目录下面创建data目录,将自己的数据集放在该目录下:

ubuntu配置SSD_第5张图片

JPEGImages里面是图片的jpg文件,Annotations文件夹里面是标注的xml文件,而ImageSets里面有个Main文件夹,其内部是划分的训练集、测试集文件,在此目录下新建一个文件:classify.py。下面将使用这段代码对数据集进行划分:

import os
import random

# Fraction of all annotated samples that go into the trainval split; the
# remainder becomes the test split (0.8334 of 3000 -> ~2500 trainval / 500 test).
trainval_percent = 0.8334
# Fraction of trainval that goes into train; 1 means no separate val set.
train_percent = 1

# Directory holding the VOC-style .xml annotation files.
xmlfilepath = 'Annotations'
# Output directory for the split lists. The original used the Windows-style
# 'ImageSets\Main', which is wrong on Ubuntu; use os.path.join instead.
txtsavepath = os.path.join('ImageSets', 'Main')


def split_indices(num, trainval_fraction, train_fraction):
    """Randomly partition range(num) into the trainval and train index lists.

    Args:
        num: total number of samples.
        trainval_fraction: fraction of samples placed into trainval.
        train_fraction: fraction of trainval placed into train.

    Returns:
        (trainval, train): two lists of indices, with train drawn from trainval.
    """
    tv = int(num * trainval_fraction)
    tr = int(tv * train_fraction)
    trainval = random.sample(range(num), tv)
    train = random.sample(trainval, tr)
    return trainval, train


def main():
    """Write trainval/train/val/test name lists under ImageSets/Main."""
    total_xml = os.listdir(xmlfilepath)
    num = len(total_xml)
    trainval, train = split_indices(num, trainval_percent, train_percent)
    # Sets give O(1) membership tests instead of O(n) list scans per sample.
    trainval_set = set(trainval)
    train_set = set(train)
    # Context managers guarantee the four list files are flushed and closed
    # even if an error occurs mid-loop (the original never closed reliably).
    with open(os.path.join(txtsavepath, 'trainval.txt'), 'w') as ftrainval, \
         open(os.path.join(txtsavepath, 'test.txt'), 'w') as ftest, \
         open(os.path.join(txtsavepath, 'train.txt'), 'w') as ftrain, \
         open(os.path.join(txtsavepath, 'val.txt'), 'w') as fval:
        for i in range(num):
            # Strip the 4-character '.xml' suffix to get the sample name.
            name = total_xml[i][:-4] + '\n'
            if i in trainval_set:
                ftrainval.write(name)
                if i in train_set:
                    ftrain.write(name)
                else:
                    fval.write(name)
            else:
                ftest.write(name)


if __name__ == '__main__':
    main()

这种划分的目标是训练集2500张,测试集500张。最终标准的数据集构造完成,下面将使用这些数据集生成LMDB文件:

1.在caffe-ssd/data文件夹下创建新文件夹VOC2007,并将VOC0712内的三个文件拷贝到VOC2007内:

ubuntu配置SSD_第6张图片

打开labelmap_voc.prototxt文件并进行修改:

文件中的具体内容根据你的标记来:

ubuntu配置SSD_第7张图片

修改create_list.sh:

root_dir=/root/data/

...

for name in VOC2007

...

#if [[ $dataset == "test" && $name == "VOC2012" ]]

# then

#  continue

# fi


ubuntu配置SSD_第8张图片

修改create_data.sh

root_dir=/root/caffe-ssd

data_root_dir="/root/data"

dataset_name="VOC2007"

如下:


ubuntu配置SSD_第9张图片

在caffe-ssd/examples文件下创建文件夹VOC2007用来存放LMDB文件。

之后回到caffe-ssd的根目录,输入以下命令:

./data/VOC2007/create_list.sh

./data/VOC2007/create_data.sh

如下图:

ubuntu配置SSD_第10张图片

同时在caffe-ssd/examples/VOC2007文件夹下面

ubuntu配置SSD_第11张图片

下面开始进行训练的准备:

在caffe-ssd/examples/ssd目录下新建文件:ssd_pg.py,将ssd_pascal.py里面的内容拷到ssd_pg.py里面去:

并进行一定的修改:

第85行:

# The database file for training data. Created by data/VOC0712/create_data.sh

train_data = "examples/VOC2007/VOC2007_trainval_lmdb"

# The database file for testing data. Created by data/VOC0712/create_data.sh

test_data = "examples/VOC2007/VOC2007_test_lmdb"

第232行:

学习率除以10,如下:

base_lr = 0.000004

第237行开始

model_name = "VGG_VOC2007_{}".format(job_name)

# Directory which stores the model .prototxt file.

save_dir = "models/VGGNet/VOC2007/{}".format(job_name)

# Directory which stores the snapshot of models.

snapshot_dir = "models/VGGNet/VOC2007/{}".format(job_name)

# Directory which stores the job script and log file.

job_dir = "jobs/VGGNet/VOC2007/{}".format(job_name)

# Directory which stores the detection results.

output_result_dir = "{}/data/VOC2007/results/VOC2007/{}/Main".format(os.environ['HOME'], job_name)

第264行开始:

# Stores the test image names and sizes. Created by data/VOC0712/create_list.sh

name_size_file = "data/VOC2007/test_name_size.txt"

# The pretrained model. We use the Fully convolutional reduced (atrous) VGGNet.

pretrain_model = "models/VGGNet/VGG_ILSVRC_16_layers_fc_reduced.caffemodel"

# Stores LabelMapItem.

label_map_file = "data/VOC2007/labelmap_voc.prototxt"

第266行:

num_classes = 6

第332行:

gpus = "0"

第337行:

batch_size = 8

accum_batch_size = 8

第359行:

num_test_image = 500

test_batch_size = 8

第366行:

# Train parameters

    'base_lr': base_lr,

    'weight_decay': 0.0005,

    'lr_policy': "multistep",

    'stepvalue': [80000, 100000, 120000],

    'gamma': 0.1,

    'momentum': 0.9,

    'iter_size': iter_size,

    'max_iter': 60000,

    'snapshot':5000,

    'display': 10,

    'average_loss': 10,

    'type': "SGD",

    'solver_mode': solver_mode,

    'device_id': device_id,

    'debug_info': False,

    'snapshot_after_train': True,

    # Test parameters

    'test_iter': [test_iter],

    'test_interval': 2500,

    'eval_type': "detection",

      'show_per_class_result': True,

    'ap_version': "11point",

    'test_initialization': False,

完成以上修改后可以开始训练了:

输入命令开始训练

python ./examples/ssd/ssd_pg.py

训练过程:

ubuntu配置SSD_第12张图片

你可能感兴趣的:(ubuntu配置SSD)