Project archive: https://pan.baidu.com/s/1P_p46ahzDcnyBmE6Pn0cGw (extraction code: nc5l)
Dependency package: https://pan.baidu.com/s/1kMPzXAU2a5YIJptegp1x0g (extraction code: 3ue8)
1. Environment Setup
Unpack the project archive to get the deepstream-plugins folder, then unpack the DeepStream 2.0 dependency package into that same deepstream-plugins folder.
Install GStreamer:
sudo apt-get install gstreamer1.0-plugins-base gstreamer1.0-plugins-bad gstreamer1.0-libav gstreamer1.0-plugins-bad-videoparsers gstreamer1.0-plugins-good gstreamer1.0-plugins-ugly libgstreamer1.0-0 libgstreamer1.0-dev python3-gst-1.0
The setup verified here: TensorRT-5.0.2.6, CUDA 9.0, opencv-4.0.0-alpha (the dnn module is required).
Edit Makefile.config in the project root; in this setup OpenCV is installed under /usr/local:
# MIT License
# Copyright (c) 2018 NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#Update the install directory paths for dependencies below
CXX=g++
CUDA_VER:=9.0
#Set to TEGRA for jetson or TESLA for dGPU's
PLATFORM:=TESLA
#For Tesla Plugins
OPENCV_INSTALL_DIR:= /usr/local
TENSORRT_INSTALL_DIR:= /usr/local/TensorRT-5.0.2.6
DEEPSTREAM_INSTALL_DIR:= /deepstream-plugins/DeepStream_Release
#For Tegra Plugins
NVGSTIVA_APP_INSTALL_DIR:= /path/to/nvgstiva-app_sources
Edit ./sources/lib/Makefile:
# (MIT license header, identical to the one shown above, omitted)
CONFIG :=../../Makefile.config
ifeq ($(wildcard $(CONFIG)),)
$(error $(CONFIG) missing.)
endif
include $(CONFIG)
ifeq ($(CUDA_VER),)
$(error "CUDA_VER variable is not set in Makefile.config")
endif
ifeq ($(PLATFORM),)
$(error PLATFORM variable is not set in Makefile.config)
endif
CUCC:=/usr/local/cuda-$(CUDA_VER)/bin/nvcc
SRCS:= $(wildcard *.cpp)
KERNELS:= $(wildcard *.cu)
BUILD_PATH:= ./build/
OBJS= $(patsubst %, $(BUILD_PATH)%, $(SRCS:.cpp=.o))
OBJS+=$(patsubst %, $(BUILD_PATH)%, $(KERNELS:.cu=.o))
DEPS:= $(SRCS)
DEPS+= $(KERNELS)
DEPS+= $(wildcard *.h)
TARGET:= libyoloplugin.a
ifeq ($(PLATFORM), TESLA)
INCS:= -I"$(TENSORRT_INSTALL_DIR)/include" \
-I"/usr/local/cuda-$(CUDA_VER)/include" \
-I "$(OPENCV_INSTALL_DIR)/include/opencv4"
LIBS:= -L"$(TENSORRT_INSTALL_DIR)/lib" -lnvinfer -lnvinfer_plugin -Wl,-rpath="$(TENSORRT_INSTALL_DIR)/lib" \
-L"/usr/local/cuda-$(CUDA_VER)/lib64" -lcudart -lcublas -lcurand -Wl,-rpath="/usr/local/cuda-$(CUDA_VER)/lib64" \
-L "$(OPENCV_INSTALL_DIR)/lib" -lopencv_core -lopencv_imgproc -lopencv_imgcodecs -lopencv_highgui -lopencv_dnn -Wl,-rpath="$(OPENCV_INSTALL_DIR)/lib"
endif
ifeq ($(PLATFORM), TEGRA)
INCS:= -I"usr/include/aarch64-linux-gnu" \
-I"/usr/local/cuda-$(CUDA_VER)/include" \
-I "/usr/include"
LIBS:= -L "/usr/lib/aarch64-linux-gnu" -lnvinfer -lnvinfer_plugin -Wl,-rpath="/usr/lib/aarch64-linux-gnu" \
-L "/usr/local/cuda-$(CUDA_VER)/lib64" -lcudart -lcublas -lcurand -Wl,-rpath="/usr/local/cuda-$(CUDA_VER)/lib64" \
-L "/usr/lib" -lopencv_core -lopencv_imgproc -lopencv_imgcodecs -lopencv_highgui -lopencv_dnn -Wl,-rpath="/usr/lib"
endif
CXXFLAGS:= -O2 -std=c++11 -lstdc++fs -ldl -fPIC -Wall -Wunused-function -Wunused-variable -Wfatal-errors $(shell pkg-config --cflags glib-2.0)
.PHONY: all dirs clean deps
all: dirs deps
	ar rcs $(TARGET) $(OBJS)

dirs:
	if [ ! -d "models" ]; then mkdir -p models; fi
	if [ ! -d "calibration" ]; then mkdir -p calibration; fi
	if [ ! -d "build" ]; then mkdir -p build; fi
	if [ ! -d "../../../data/detections" ]; then mkdir -p ./../../data/detections; fi

deps: $(DEPS) $(OBJS)

$(BUILD_PATH)%.o: %.cpp %.h network_config.h network_config.cpp
	$(CXX) $(INCS) -c -o $@ $(CXXFLAGS) $<

$(BUILD_PATH)%.o: %.cu
	$(CUCC) -c -o $@ -arch=compute_50 --shared -Xcompiler -fPIC $<

clean:
	rm -f ./build/*
	rm -f ./*.a

clean_models:
	rm -rf ./models/*

clean_detections:
	rm -rf ../../data/detections/*
Edit ./sources/apps/trt-yolo/Makefile:
# (MIT license header, identical to the one shown above, omitted)
CONFIG :=../../Makefile.config
ifeq ($(wildcard $(CONFIG)),)
$(error $(CONFIG) missing.)
endif
include $(CONFIG)
ifeq ($(CUDA_VER),)
$(error "CUDA_VER variable is not set in Makefile.config")
endif
ifeq ($(PLATFORM),)
$(error PLATFORM variable is not set in Makefile.config)
endif
CUCC:=/usr/local/cuda-$(CUDA_VER)/bin/nvcc
SRCS:= $(wildcard *.cpp)
KERNELS:= $(wildcard *.cu)
BUILD_PATH:= ./build/
OBJS= $(patsubst %, $(BUILD_PATH)%, $(SRCS:.cpp=.o))
OBJS+=$(patsubst %, $(BUILD_PATH)%, $(KERNELS:.cu=.o))
DEPS:= $(SRCS)
DEPS+= $(KERNELS)
DEPS+= $(wildcard *.h)
TARGET:= libyoloplugin.a
ifeq ($(PLATFORM), TESLA)
INCS:= -I"$(TENSORRT_INSTALL_DIR)/include" \
-I"/usr/local/cuda-$(CUDA_VER)/include" \
-I "$(OPENCV_INSTALL_DIR)/include/opencv4"
LIBS:= -L"$(TENSORRT_INSTALL_DIR)/lib" -lnvinfer -lnvinfer_plugin -Wl,-rpath="$(TENSORRT_INSTALL_DIR)/lib" \
-L"/usr/local/cuda-$(CUDA_VER)/lib64" -lcudart -lcublas -lcurand -Wl,-rpath="/usr/local/cuda-$(CUDA_VER)/lib64" \
-L "$(OPENCV_INSTALL_DIR)/lib" -lopencv_core -lopencv_imgproc -lopencv_imgcodecs -lopencv_highgui -lopencv_dnn -Wl,-rpath="$(OPENCV_INSTALL_DIR)/lib"
endif
ifeq ($(PLATFORM), TEGRA)
INCS:= -I"usr/include/aarch64-linux-gnu" \
-I"/usr/local/cuda-$(CUDA_VER)/include" \
-I "/usr/include"
LIBS:= -L "/usr/lib/aarch64-linux-gnu" -lnvinfer -lnvinfer_plugin -Wl,-rpath="/usr/lib/aarch64-linux-gnu" \
-L "/usr/local/cuda-$(CUDA_VER)/lib64" -lcudart -lcublas -lcurand -Wl,-rpath="/usr/local/cuda-$(CUDA_VER)/lib64" \
-L "/usr/lib" -lopencv_core -lopencv_imgproc -lopencv_imgcodecs -lopencv_highgui -lopencv_dnn -Wl,-rpath="/usr/lib"
endif
CXXFLAGS:= -O2 -std=c++11 -lstdc++fs -ldl -fPIC -Wall -Wunused-function -Wunused-variable -Wfatal-errors $(shell pkg-config --cflags glib-2.0)
.PHONY: all dirs clean deps
all: dirs deps
	ar rcs $(TARGET) $(OBJS)

dirs:
	if [ ! -d "models" ]; then mkdir -p models; fi
	if [ ! -d "calibration" ]; then mkdir -p calibration; fi
	if [ ! -d "build" ]; then mkdir -p build; fi
	if [ ! -d "../../../data/detections" ]; then mkdir -p ./../../data/detections; fi

deps: $(DEPS) $(OBJS)

$(BUILD_PATH)%.o: %.cpp %.h network_config.h network_config.cpp
	$(CXX) $(INCS) -c -o $@ $(CXXFLAGS) $<

$(BUILD_PATH)%.o: %.cu
	$(CUCC) -c -o $@ -arch=compute_50 --shared -Xcompiler -fPIC $<

clean:
	rm -f ./build/*
	rm -f ./*.a

clean_models:
	rm -rf ./models/*

clean_detections:
	rm -rf ../../data/detections/*
Building directly under ./sources/apps/trt-yolo/ fails with the following error:
trt_utils.h:83:22: error: ‘nvinfer1::DimsHW YoloTinyMaxpoolPaddingFormula::compute(nvinfer1::DimsHW, nvinfer1::DimsHW, nvinfer1::DimsHW, nvinfer1::DimsHW, nvinfer1::DimsHW, const char*)’ marked ‘override’, but does not override
nvinfer1::DimsHW compute(nvinfer1::DimsHW inputDims, nvinfer1::DimsHW kernelSize,
^~~~~~~
compilation terminated due to -Wfatal-errors.
After consulting an NVIDIA engineer on the NVIDIA Developer Forums, the fix is to modify ./sources/lib/trt_utils.h: in TensorRT 5 the base-class method nvinfer1::IOutputDimensionsFormula::compute() is const-qualified, so the overriding method must be declared const as well (note the const before override below):
class YoloTinyMaxpoolPaddingFormula : public nvinfer1::IOutputDimensionsFormula
{
private:
    std::set<std::string> m_SamePaddingLayers;

    nvinfer1::DimsHW compute(nvinfer1::DimsHW inputDims, nvinfer1::DimsHW kernelSize,
                             nvinfer1::DimsHW stride, nvinfer1::DimsHW padding,
                             nvinfer1::DimsHW dilation, const char* layerName) const override
    {
        assert(inputDims.d[0] == inputDims.d[1]);
        assert(kernelSize.d[0] == kernelSize.d[1]);
        assert(stride.d[0] == stride.d[1]);
        assert(padding.d[0] == padding.d[1]);

        int outputDim;
        // Only layer maxpool_12 makes use of same padding
        if (m_SamePaddingLayers.find(layerName) != m_SamePaddingLayers.end())
        {
            outputDim = (inputDims.d[0] + 2 * padding.d[0]) / stride.d[0];
        }
        // Valid Padding
        else
        {
            outputDim = (inputDims.d[0] - kernelSize.d[0]) / stride.d[0] + 1;
        }
        return nvinfer1::DimsHW{outputDim, outputDim};
    }

public:
    void addSamePaddingLayer(std::string input) { m_SamePaddingLayers.insert(input); }
};
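For context, this padding formula only takes effect once it is registered with the TensorRT network definition; the project's engine-building code presumably does something along the following lines. This is only a sketch assuming TensorRT 5's INetworkDefinition::setPoolingOutputDimensionsFormula() API; the function name attachPaddingFormula and the variable names are illustrative, not the project's actual code.

#include "NvInfer.h"
#include "trt_utils.h"

// Sketch: register the custom pooling-output formula so that the layer
// added via addSamePaddingLayer() ("maxpool_12" per the comment above)
// uses "same" padding while every other pooling layer keeps "valid" padding.
// The formula object must stay alive until the engine has been built.
void attachPaddingFormula(nvinfer1::INetworkDefinition* network,
                          YoloTinyMaxpoolPaddingFormula* paddingFormula)
{
    paddingFormula->addSamePaddingLayer("maxpool_12");
    network->setPoolingOutputDimensionsFormula(paddingFormula);
}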
In ./sources/lib/ds_image.cpp, replace CV_LOAD_IMAGE_COLOR with 1 (this legacy constant is no longer exposed by the default OpenCV 4 headers; its value, 1, is the same as cv::IMREAD_COLOR), as sketched below.
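For reference, a minimal sketch of the kind of change this is; the actual line in ds_image.cpp will look different, and loadColorImage is a made-up helper used only to illustrate the flag.

#include <opencv2/imgcodecs.hpp>
#include <string>

// CV_LOAD_IMAGE_COLOR is a legacy OpenCV 1.x/2.x constant that is not
// visible through the default OpenCV 4 headers. Its value is 1, the same
// as cv::IMREAD_COLOR, so the image is still loaded as 3-channel BGR.
cv::Mat loadColorImage(const std::string& path)
{
    // was: cv::imread(path, CV_LOAD_IMAGE_COLOR);
    return cv::imread(path, 1); // 1 == cv::IMREAD_COLOR
}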
The build now succeeds.
2. Network Configuration
Taking yolov3-voc.cfg as an example, define the MODEL_V3 macro in ./sources/lib/network_config.h.
Then update the corresponding parameters in ./sources/lib/network_config.cpp.
Setting kPRECISION to kFLOAT, kINT8, or kHALF selects FP32, INT8, or FP16 precision, respectively (see the sketch below).
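To make the precision option concrete, here is a minimal sketch of the mapping. Whether network_config.cpp actually stores the setting as an nvinfer1::DataType or in some other form may differ, so treat the constant below as an assumption rather than the project's exact code.

#include "NvInfer.h"

namespace config
{
    // The model variant (e.g. MODEL_V3) is chosen via the macro in
    // network_config.h, as described above.
    // TensorRT precision selection: nvinfer1::DataType::kFLOAT -> FP32,
    // nvinfer1::DataType::kHALF -> FP16, nvinfer1::DataType::kINT8 -> INT8.
    const nvinfer1::DataType kPRECISION = nvinfer1::DataType::kFLOAT;
}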
Under the project root, place the network config, model weights, calibration images, and test images in their corresponding locations.
Rebuild under ./sources/apps/trt-yolo/ to produce the trt-yolo-app executable.
Run it from the project root:
./sources/apps/trt-yolo/trt-yolo-app
3. Inference Speed Comparison
Comparing inference times on a GTX 1080 Ti (which has no fast FP16 support), with Darknet-53 backbone weights: FP32 inference takes 9.4 ms and INT8 inference (batch size 1) takes 5.9 ms, versus 20.1 ms for FP32 inference under the original darknet framework, so the speedup is substantial.
Original project: https://github.com/NVIDIA-AI-IOT/deepstream_reference_apps
The upstream project is still being updated and has recently added a TensorRT model for SENet; it is worth trying if you are interested.
The project used in this post was downloaded around November 2018, which is why it is provided here as an archive. It differs considerably from the current upstream framework; in particular, the authors have since factored the network configuration out into its own component, which makes it easier to use, so the newer framework is recommended.
Reference: https://blog.csdn.net/cgt19910923/article/details/88847228