运行环境参考https://blog.csdn.net/shajiayu1/article/details/102669346
《JETSON-Nano刷机运行deepstream4.0的demo》。我们这里运行yoloV3-tiny的示例。
该示例是deepstream4.0和tensorRT的综合实例。
工程路径deepstream_sdk_v4.0.1_jetson\sources\objectDetector_Yolo
1.首先下载权重文件和配置文件。
执行这个脚本 prebuild.sh。可以把脚本里的不需要的部分注释掉。提高下载速度。
./prebuild.sh
2.编译工程
cd objectDetector_Yolo
export CUDA_VER=10.0
make -C nvdsinfer_custom_impl_Yolo
这时候会在nvdsinfer_custom_impl_Yolo文件夹里生成.so文件
3.运行示例
deepstream-app -c deepstream_app_config_yoloV3_tiny.txt
一、首先是sources\objectDetector_Yolo文件夹里配置文件、权重文件和相关的库文件。
deepstream_app_config_yoloV3_tiny.txt deepstream配置文件
config_infer_primary_yoloV3_tiny.txt 推理插件(nvinfer/主检测器)的配置文件
nvdsinfer_yolo_engine.cpp 根据网络类型创建引擎
nvdsparsebbox_Yolo.cpp yolo目标检测结果的输出
yoloPlugins.h
yoloPlugins.cpp 模型搭建的一些组件以及相应的实现
kernels.cu cuda核最底层的实现
trt_utils.h
trt_utils.cpp 建立tensorRT网络的部分,已经支持的部分
yolo.h
yolo.cpp 创建引擎、创建网络等的具体实现
二、另外一个文件夹
sources\apps\sample_apps\deepstream-app
deepstream_app.c pipeline的一些操作在这里
deepstream_app.h
deepstream_app_config_parser.c 配置文件的解析
deepstream_app_main.c deepstream主函数
该程序分为两个部分:
一部分是tensorRT部分 就是sources\objectDetector_Yolo文件夹里的,编译后会生成动态库文件。也就是libnvdsinfer_custom_impl_Yolo.so。另一部分是deepstream的文件,在sources\apps\sample_apps\deepstream-app文件夹里。
首先说明下tensorRT几个文件的关系:
nvdsinfer_yolo_engine.cpp 根据网络类型创建引擎
nvdsparsebbox_Yolo.cpp yolo目标检测结果的输出
yoloPlugins.cpp 模型搭建的一些组件以及相应的实现
kernels.cu cuda核最底层的实现
trt_utils.cpp 建立tensorRT网络的部分,已经支持的部分
yolo.cpp 创建引擎、创建网络等的具体实现
其中yolo.cpp实现了网络层的搭建。是核心代码部分。贴一段该文件的代码,该代码实现了卷积层的搭建:
// Handle a "convolutional" section of the parsed Darknet cfg: add the matching
// convolution layer to the TensorRT network being built.
else if (m_configBlocks.at(i).at("type") == "convolutional") // this cfg section is a convolutional layer
{
std::string inputVol = dimsToString(previous->getDimensions());// input tensor dimensions (kept for the log line below)
nvinfer1::ILayer* out;
std::string layerType;// human-readable tag for this layer, used only for logging
// check if batch_norm enabled
if (m_configBlocks.at(i).find("batch_normalize") != m_configBlocks.at(i).end())
{
out = netAddConvBNLeaky(i, m_configBlocks.at(i), weights, trtWeights, weightPtr,// conv + batch-norm + leaky ReLU
channels, previous, network);
layerType = "conv-bn-leaky";
}
else
{
out = netAddConvLinear(i, m_configBlocks.at(i), weights, trtWeights, weightPtr, // conv without batch-norm (linear activation)
channels, previous, network);
layerType = "conv-linear";
}
previous = out->getOutput(0);// this layer's output becomes the next layer's input
assert(previous != nullptr);
channels = getNumChannels(previous);
std::string outputVol = dimsToString(previous->getDimensions());// output tensor dimensions (for logging)
tensorOutputs.push_back(out->getOutput(0));// record this layer's output tensor
printLayerInfo(layerIndex, layerType, inputVol, outputVol, std::to_string(weightPtr));// print a one-line layer summary
}
下面说明一下deepstream部分代码:
deepstream_app.c pipeline的一些操作在这里
deepstream_app_config_parser.c 配置文件的解析
deepstream_app_main.c deepstream主函数
deepstream会调用动态库文件libnvdsinfer_custom_impl_Yolo.so实现网络层的搭建等功能。
=================================================
deepstream_app_main.c 主要完成的功能是
1.配置文件的解析。deepstream_app_config_parser.c文件完成
2.pipeline的建立。 deepstream_app.c 文件完成
3.图像的显示。
首先看下deepstream_app_main.c
/**
 * deepstream-app entry point.
 *
 * Flow:
 *   1. Parse command-line options (app options + GStreamer's option group).
 *   2. Parse each "-c" config file into an AppCtx (parse_config_file()).
 *   3. Build one GStreamer pipeline per config (create_pipeline()).
 *   4. Create an X window per render sink, set pipelines PLAYING and run
 *      the GLib main loop.
 *   5. On exit (or on any error via "goto done"), tear down pipelines,
 *      windows and the X display.
 *
 * Returns 0 on success, -1 on any failure.
 */
int
main (int argc, char *argv[])
{
  GOptionContext *ctx = NULL;
  GOptionGroup *group = NULL;
  GError *error = NULL;
  guint i;

  ctx = g_option_context_new ("Nvidia DeepStream Demo");
  group = g_option_group_new ("abc", NULL, NULL, NULL, NULL);
  g_option_group_add_entries (group, entries);  /* register app options in the group */
  g_option_context_set_main_group (ctx, group); /* attach the group to the context */
  g_option_context_add_group (ctx, gst_init_get_option_group ());

  GST_DEBUG_CATEGORY_INIT (NVDS_APP, "NVDS_APP", 0, NULL);

  /* Parse the command line; this also initializes GStreamer through the
   * option group added above. */
  if (!g_option_context_parse (ctx, &argc, &argv, &error)) {
    NVGSTDS_ERR_MSG_V ("%s", error->message);
    return -1;
  }

  if (print_version) {
    g_print ("deepstream-app version %d.%d.%d\n",
        NVDS_APP_VERSION_MAJOR, NVDS_APP_VERSION_MINOR, NVDS_APP_VERSION_MICRO);
    nvds_version_print ();
    return 0;
  }

  if (print_dependencies_version) {
    g_print ("deepstream-app version %d.%d.%d\n",
        NVDS_APP_VERSION_MAJOR, NVDS_APP_VERSION_MINOR, NVDS_APP_VERSION_MICRO);
    nvds_version_print ();
    nvds_dependencies_version_print ();
    return 0;
  }

  /* One app instance per "-c" config file, e.g.
   *   cfg_files[0] = deepstream_app_config_yoloV3_tiny.txt */
  if (cfg_files) {
    num_instances = g_strv_length (cfg_files);
  }
  if (input_files) {
    num_input_files = g_strv_length (input_files);
  }

  memset (source_ids, -1, sizeof (source_ids));

  if (!cfg_files || num_instances == 0) {
    NVGSTDS_ERR_MSG_V ("Specify config file with -c option");
    return_value = -1;
    goto done;
  }

  for (i = 0; i < num_instances; i++) {
    appCtx[i] = g_malloc0 (sizeof (AppCtx));
    appCtx[i]->person_class_id = -1;
    appCtx[i]->car_class_id = -1;
    appCtx[i]->index = i;
    if (show_bbox_text) {
      appCtx[i]->show_bbox_text = TRUE;
    }

    if (input_files && input_files[i]) {
      appCtx[i]->config.multi_source_config[0].uri =
          g_strdup_printf ("file://%s", input_files[i]);
      g_free (input_files[i]);
    }

    /* Parse the config file contents into appCtx[i]->config.
     * This is the key configuration step. */
    if (!parse_config_file (&appCtx[i]->config, cfg_files[i])) {
      NVGSTDS_ERR_MSG_V ("Failed to parse config file '%s'", cfg_files[i]);
      appCtx[i]->return_value = -1;
      goto done;
    }
  }

  /* Build the GStreamer pipeline for each parsed config — the core of the
   * deepstream part. */
  for (i = 0; i < num_instances; i++) {
    if (!create_pipeline (appCtx[i], NULL,
            all_bbox_generated, perf_cb, overlay_graphics)) {
      NVGSTDS_ERR_MSG_V ("Failed to create pipeline");
      return_value = -1;
      goto done;
    }
  }

  main_loop = g_main_loop_new (NULL, FALSE);
  _intr_setup ();
  g_timeout_add (400, check_for_interrupt, NULL);

  g_mutex_init (&disp_lock);
  /* Display handling: one X window per render sink. */
  display = XOpenDisplay (NULL);
  for (i = 0; i < num_instances; i++) {
    guint j;

    if (gst_element_set_state (appCtx[i]->pipeline.pipeline,
            GST_STATE_PAUSED) == GST_STATE_CHANGE_FAILURE) {
      NVGSTDS_ERR_MSG_V ("Failed to set pipeline to PAUSED");
      return_value = -1;
      goto done;
    }

    if (!appCtx[i]->config.tiled_display_config.enable)
      continue;

    for (j = 0; j < appCtx[i]->config.num_sink_sub_bins; j++) {
      XTextProperty xproperty;
      gchar *title;
      guint width, height;

      if (!GST_IS_VIDEO_OVERLAY (appCtx[i]->pipeline.instance_bins[0].
              sink_bin.sub_bins[j].sink)) {
        continue;
      }

      if (!display) {
        NVGSTDS_ERR_MSG_V ("Could not open X Display");
        return_value = -1;
        goto done;
      }

      /* Window size: per-sink render config wins, then the tiled-display
       * size, then the compile-time defaults. */
      if (appCtx[i]->config.sink_bin_sub_bin_config[j].render_config.width)
        width =
            appCtx[i]->config.sink_bin_sub_bin_config[j].render_config.width;
      else
        width = appCtx[i]->config.tiled_display_config.width;
      if (appCtx[i]->config.sink_bin_sub_bin_config[j].render_config.height)
        height =
            appCtx[i]->config.sink_bin_sub_bin_config[j].render_config.height;
      else
        height = appCtx[i]->config.tiled_display_config.height;
      width = (width) ? width : DEFAULT_X_WINDOW_WIDTH;
      height = (height) ? height : DEFAULT_X_WINDOW_HEIGHT;

      windows[i] =
          XCreateSimpleWindow (display, RootWindow (display,
              DefaultScreen (display)), 0, 0, width, height, 2, 0x00000000,
          0x00000000);
      if (num_instances > 1)
        /* BUGFIX: the original passed the *uninitialized* 'title' pointer as
         * the printf format string (g_strdup_printf (title, APP_TITLE "-%d",
         * i)), which is undefined behavior. The format must come first. */
        title = g_strdup_printf (APP_TITLE "-%d", i);
      else
        title = g_strdup (APP_TITLE);
      if (XStringListToTextProperty ((char **) &title, 1, &xproperty) != 0) {
        XSetWMName (display, windows[i], &xproperty);
        XFree (xproperty.value);
      }
      g_free (title);           /* BUGFIX: 'title' was leaked every iteration */

      XSetWindowAttributes attr = { 0 };
      if ((appCtx[i]->config.tiled_display_config.enable &&
              appCtx[i]->config.tiled_display_config.rows *
              appCtx[i]->config.tiled_display_config.columns == 1) ||
          (appCtx[i]->config.tiled_display_config.enable == 0 &&
              appCtx[i]->config.num_source_sub_bins == 1)) {
        attr.event_mask = KeyPress;
      } else {
        attr.event_mask = ButtonPress | KeyRelease;
      }
      XChangeWindowAttributes (display, windows[i], CWEventMask, &attr);

      Atom wmDeleteMessage = XInternAtom (display, "WM_DELETE_WINDOW", False);
      if (wmDeleteMessage != None) {
        XSetWMProtocols (display, windows[i], &wmDeleteMessage, 1);
      }
      XMapRaised (display, windows[i]);
      XSync (display, 1);       /* discard the events for now */
      gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY (appCtx
              [i]->pipeline.instance_bins[0].sink_bin.sub_bins[j].sink),
          (gulong) windows[i]);
      gst_video_overlay_expose (GST_VIDEO_OVERLAY (appCtx[i]->
              pipeline.instance_bins[0].sink_bin.sub_bins[j].sink));
      if (!x_event_thread)
        x_event_thread = g_thread_new ("nvds-window-event-thread",
            nvds_x_event_thread, NULL);
    }
  }

  /* Dont try to set playing state if error is observed */
  if (return_value != -1) {
    for (i = 0; i < num_instances; i++) {
      if (gst_element_set_state (appCtx[i]->pipeline.pipeline,
              GST_STATE_PLAYING) == GST_STATE_CHANGE_FAILURE) {
        g_print ("\ncan't set pipeline to playing state.\n");
        return_value = -1;
        goto done;
      }
    }
  }

  print_runtime_commands ();
  changemode (1);
  g_timeout_add (40, event_thread_func, NULL);
  g_main_loop_run (main_loop);
  changemode (0);

done:

  g_print ("Quitting\n");
  /* Release all resources. */
  for (i = 0; i < num_instances; i++) {
    /* BUGFIX: an early "goto done" (e.g. a parse failure on instance 0) can
     * reach here with later appCtx slots still NULL; dereferencing them
     * crashed. Skip never-allocated instances. */
    if (!appCtx[i])
      continue;
    if (appCtx[i]->return_value == -1)
      return_value = -1;
    destroy_pipeline (appCtx[i]);

    g_mutex_lock (&disp_lock);
    if (windows[i])
      XDestroyWindow (display, windows[i]);
    windows[i] = 0;
    g_mutex_unlock (&disp_lock);

    g_free (appCtx[i]);
  }

  g_mutex_lock (&disp_lock);
  if (display)
    XCloseDisplay (display);
  display = NULL;
  g_mutex_unlock (&disp_lock);
  g_mutex_clear (&disp_lock);

  if (main_loop) {
    g_main_loop_unref (main_loop);      /* drop our reference to the main loop */
  }

  if (ctx) {
    g_option_context_free (ctx);
  }

  if (return_value == 0) {
    g_print ("App run successful\n");
  } else {
    g_print ("App run failed\n");
  }
  gst_deinit ();
  return return_value;
}
然后是解析配置文件的代码:
//这个是解析配置文件的,各种类型
/**
 * parse_config_file:
 * @config: output — filled in with the parsed application configuration.
 * @cfg_file_path: path to a GKeyFile-style (ini) deepstream config file.
 *
 * Loads the config file and walks every [group] it contains, dispatching
 * each group name to the matching parse_* helper (application, sourceN,
 * streammux, osd, primary/secondary GIE, tracker, sinkN, tiled-display,
 * ds-example, tests). After parsing, validates that GIE ids are unique and
 * expands "multi-uri" sources into individual source entries.
 *
 * Returns TRUE on success, FALSE on any load/parse/validation error.
 */
gboolean
parse_config_file (NvDsConfig *config, gchar *cfg_file_path)
{
GKeyFile *cfg_file = g_key_file_new ();
GError *error = NULL;
gboolean ret = FALSE;
gchar **groups = NULL;
gchar **group;
guint i, j;
if (!APP_CFG_PARSER_CAT) {
GST_DEBUG_CATEGORY_INIT (APP_CFG_PARSER_CAT, "NVDS_CFG_PARSER", 0, NULL);
}
if (!g_key_file_load_from_file (cfg_file, cfg_file_path, G_KEY_FILE_NONE,
&error)) {
GST_CAT_ERROR (APP_CFG_PARSER_CAT, "Failed to load uri file: %s",
error->message);
goto done;
}
// all key/value data is now held in cfg_file
groups = g_key_file_get_groups (cfg_file, NULL);
// For the yoloV3-tiny demo config this yields e.g.:
//   groups[0]=application
//   groups[1]=tiled-display
//   groups[2]=source0
//   groups[3]=sink0
//   groups[4]=osd
//   ... and so on
for (group = groups; *group; group++) {
gboolean parse_err = FALSE;
GST_CAT_DEBUG (APP_CFG_PARSER_CAT, "Parsing group: %s", *group);
// [application]
if (!g_strcmp0 (*group, CONFIG_GROUP_APP)) {
parse_err = !parse_app (config, cfg_file, cfg_file_path);
}
// [sourceN] — prefix match so source0, source1, ... all hit here
if (!strncmp (*group, CONFIG_GROUP_SOURCE, sizeof (CONFIG_GROUP_SOURCE) - 1)) {
if (config->num_source_sub_bins == MAX_SOURCE_BINS) {// enforce the max number of input sources
NVGSTDS_ERR_MSG_V ("App supports max %d sources", MAX_SOURCE_BINS);
ret = FALSE;
goto done;
}
// per-input-source configuration
parse_err = !parse_source (&config->multi_source_config[config->num_source_sub_bins],
cfg_file, *group, cfg_file_path);
if (config->multi_source_config[config->num_source_sub_bins].enable) {
config->num_source_sub_bins++;
}
}
// [streammux]
if (!g_strcmp0 (*group, CONFIG_GROUP_STREAMMUX)) {
// stored into config->streammux_config
parse_err = !parse_streammux (&config->streammux_config, cfg_file);
}
// [osd]
if (!g_strcmp0 (*group, CONFIG_GROUP_OSD)) {
parse_err = !parse_osd (&config->osd_config, cfg_file);
}
// [primary-gie]
if (!g_strcmp0 (*group, CONFIG_GROUP_PRIMARY_GIE)) {
parse_err =
!parse_gie (&config->primary_gie_config, cfg_file,
CONFIG_GROUP_PRIMARY_GIE, cfg_file_path);
}
// [tracker]
if (!g_strcmp0 (*group, CONFIG_GROUP_TRACKER)) {
parse_err = !parse_tracker (&config->tracker_config, cfg_file, cfg_file_path);
}
// [secondary-gieN] — prefix match, bounded by MAX_SECONDARY_GIE_BINS
if (!strncmp (*group, CONFIG_GROUP_SECONDARY_GIE,
sizeof (CONFIG_GROUP_SECONDARY_GIE) - 1)) {
if (config->num_secondary_gie_sub_bins == MAX_SECONDARY_GIE_BINS) {
NVGSTDS_ERR_MSG_V ("App supports max %d secondary GIEs", MAX_SECONDARY_GIE_BINS);
ret = FALSE;
goto done;
}
parse_err =
!parse_gie (&config->secondary_gie_sub_bin_config[config->
num_secondary_gie_sub_bins],
cfg_file, *group, cfg_file_path);
if (config->secondary_gie_sub_bin_config[config->num_secondary_gie_sub_bins].enable){
config->num_secondary_gie_sub_bins++;
}
}
// [sinkN] — prefix match, bounded by MAX_SINK_BINS
if (!strncmp (*group, CONFIG_GROUP_SINK, sizeof (CONFIG_GROUP_SINK) - 1)) {
if (config->num_sink_sub_bins == MAX_SINK_BINS) {
NVGSTDS_ERR_MSG_V ("App supports max %d sinks", MAX_SINK_BINS);
ret = FALSE;
goto done;
}
parse_err =
!parse_sink (&config->
sink_bin_sub_bin_config[config->num_sink_sub_bins], cfg_file, *group,
cfg_file_path);
if (config->
sink_bin_sub_bin_config[config->num_sink_sub_bins].enable){
config->num_sink_sub_bins++;
}
}
// [tiled-display]
if (!g_strcmp0 (*group, CONFIG_GROUP_TILED_DISPLAY)) {
parse_err = !parse_tiled_display (&config->tiled_display_config, cfg_file);
}
// [ds-example]
if (!g_strcmp0 (*group, CONFIG_GROUP_DSEXAMPLE)) {
parse_err = !parse_dsexample (&config->dsexample_config, cfg_file);
}
// [tests]
if (!g_strcmp0 (*group, CONFIG_GROUP_TESTS)) {
parse_err = !parse_tests (config, cfg_file);
}
if (parse_err) {
GST_CAT_ERROR (APP_CFG_PARSER_CAT, "Failed to parse '%s' group", *group);
goto done;
}
}
// Validation: every secondary GIE id must differ from the primary GIE id
for (i = 0; i < config->num_secondary_gie_sub_bins; i++) {
if (config->secondary_gie_sub_bin_config[i].unique_id ==
config->primary_gie_config.unique_id) {
NVGSTDS_ERR_MSG_V ("Non unique gie ids found");
ret = FALSE;
goto done;
}
}
// Validation: secondary GIE ids must be unique among themselves (pairwise)
for (i = 0; i < config->num_secondary_gie_sub_bins; i++) {
for (j = i + 1; j < config->num_secondary_gie_sub_bins; j++) {
if (config->secondary_gie_sub_bin_config[i].unique_id ==
config->secondary_gie_sub_bin_config[j].unique_id) {
NVGSTDS_ERR_MSG_V ("Non unique gie id %d found",
config->secondary_gie_sub_bin_config[i].unique_id);
ret = FALSE;
goto done;
}
}
}
// Expand URI_MULTIPLE sources: clone the template entry num_sources times,
// substituting the index into the printf-style uri pattern
for (i = 0; i < config->num_source_sub_bins; i++) {
if (config->multi_source_config[i].type == NV_DS_SOURCE_URI_MULTIPLE) {
if (config->multi_source_config[i].num_sources < 1) {
config->multi_source_config[i].num_sources = 1;
}
for (j = 1; j < config->multi_source_config[i].num_sources; j++) {
if (config->num_source_sub_bins == MAX_SOURCE_BINS) {
NVGSTDS_ERR_MSG_V ("App supports max %d sources", MAX_SOURCE_BINS);
ret = FALSE;
goto done;
}
memcpy (&config->multi_source_config[config->num_source_sub_bins],
&config->multi_source_config[i],
sizeof (config->multi_source_config[i]));
config->multi_source_config[config->num_source_sub_bins].type = NV_DS_SOURCE_URI;
config->multi_source_config[config->num_source_sub_bins].uri =
g_strdup_printf (config->multi_source_config[config->num_source_sub_bins].uri, j);
config->num_source_sub_bins++;
}
config->multi_source_config[i].type = NV_DS_SOURCE_URI;
config->multi_source_config[i].uri =
g_strdup_printf (config->multi_source_config[i].uri, 0);
}
}
ret = TRUE;
done:
if (cfg_file) {
g_key_file_free (cfg_file);
}
if (groups) {
g_strfreev (groups);
}
if (error) {
g_error_free (error);
}
if (!ret) {
NVGSTDS_ERR_MSG_V ("%s failed", __func__);
}
return ret;
}
最后是建立pipeline的代码,建立pipeline是属于deepstream的部分,里面包含了tensorRT的调用。配置文件当中的[primary-gie]是对推理引擎的相关配置。修改配置文件可以对模型参数进行设置。