Video source → stream handling (RTSP/RAW, DECODE) → preprocessing such as resizing and scaling (IMAGE PROCESSING) → batching, i.e. combining several frames to be processed together (BATCHING) → inference (DNN(s)) → further processing such as tracking (TRACKING) → on-screen compositing (VIZ) → output (DISPLAY/STORAGE/CLOUD)
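To make these stages concrete, the sketch below maps each stage to the GStreamer/DeepStream element that typically implements it. This is only an illustrative mapping: the element names follow the test application built later in this article, and nvtracker is listed for completeness even though deepstream-test1 does not use tracking.

# Illustrative mapping of the pipeline stages above to DeepStream GStreamer elements
STAGE_TO_ELEMENT = {
    "stream handling / decode": ["filesrc", "h264parse", "nvv4l2decoder"],
    "image processing": ["nvvideoconvert"],
    "batching": ["nvstreammux"],
    "inference (DNNs)": ["nvinfer"],
    "tracking": ["nvtracker"],          # not used in deepstream-test1
    "visualization (OSD)": ["nvdsosd"],
    "output": ["nveglglessink"],
}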
The Transfer Learning Toolkit is a Python-based toolkit that lets developers start from NVIDIA pre-trained models and provides a set of tools to adapt popular network architectures to their own data, and to train, tune, prune, and export models for deployment. It also offers simple interfaces and abstract APIs that make deep learning training workflows more efficient.
sudo apt-get install python-gi-dev
export GST_LIBS="-lgstreamer-1.0 -lgobject-2.0 -lglib-2.0"
export GST_CFLAGS="-pthread -I/usr/include/gstreamer-1.0 -I/usr/include/glib-2.0 -I/usr/lib/x86_64-linux-gnu/glib-2.0/include"
git clone https://github.com/GStreamer/gst-python.git
cd gst-python
git checkout 1a8f48a
./autogen.sh PYTHON=python3
./configure PYTHON=python3
make
sudo make install
tar xf ds_pybind_0.5.tbz2 -C /home/nvidia/Downloads/deepstream/sources
Package contents
The DeepStream Python package includes:
1. Python bindings for the DeepStream metadata library. The bindings are provided as a compiled module, available for both x86_64 and Jetson platforms. They can be found at:
|- x86_64
|    |- pyds.so
|- jetson
|    |- pyds.so
2. DeepStream test applications in Python. Four test applications are available:
deepstream-test1/deepstream_test_1.py
deepstream-test2/deepstream_test_2.py
deepstream-test3/deepstream_test_3.py
deepstream-test4/deepstream_test_4.py
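Before running the test applications, it is worth a quick sanity check that both the GStreamer Python bindings and the extracted pyds module import cleanly. A minimal sketch, assuming the x86_64 bindings and the extraction path used above (on Jetson, point sys.path at the jetson directory instead):

import sys
# Directory containing pyds.so for this platform (x86_64 here; use .../bindings/jetson on Jetson)
sys.path.append('/home/nvidia/Downloads/deepstream/sources/python/bindings/x86_64')
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
import pyds  # compiled DeepStream metadata bindings
Gst.init(None)
print(Gst.version_string())  # prints the GStreamer version the bindings link against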
Running the code:
Note: make sure the directory containing pyds.so is on the Python module search path (added to sys.path, as done below, or to PYTHONPATH).
import sys
sys.path.append('../')
sys.path.append('/home/nvidia/Downloads/deepstream/sources/python/bindings/x86_64')
sys.path.append('/usr/lib/python3/dist-packages')
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
import pyds
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3
def osd_sink_pad_buffer_probe(pad,info,u_data):
    frame_number=0
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE:0,
        PGIE_CLASS_ID_PERSON:0,
        PGIE_CLASS_ID_BICYCLE:0,
        PGIE_CLASS_ID_ROADSIGN:0
    }
    num_rects=0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.glist_get_nvds_frame_meta()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data)
        except StopIteration:
            break
        frame_number=frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj=frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta=pyds.glist_get_nvds_object_meta(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try:
                l_obj=l_obj.next
            except StopIteration:
                break
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])
        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font, font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame=l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
# Standard initialization procedure
GObject.threads_init()
Gst.init(None)
# Create gstreamer elements
# Create Pipeline element that will form a connection of other elements
print("Creating Pipeline \n ")
pipeline = Gst.Pipeline()
if not pipeline:
    sys.stderr.write(" Unable to create Pipeline \n")
# Create the source element for reading from a file
print("Creating Source \n ")
source = Gst.ElementFactory.make("filesrc", "file-source")
if not source:
    sys.stderr.write(" Unable to create Source \n")
# Create an h264 parser, since the input file is an elementary h264 stream
print("Creating H264Parser \n")
h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
if not h264parser:
    sys.stderr.write(" Unable to create h264 parser \n")
# Use nvdec_h264 for hardware-accelerated decoding on the GPU
print("Creating Decoder \n")
decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
if not decoder:
    sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")
# Create an nvstreammux instance to form batches from one or more sources
streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
if not streammux:
    sys.stderr.write(" Unable to create NvStreamMux \n")
# Set up nvinfer to run inference on the decoder's output
# (note: the inference behavior is set through the config file)
pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
if not pgie:
    sys.stderr.write(" Unable to create pgie \n")
# Use a converter to convert from NV12 to RGBA, as required by nvosd
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
if not nvvidconv:
    sys.stderr.write(" Unable to create nvvidconv \n")
# Create the OSD to draw on the converted RGBA buffer
nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
if not nvosd:
    sys.stderr.write(" Unable to create nvosd \n")
# Extra transform element needed when running on Jetson (aarch64) devices
if is_aarch64():
    transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
print("Creating EGLSink \n")
sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
if not sink:
    sys.stderr.write(" Unable to create egl sink \n")
Recommended reading: dstest1_pgie_config.txt
# Edit this cell to change the inputs and the stream config
print("Playing file /home/nvidia/Downloads/deepstream/sources/python/apps/deepstream-test1/sample_720p.h264 ")
source.set_property('location', \
"/home/nvidia/Downloads/deepstream/sources/python/apps/deepstream-test1/sample_720p.h264")
streammux.set_property('width', 1920)
streammux.set_property('height', 1080)
streammux.set_property('batch-size', 1)
streammux.set_property('batched-push-timeout', 4000000)
pgie.set_property('config-file-path', \
"/home/nvidia/Downloads/deepstream/sources/python/apps/deepstream-test1/dstest1_pgie_config.txt")
print("Adding elements to Pipeline \n")
pipeline.add(source)
pipeline.add(h264parser)
pipeline.add(decoder)
pipeline.add(streammux)
pipeline.add(pgie)
pipeline.add(nvvidconv)
pipeline.add(nvosd)
pipeline.add(sink)
if is_aarch64():
    pipeline.add(transform)
# we link the elements together
# file-source -> h264-parser -> nvh264-decoder ->
# nvinfer -> nvvidconv -> nvosd -> video-renderer
print("Linking elements in the Pipeline \n")
source.link(h264parser)
h264parser.link(decoder)
sinkpad = streammux.get_request_pad("sink_0")
if not sinkpad:
    sys.stderr.write(" Unable to get the sink pad of streammux \n")
srcpad = decoder.get_static_pad("src")
if not srcpad:
    sys.stderr.write(" Unable to get source pad of decoder \n")
srcpad.link(sinkpad)
streammux.link(pgie)
pgie.link(nvvidconv)
nvvidconv.link(nvosd)
if is_aarch64():
    nvosd.link(transform)
    transform.link(sink)
else:
    nvosd.link(sink)
loop = GObject.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect ("message", bus_call, loop)
# we add probe to the sink pad of the osd element, since by that time,
# the buffer would have had got all the metadata.
osdsinkpad = nvosd.get_static_pad("sink")
if not osdsinkpad:
    sys.stderr.write(" Unable to get sink pad of nvosd \n")
osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
print("Starting pipeline \n")
pipeline.set_state(Gst.State.PLAYING)
try:
    loop.run()
except:
    pass
# cleaning up as the pipeline comes to an end
pipeline.set_state(Gst.State.NULL)
The interval setting in the nvinfer config file controls how often inference is run:
0: run inference on every frame
1: run inference on every other frame (for example, detect objects every other frame and let a tracker localize them in between)
2: run inference on every third frame
Go to the config file mentioned below,
"/home/nvidia/Downloads/deepstream/sources/python/apps/deepstream-test1/dstest1_pgie_config.txt"
and change the interval as needed.
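For quick experiments, the same setting can also be applied from Python instead of editing the config file, by setting the interval property exposed by the nvinfer element. A minimal sketch, assuming the pgie element created earlier in this article and that your DeepStream version honors the element property:

# Sketch: run inference only on every other frame (equivalent to interval=1 in the config file).
# Assumes `pgie` is the nvinfer element created earlier.
pgie.set_property('interval', 1)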
from IPython.display import Image
Image(filename='ds_test1.png')
Source code: GitHub link