When driving multiple Intel RealSense D435 cameras from Python threads, I found that pyrealsense2 calls such as pipeline.start() and context.query_devices() can tie up a lot of resources inside a single thread. Because all of Python's threads run in the same process (and share the interpreter's GIL), one thread hogging resources this way can starve the other threads and make them appear to hang.
Solution: before calling pipeline.start() again after a camera drops, wait until that camera shows up in ctx.query_devices() again, so the reconnect path cannot block; the full script is below.
Related post: Tensorflow_yolov3 Intel Realsense D435 strange phenomenon: with multiple cameras connected, the program hangs (freezes) as soon as depth can be detected
# -*- coding: utf-8 -*-
"""
@File : 20200408_避障代码落地优化_防卡顿.py.py
@Time : 2020/4/8 13:54
@Author : Dontla
@Email : [email protected]
@Software: PyCharm
"""
# TODO: with Python threads, once one camera drops offline its thread keeps raising errors and seems to burn a lot of CPU, which in turn stalls the other thread.
#       Consider switching to multiprocessing instead; no data needs to be shared anyway, so each camera could do everything inside its own process (see the sketch at the end of this post).
# Features: obstacle detection and sending an alarm signal
# Detail: only raise the alarm after the obstacle has been detected for more than x consecutive frames, not on the first detection, to avoid false positives
import socket
import struct
import threading
import time
import traceback
import numpy as np
import pyrealsense2 as rs
import cv2
import sys
from numba import jit
# parameter configuration
cam_serials = ['838212073806', '836612070984']
cam_num = len(cam_serials)
ctx = rs.context()
cam_width, cam_height = 848, 480  # [stream resolution]
threshold_dangerous_distance = 3000  # [danger distance, in mm]
distance_cam_vertical_to_cotton_top = 260  # [vertical distance from the camera down to the top of the cotton, in mm]
factor_compensation_dangerous_distance = 1.5  # [danger-distance compensation factor] pushes the depth at the bottom of the frame away from the critical value to avoid false detections
threshold_dangerous_scale = 0.05  # [alarm threshold on the proportion of pixels within the danger distance]
FOV_width, FOV_height = 69.4, 42.5  # [camera field of view, in degrees]
# [effective vertical FOV after matching the stream's aspect ratio]
if cam_height / cam_width < FOV_height / FOV_width:
FOV_height_actual = FOV_width * cam_height / cam_width
# print(FOV_height_actual) # 39.283018867924525
else:
FOV_height_actual = FOV_height
# [compute the alpha filter value (distance_min is the depth at the very bottom of the image, i.e. the nearest visible cotton)]
# With the camera 800 mm above the cotton top, the minimum distance is about 2256 mm, so a danger distance of 2000 mm gives an alpha of about 0.88,
# and a danger distance of 3000 mm gives an alpha of about 1.32.
# The filtering step therefore has to check whether filter_alpha is greater than 1 (that check is already inside alpha_map()).
distance_min = distance_cam_vertical_to_cotton_top / (
np.tan(FOV_height_actual / 2 * np.pi / 180))
filter_alpha = threshold_dangerous_distance / distance_min * factor_compensation_dangerous_distance
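# Worked example (added for clarity; it reproduces the figures in the comment above up to rounding):
#   FOV_height_actual = 69.4 * 480 / 848 ≈ 39.28°
#   with an 800 mm vertical distance: distance_min = 800 / tan(39.28° / 2) ≈ 2241 mm (the ~2256 mm quoted above)
#   so filter_alpha ≈ 2000 / 2241 ≈ 0.89 or 3000 / 2241 ≈ 1.34 before the compensation factor, matching the ~0.88 / ~1.32 above.
# With the defaults in this file (260 mm, 3000 mm, compensation 1.5), filter_alpha comes out well above 1.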
# [UDP signalling module]
# IP address and port of the remote host
ip_port = ('192.168.1.49', 9000)
udp_server_client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# self.bytes_udp_pack = 1024
bytes_udp_pack = 65507
# send the alarm image to the host
def udp_send_image(img, pack_size, sock, ip_port):
    _, img_encode = cv2.imencode('.jpg', img)
    data = img_encode.tobytes()
    # print(len(data))  # many encoded frames exceed 65535 bytes
    # [build the header] 'i' packs the payload length as a 4-byte int
    fhead = struct.pack('i', len(data))
    # [send the header first, then the data]
    sock.sendto(fhead, ip_port)
    # send pack_size bytes per datagram; compute how many datagrams are needed (ceiling division avoids a trailing empty packet)
    send_times = (len(data) + pack_size - 1) // pack_size
    for count in range(send_times):
        # time.sleep(0.01)
        if count < send_times - 1:
            sock.sendto(
                data[pack_size * count:pack_size * (count + 1)], ip_port)
        else:
            sock.sendto(data[pack_size * count:], ip_port)
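# For reference, a minimal receiver sketch for the protocol above (added here as an illustration,
# not part of the original script; reordering/loss of UDP datagrams is not handled): read the
# 4-byte 'i' length header, then keep receiving chunks until the whole JPEG has arrived.
def udp_receive_image(sock, pack_size=bytes_udp_pack):
    # sock must be a UDP socket bound to ip_port on the receiving machine
    header, _ = sock.recvfrom(4)
    (length,) = struct.unpack('i', header)
    buf = b''
    while len(buf) < length:
        chunk, _ = sock.recvfrom(pack_size)
        buf += chunk
    return cv2.imdecode(np.frombuffer(buf, dtype=np.uint8), cv2.IMREAD_COLOR)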
# alpha mapping: scales the background (cotton) part of the image so it falls outside the obstacle-detection range
# @jit
# jit did not seem to work here for some unknown reason, and enabling it gave no noticeable speed-up
def alpha_map(depth_image, filter_alpha):
    if filter_alpha > 1:
        # get the height and width of depth_image
        h, w = depth_image.shape[0], depth_image.shape[1]  # 360,640
        # build the upper and lower alpha ramps (several methods work)
        # filter_upper = np.array([1] * int(h / 2))
        filter_upper = np.full(int(h / 2), 1)
        filter_lower = np.linspace(1, filter_alpha, int(h / 2))  # num must be an int
        # join filter_upper and filter_lower into one array
        filter = np.r_[filter_upper, filter_lower]
# print(filter)
# print(filter.shape) # (360,)
# print(filter_alpha_upper)
# print(filter_alpha_upper.shape) # (180,)
# print(filter_alpha_lower)
# print(filter_alpha_lower.shape) # (180,)
return (depth_image.T * filter).T
else:
return depth_image
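# Tiny worked example of the ramp above (added for illustration): with h = 6 and filter_alpha = 1.5,
# filter == [1, 1, 1, 1.0, 1.25, 1.5], i.e. the upper half of the image is left untouched while the
# lower half is scaled progressively up to 1.5x, pushing the nearby cotton beyond the danger threshold.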
# To stop cotton near the bottom of the frame from being falsely detected, a gradient filter could be built with two nested for loops;
# but it seems the frame would still have to be split down the middle, the lower half discarded and only the upper half used for the decision, because even with a gradient on the lower half... (a gradient is still needed...)
@jit
def traversing_pixels(depth_image, threshold_dangerous_distance):
num_dangerous = 0
num_all_pixels = 0
depth_image_ravel = depth_image.ravel()
    # depth_image_segmentation is the segmented image (rendered as red/blue later)
depth_image_segmentation_ravel = []
for pixel in depth_image_ravel:
num_all_pixels += 1
        # the first condition works a bit better (pixel == 0 means a depth hole)
if pixel < threshold_dangerous_distance and pixel != 0:
# if pixel < threshold_dangerous_distance:
num_dangerous += 1
            # 6000 shows as blue and 0 as red after the colormap?
depth_image_segmentation_ravel.append(6000)
else:
depth_image_segmentation_ravel.append(0)
depth_image_segmentation = np.array(depth_image_segmentation_ravel).reshape(depth_image.shape)
return num_all_pixels, num_dangerous, depth_image_segmentation
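# For comparison, a NumPy-vectorized equivalent of the loop above (added as a sketch; it is not
# used elsewhere in this script but should return the same counts and segmentation image).
def traversing_pixels_vectorized(depth_image, threshold_dangerous_distance):
    # pixels closer than the danger distance, ignoring depth holes (value 0)
    dangerous_mask = (depth_image < threshold_dangerous_distance) & (depth_image != 0)
    depth_image_segmentation = np.where(dangerous_mask, 6000, 0)
    return depth_image.size, int(dangerous_mask.sum()), depth_image_segmentation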
# [class] frame-capture thread for a single camera
class CamThread(threading.Thread):
def __init__(self, cam_serial):
threading.Thread.__init__(self)
self.cam_serial = cam_serial
    # [method]
def run(self):
while True:
try:
                print('camera {} thread started:'.format(self.cam_serial))
                # configure the camera and start streaming
                # self.cam_cfg(self.cam_serial)  # moving this setup into a helper method did not work at first (because the pipeline/config objects were local variables of that helper and only usable inside it)
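                # NOTE: the locals()['name' + serial] pattern below relies on CPython handing back the
                # frame's locals dict; writes to locals() are not guaranteed by the language (newer
                # interpreters return a fresh snapshot), so a plain dict keyed by serial would be safer.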
locals()['pipeline' + self.cam_serial] = rs.pipeline(ctx)
locals()['config' + self.cam_serial] = rs.config()
locals()['config' + self.cam_serial].enable_device(self.cam_serial)
locals()['config' + self.cam_serial].enable_stream(rs.stream.depth, cam_width, cam_height,
rs.format.z16, 30)
locals()['config' + self.cam_serial].enable_stream(rs.stream.color, cam_width, cam_height,
rs.format.bgr8, 30)
locals()['pipeline' + self.cam_serial].start(locals()['config' + self.cam_serial])
locals()['align' + self.cam_serial] = rs.align(rs.stream.color)
                # keep reading frames from the camera in a loop
while True:
# globals()['a'] += 1
# print(globals()['a'])
locals()['frames' + self.cam_serial] = locals()['pipeline' + self.cam_serial].wait_for_frames()
locals()['aligned_frames' + self.cam_serial] = locals()['align' + self.cam_serial].process(
locals()['frames' + self.cam_serial])
locals()['aligned_depth_frame' + self.cam_serial] = locals()[
'aligned_frames' + self.cam_serial].get_depth_frame()
# locals()['color_frame' + self.cam_serial] = locals()[
# 'aligned_frames' + self.cam_serial].get_color_frame()
# locals()['color_profile' + self.cam_serial] = locals()[
# 'color_frame' + self.cam_serial].get_profile()
# locals()['cvsprofile' + self.cam_serial] = rs.video_stream_profile(
# locals()['color_profile' + self.cam_serial])
# locals()['color_intrin' + self.cam_serial] = locals()[
# 'cvsprofile' + self.cam_serial].get_intrinsics()
# locals()['color_intrin_part' + self.cam_serial] = [locals()['color_intrin' + self.cam_serial].ppx,
# locals()['color_intrin' + self.cam_serial].ppy,
# locals()['color_intrin' + self.cam_serial].fx,
# locals()['color_intrin' + self.cam_serial].fy]
globals()['depth_image_raw' + self.cam_serial] = np.asanyarray(
locals()['aligned_depth_frame' + self.cam_serial].get_data())
# locals()['color_image' + self.cam_serial] = np.asanyarray(
# locals()['color_frame' + self.cam_serial].get_data())
except Exception:
traceback.print_exc()
                # Dontla 20200326: wrap the stop() below in a try: after the camera drops, reconfiguration may not have reached pipeline.start() yet (the camera is still offline), and calling stop() again then raises an error that the pipeline cannot be stopped before it is started
try:
locals()['pipeline' + self.cam_serial].stop()
except Exception:
traceback.print_exc()
pass
                print('camera {} thread {} lost the device, reconnecting:'.format(self.cam_serial, self.name))
                # 20200408 Dontla: check that the camera is connected again before calling pipeline.start(), otherwise it hangs
count_try_times = 0
break1 = False
while True:
time.sleep(0.5)
try:
count_try_times += 1
print(count_try_times)
for dev in ctx.query_devices():
if self.cam_serial == dev.get_info(rs.camera_info.serial_number):
break1 = True
break
except Exception:
pass
finally:
if break1:
break
# [class] frame processing and display
class ImgProcess(threading.Thread):
def __init__(self, cam_serial):
threading.Thread.__init__(self)
self.cam_serial = cam_serial
    # [method]
    # TODO Dontla: work out exactly where things stall and hog resources (note that the bare `continue` below busy-waits until the first frame arrives)
def run(self):
while True:
try:
if 'depth_image_raw{}'.format(self.cam_serial) not in globals():
continue
                # [alpha filtering]
locals()['depth_image_alpha_filter{}'.format(self.cam_serial)] = alpha_map(
globals()['depth_image_raw{}'.format(self.cam_serial)], filter_alpha)
                # [walk the depth pixels; if the proportion below the danger distance exceeds the threshold, raise the alarm]
locals()['num_all_pixels{}'.format(self.cam_serial)], locals()[
'num_dangerous{}'.format(self.cam_serial)], locals()[
'depth_image_segmentation{}'.format(self.cam_serial)] = traversing_pixels(
locals()['depth_image_alpha_filter{}'.format(self.cam_serial)], threshold_dangerous_distance)
print('num_all_pixels:{}'.format(locals()['num_all_pixels{}'.format(self.cam_serial)]))
print('num_dangerous:{}'.format(locals()['num_dangerous{}'.format(self.cam_serial)]))
locals()['dangerous_scale{}'.format(self.cam_serial)] = \
locals()['num_dangerous{}'.format(self.cam_serial)] / locals()[
'num_all_pixels{}'.format(self.cam_serial)]
                print('dangerous proportion: {}'.format(locals()['dangerous_scale{}'.format(self.cam_serial)]))
locals()['depth_colormap{}'.format(self.cam_serial)] = cv2.applyColorMap(
cv2.convertScaleAbs(locals()['depth_image_segmentation{}'.format(self.cam_serial)], alpha=0.0425),
cv2.COLORMAP_JET)
                # note: avoid Chinese characters in the window name, they can turn into mojibake
cv2.imshow('win{}'.format(self.cam_serial), locals()['depth_colormap{}'.format(self.cam_serial)])
cv2.waitKey(1)
except Exception:
traceback.print_exc()
pass
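# The header comment says the alarm should only fire after the obstacle has been visible for more
# than x consecutive frames; that part is not implemented in this file, so the helper below is only
# a sketch of the idea (the name should_alarm and the min_frames default are made up here). The
# caller would feed it dangerous_scale every frame and send the UDP alarm whenever it returns True.
def should_alarm(dangerous_scale, consecutive_frames, min_frames=10,
                 threshold=threshold_dangerous_scale):
    # count consecutive dangerous frames; reset the counter as soon as one frame is safe
    consecutive_frames = consecutive_frames + 1 if dangerous_scale > threshold else 0
    return consecutive_frames > min_frames, consecutive_frames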
# [function] continuous camera verification
def cam_conti_veri(cam_num, ctx):
    # D·C 1911202: max_veri_times is the maximum number of checks; continuous_stable_value is how many consecutive good checks are needed before the devices are considered stable after a reset
max_veri_times = 100
continuous_stable_value = 5
print('\n', end='')
    print('starting continuous verification, required stable count: {}, maximum attempts: {}:'.format(continuous_stable_value, max_veri_times))
continuous_value = 0
veri_times = 0
while True:
devices = ctx.query_devices()
# for dev in devices:
# print(dev.get_info(rs.camera_info.serial_number), dev.get_info(rs.camera_info.usb_type_descriptor))
connected_cam_num = len(devices)
        print('number of cameras: {}'.format(connected_cam_num))
if connected_cam_num == cam_num:
continuous_value += 1
if continuous_value == continuous_stable_value:
break
else:
continuous_value = 0
veri_times += 1
if veri_times == max_veri_times:
print("检测超时,请检查摄像头连接!")
sys.exit()
# [function] hardware-reset each camera in turn
def cam_hardware_reset(ctx, cam_serials):
    # should there be a delay after hardware_reset()? without one, errors are raised
print('\n', end='')
    print('initializing cameras:')
for dev in ctx.query_devices():
        # cache the serial number in a variable so the inner loop does not query the device repeatedly (unclear whether it would actually re-query every time)
dev_serial = dev.get_info(rs.camera_info.serial_number)
        # match the serial number and reset only the cameras we need to reset (the nesting order of the two for loops matters; otherwise a just-reset camera gets accessed again and raises an error)
for serial in cam_serials:
if serial == dev_serial:
dev.hardware_reset()
                # oddly, the statement below does not raise even though dev was just reset; perhaps because it does not go back through ctx.query_devices()?
                # maybe right after a reset ctx.query_devices() still lists the device but no longer holds a valid handle for it; if so,
                # that would explain why len(ctx.query_devices()) works while reading the serial number or other info raises an error
                print('camera {} initialized successfully'.format(dev.get_info(rs.camera_info.serial_number)))
        # if there is only one camera, let it sleep the full 5 seconds (to be on the safe side)
time.sleep(5 / len(cam_serials))
if __name__ == '__main__':
    # continuous verification
cam_conti_veri(cam_num, ctx)
    # hardware-reset the cameras
cam_hardware_reset(ctx, cam_serials)
    # verify again after the reset
cam_conti_veri(cam_num, ctx)
    # create the threads
for serial in cam_serials:
locals()['CamThread_{}'.format(serial)] = CamThread(serial)
locals()['ImgProcess_{}'.format(serial)] = ImgProcess(serial)
    # start the threads
for serial in cam_serials:
locals()['CamThread_{}'.format(serial)].start()
locals()['ImgProcess_{}'.format(serial)].start()
    # block the main program
for serial in cam_serials:
locals()['CamThread_{}'.format(serial)].join()
print("退出主线程")