Intel Realsense D435 多摄像头多线程目标识别架构

每一个摄像头单独开一个线程,摄像头配置是否要放进线程内?

每一个摄像头线程内单独处理错误信息

每一个摄像头要存储最新彩色帧和深度帧(用于识别和获取深度信息)

接收打击信号要一个线程,对打击信号进行排序、分析、存储。

识别线程去获取存储的打击信号知道要识别哪个,去调用指定摄像头存储的帧识别,存储识别结果。

发送坐标信息需要一个线程,用于获取识别线程的存储结果进行发送

(因为我们单帧识别时间较长,所以不能使用一直识别的方式,需要采用接收信号然后识别的方式【当然也不排除可以使用预测方式,像商品推荐那样,我预测你哪个机械臂会发来打击信号,然后提前识别然后直接给你发过去】)

20200330 更新

初步完成代码

# -*- coding: utf-8 -*-
"""
@File    : 20200324_多摄像头多线程调度.py
@Time    : 2020/3/24 15:58
@Author  : Dontla
@Email   : [email protected]
@Software: PyCharm
"""

import sys
import threading
import time
import traceback
import cv2
import numpy as np
import pyrealsense2 as rs
import dontla_package.tensorflow_yolov3_algorithm as tya
import core.utils as utils

# Parameters
# Serial numbers of the RealSense D435 cameras to drive (swap in the set you need).
# cam_serials = ['836612070298', '838212073806', '827312071616']
cam_serials = ['826212070395', '838212072365']
# cam_serials = ['826212070395']

# Needn't modify
# Expected camera count plus one shared librealsense context for all threads.
cam_num = len(cam_serials)
ctx = rs.context()


# [Class] Per-camera frame-capture thread: owns one RealSense device and keeps
# publishing its latest aligned color/depth frames for the recognition thread.
class CamThread(threading.Thread):
    def __init__(self, cam_serial):
        """cam_serial: serial-number string of the RealSense device this thread owns."""
        threading.Thread.__init__(self)
        self.cam_serial = cam_serial

    def run(self):
        """Configure the camera and loop forever, publishing the newest frames into
        module globals ('color_image<serial>', 'aligned_depth_frame<serial>',
        'color_intrin_part<serial>') that the matching ImgProcess thread reads.

        On any error the pipeline is stopped (if it ever started) and the whole
        setup is retried from scratch, which handles camera disconnect/reconnect.

        Note: the original code stored every intermediate in
        locals()['name' + serial]; mutating locals() inside a function is
        implementation-defined in CPython and was replaced by plain variables.
        """
        while True:
            pipeline = None
            try:
                print('摄像头{}线程启动:'.format(self.cam_serial))

                # Configure this camera only (bound by serial) and start streaming:
                # 1280x720 @ 30fps, z16 depth + bgr8 color.
                pipeline = rs.pipeline(ctx)
                config = rs.config()
                config.enable_device(self.cam_serial)
                config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
                config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
                pipeline.start(config)
                # Align depth to the color stream so pixel coordinates line up.
                align = rs.align(rs.stream.color)

                # Continuously read frames and publish the latest ones.
                while True:
                    frames = pipeline.wait_for_frames()
                    aligned_frames = align.process(frames)
                    aligned_depth_frame = aligned_frames.get_depth_frame()
                    color_frame = aligned_frames.get_color_frame()
                    color_profile = color_frame.get_profile()
                    cvsprofile = rs.video_stream_profile(color_profile)
                    color_intrin = cvsprofile.get_intrinsics()
                    # Publish shared state for the recognition thread.
                    # NOTE(review): these globals are written without a lock, so a
                    # reader may pair a color image with a slightly older depth
                    # frame — acceptable here, but worth confirming downstream.
                    globals()['aligned_depth_frame' + self.cam_serial] = aligned_depth_frame
                    globals()['color_intrin_part' + self.cam_serial] = [color_intrin.ppx,
                                                                        color_intrin.ppy,
                                                                        color_intrin.fx,
                                                                        color_intrin.fy]
                    globals()['color_image' + self.cam_serial] = np.asanyarray(color_frame.get_data())
            except Exception:
                # Camera dropped (or never started): stop the pipeline only if it
                # was created, then retry the whole configuration. The guard avoids
                # "cannot stop a pipeline before start" style errors on reconnect.
                try:
                    if pipeline is not None:
                        pipeline.stop()
                except Exception:
                    pass
                print('摄像头{}线程{}掉线重连:'.format(self.cam_serial, self.name))


# [Class] Per-camera recognition/display thread: runs YOLO on the newest color
# frame published by the matching CamThread and shows the annotated image.
class ImgProcess(threading.Thread):
    def __init__(self, cam_serial):
        """cam_serial: serial of the camera whose published frames this thread consumes."""
        threading.Thread.__init__(self)
        self.cam_serial = cam_serial

    def run(self):
        """Loop forever: wait for a published frame, predict under the shared lock,
        draw the boxes and display the result in a per-camera window.

        BUG FIX: the original acquired threadLock and only released it in the
        predict() failure branch — after the first successful prediction the
        non-reentrant lock stayed held and every thread deadlocked on the next
        acquire. `with threadLock:` guarantees release on all paths.
        """
        while True:
            try:
                # Busy-wait until the capture thread has published a first frame.
                if 'color_image{}'.format(self.cam_serial) not in globals():
                    continue
                # Serialize predictions: the single TF session is shared by all
                # camera threads. TODO(review): a dropped camera still holding a
                # stale frame keeps this thread spinning — a queue would be cleaner.
                with threadLock:
                    boxes_pr = YoloTest.predict(globals()['color_image' + self.cam_serial])

                boxes_image = YoloTest.draw_bbox(
                    globals()['color_image' + self.cam_serial], boxes_pr)
                cv2.imshow('{}'.format(self.cam_serial), boxes_image)
                cv2.waitKey(1)
            except Exception:
                # Log and keep running; one bad frame must not kill the thread.
                traceback.print_exc()


# [Function] Continuously verify camera connectivity: require the connected-device
# count to match `cam_num` for several polls in a row before proceeding.
def cam_conti_veri(cam_num, ctx):
    """Poll ctx.query_devices() until the device count equals cam_num for
    `continuous_stable_value` consecutive polls (device enumeration can
    fluctuate right after a reset). Exits the process after `max_veri_times`
    unsuccessful polls.
    """
    max_veri_times = 100
    continuous_stable_value = 5
    print('\n', end='')
    print('开始连续验证,连续验证稳定值:{},最大验证次数:{}:'.format(continuous_stable_value, max_veri_times))
    stable_streak = 0
    attempts = 0
    while True:
        connected_cam_num = len(ctx.query_devices())
        print('摄像头个数:{}'.format(connected_cam_num))
        # A mismatch resets the streak; only an unbroken run counts as stable.
        stable_streak = stable_streak + 1 if connected_cam_num == cam_num else 0
        if stable_streak == continuous_stable_value:
            break
        attempts += 1
        if attempts == max_veri_times:
            print("检测超时,请检查摄像头连接!")
            sys.exit()


# [Function] Hardware-reset every connected camera whose serial we manage.
def cam_hardware_reset(ctx, cam_serials):
    """Iterate the currently connected devices and hardware_reset() each one
    whose serial number appears in cam_serials, then sleep briefly so the
    devices have time to come back (a single camera gets the full 5 s).
    """
    print('\n', end='')
    print('开始初始化摄像头:')
    for dev in ctx.query_devices():
        # Cache the serial so we query device info just once before the reset.
        dev_serial = dev.get_info(rs.camera_info.serial_number)
        if dev_serial in cam_serials:
            dev.hardware_reset()
            # NOTE(review): querying info right after a reset worked for the
            # original author, but enumeration state post-reset is fragile.
            print('摄像头{}初始化成功'.format(dev.get_info(rs.camera_info.serial_number)))
    # Total settle time is ~5 s split across cameras; one camera sleeps 5 s.
    time.sleep(5 / len(cam_serials))


if __name__ == '__main__':
    # Verify all cameras are stably connected, hardware-reset them, then verify
    # again (a reset makes devices briefly disappear from the context).
    cam_conti_veri(cam_num, ctx)
    cam_hardware_reset(ctx, cam_serials)
    cam_conti_veri(cam_num, ctx)

    # Shared YOLO model instance; this module-level name is read by the
    # ImgProcess threads.
    YoloTest = tya.YoloTest()

    # Lock serializing predictions across camera threads (read by ImgProcess).
    threadLock = threading.Lock()

    # One capture thread plus one recognition thread per camera. Plain lists
    # replace the original locals()['CamThread_' + serial] string-keyed storage;
    # the unused `globals()['flag'] = True` was dropped as dead code.
    cam_threads = [CamThread(serial) for serial in cam_serials]
    img_threads = [ImgProcess(serial) for serial in cam_serials]

    for thread in cam_threads + img_threads:
        thread.start()

    # Block the main thread on the capture threads (they run forever).
    for thread in cam_threads:
        thread.join()

    print("退出主线程")

# -*- coding: utf-8 -*-
"""
@File    : tensorflow_yolov3_algorithm.py
@Time    : 2020/3/27 10:30
@Author  : Dontla
@Email   : [email protected]
@Software: PyCharm
"""
import colorsys
import random

import cv2
import numpy as np
import tensorflow as tf
import core.utils as utils
from core.config import cfg
from core.yolov3 import YOLOV3


# Dontla 191106: load the class.names file into an {index: class_name} dict.
def read_class_names(class_file_name):
    """loads class name from a file"""
    # One class name per line; the line number (0-based) becomes the class id.
    with open(class_file_name, 'r') as data:
        return {ID: name.strip('\n') for ID, name in enumerate(data)}


# Module-level {class_id: name} mapping loaded once at import time from the
# configured class file; shared by YoloTest.draw_bbox below.
my_classes = read_class_names(cfg.YOLO.CLASSES)


class YoloTest(object):
    """TF1 YOLOv3 inference wrapper.

    Builds the graph once in __init__, restores the EMA (shadow) weights into a
    persistent session, and exposes predict() (raw detections for one image)
    and draw_bbox() (draw labelled boxes, optionally with RealSense depth).
    """

    def __init__(self):
        # __C.TEST.INPUT_SIZE = 544 — square side the image is letterboxed to.
        self.input_size = cfg.TEST.INPUT_SIZE
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        # {class_id: class_name} mapping from the class.names file.
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        # Number of classes the model predicts.
        self.num_classes = len(self.classes)
        self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        # __C.TEST.SCORE_THRESHOLD = 0.3 — first filter: minimum box confidence.
        self.score_threshold = cfg.TEST.SCORE_THRESHOLD
        # __C.TEST.IOU_THRESHOLD = 0.45 — second filter: NMS overlap threshold.
        self.iou_threshold = cfg.TEST.IOU_THRESHOLD
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        # e.g. __C.TEST.ANNOT_PATH = "./data/dataset/.../test.txt"
        self.annotation_path = cfg.TEST.ANNOT_PATH
        # Checkpoint file the session weights are restored from.
        self.weight_file = cfg.TEST.WEIGHT_FILE
        # Bool flag: whether annotated images get written to write_image_path.
        self.write_image = cfg.TEST.WRITE_IMAGE
        # __C.TEST.WRITE_IMAGE_PATH = "./data/detection/"
        self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
        # Whether draw_bbox-style label text is shown (TEST.SHOW_LABEL).
        self.show_label = cfg.TEST.SHOW_LABEL

        # Placeholders for the input batch and the train/eval switch.
        with tf.name_scope('input'):
            self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
            self.trainable = tf.placeholder(dtype=tf.bool, name='trainable')

        model = YOLOV3(self.input_data, self.trainable)
        self.pred_sbbox, self.pred_mbbox, self.pred_lbbox = model.pred_sbbox, model.pred_mbbox, model.pred_lbbox

        # Exponential moving average: we restore the EMA (shadow) weights
        # rather than the raw trained variables.
        with tf.name_scope('ema'):
            ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)

        # allow_soft_placement=True lets TF fall back to CPU for ops that have
        # no kernel on the preferred device.
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        # variables_to_restore() maps each EMA shadow variable back onto the
        # variable itself, so the restore loads the averaged weights.
        self.saver = tf.train.Saver(ema_obj.variables_to_restore())
        self.saver.restore(self.sess, self.weight_file)

        # Camera serial numbers. NOTE(review): not used by predict()/draw_bbox();
        # the driving script keeps its own serial list — candidate for removal.
        # self.cam_serials = ['838212073249', '827312070790']
        self.cam_serials = ['838212074152', '838212072365', '827312070790', '838212073806', '826212070395']
        self.cam_num = len(self.cam_serials)

    def predict(self, image):
        """Run YOLOv3 on one image and return filtered detections.

        image: HxWx3 array (BGR, per the cv2 pipeline upstream).
        Returns a list of [x_min, y_min, x_max, y_max, score, class_id] rows
        in original-image pixel coordinates.
        """
        # Work on a copy so the caller's image is never modified here.
        org_image = np.copy(image)
        org_h, org_w, _ = org_image.shape

        # Letterbox-resize to input_size x input_size (source scaled to fit,
        # remaining area padded gray).
        image_data = utils.image_preprocess(image, [self.input_size, self.input_size])

        # Add the batch axis: (H, W, 3) -> (1, H, W, 3).
        image_data = image_data[np.newaxis, ...]

        # Raw per-scale predictions, shapes like (1, 68, 68, 3, 5+C),
        # (1, 34, 34, 3, 5+C), (1, 17, 17, 3, 5+C) for input_size 544.
        pred_sbbox, pred_mbbox, pred_lbbox = self.sess.run(
            [self.pred_sbbox, self.pred_mbbox, self.pred_lbbox],
            feed_dict={
                self.input_data: image_data,
                self.trainable: False
            }
        )

        # Flatten the three scales into one (N, 5 + num_classes) array:
        # 4 box coords + objectness + per-class scores per row.
        pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + self.num_classes)),
                                    np.reshape(pred_mbbox, (-1, 5 + self.num_classes)),
                                    np.reshape(pred_lbbox, (-1, 5 + self.num_classes))], axis=0)

        # First filter: map boxes back to original resolution and drop anything
        # scoring below score_threshold. Rows become [x1, y1, x2, y2, score, cls].
        bboxes = utils.postprocess_boxes(pred_bbox, (org_h, org_w), self.input_size, self.score_threshold)
        # Second filter: non-maximum suppression at iou_threshold.
        bboxes = utils.nms(bboxes, self.iou_threshold)

        return bboxes

    def draw_bbox(self, image, bboxes, aligned_depth_frame=None, color_intrin_part=None, show_label=True):
        """
        bboxes: [x_min, y_min, x_max, y_max, probability, cls_id] format coordinates.

        Draws each box (and optionally its "name: score" label) onto `image`
        in place and returns it. If both aligned_depth_frame and
        color_intrin_part ([ppx, ppy, fx, fy]) are given, the target's real
        XY/depth is computed as well (currently only computed, not printed).

        NOTE(review): passing aligned_depth_frame WITHOUT color_intrin_part
        would raise NameError on ppx/fx below — callers currently pass both
        or neither, but the guard should be combined.
        """
        # Number of classes in the module-level class.names mapping.
        num_classes = len(my_classes)

        # Image resolution, used to scale line thickness.
        image_h, image_w, _ = image.shape

        # One evenly spaced hue per class: assigning colors in HSV and then
        # converting to RGB gives well-separated per-class colors cheaply.
        hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]

        # HSV -> RGB in [0, 1].
        colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))

        # Scale to 0-255 integer channels.
        colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))

        # Shuffle with a fixed seed so class->color assignment is stable across
        # calls, then unseed so the rest of the program gets real randomness.
        random.seed(0)
        random.shuffle(colors)
        random.seed(None)

        # Unpack ppx, ppy, fx, fy from the color intrinsics, when provided.
        if color_intrin_part:
            ppx = color_intrin_part[0]
            ppy = color_intrin_part[1]
            fx = color_intrin_part[2]
            fy = color_intrin_part[3]

        for i, bbox in enumerate(bboxes):
            # First four values are the box corners (top-left, bottom-right).
            coor = np.array(bbox[:4], dtype=np.int32)
            # Label text size.
            fontScale = 0.5

            # Fifth value: detection score.
            score = bbox[4]

            # Sixth value: class index (e.g. 73 = book in COCO).
            class_ind = int(bbox[5])

            # Color assigned to this class by the shuffled palette above.
            bbox_color = colors[class_ind]

            # Line width scaled with resolution (1 at 640x480; 0 at 424x240).
            bbox_thick = int(0.6 * (image_h + image_w) / 600)
            # Top-left / bottom-right corners, image origin top-left,
            # x right, y down.
            c1, c2 = (coor[0], coor[1]), (coor[2], coor[3])

            # Draw the detection rectangle.
            cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)
            if show_label:
                # Label text: "class_name: score", e.g. "laptop: 0.48".
                bbox_mess = '%s: %.2f' % (my_classes[class_ind], score)

                # Box center in pixel coordinates (round gives one decimal;
                # int drops it).
                target_xy_pixel = [int(round((coor[0] + coor[2]) / 2)), int(round((coor[1] + coor[3]) / 2))]

                # With an aligned depth frame, read the depth at the center and
                # back-project to metric XY via the pinhole intrinsics.
                # NOTE(review): target_xy_true/target_depth are computed but not
                # used — the print statements were removed during debugging.
                if aligned_depth_frame:
                    target_depth = aligned_depth_frame.get_distance(target_xy_pixel[0], target_xy_pixel[1])

                    target_xy_true = [(target_xy_pixel[0] - ppx) * target_depth / fx,
                                      (target_xy_pixel[1] - ppy) * target_depth / fy]

                # Pixel footprint of the label text, e.g. (97, 12); the baseline
                # offset returned alongside is unused.
                t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0]

                # Filled background for the label, drawn INSIDE the box so labels
                # of boxes at the top edge stay visible (originally drawn above:
                # c1[1] - t_size[1] - 3).
                cv2.rectangle(image, c1, (c1[0] + t_size[0], c1[1] + t_size[1] + 3), bbox_color, thickness=-1)  # filled

                # Label text, shifted down to sit inside the filled background
                # (originally at c1[1] - 2, above the box).
                cv2.putText(image, bbox_mess, (c1[0], c1[1] + t_size[1] + bbox_thick - 2), cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale, (0, 0, 0), bbox_thick // 2, lineType=cv2.LINE_AA)
        return image

你可能感兴趣的:(Intel,RealSense)