OpenCV + YOLOv3 + ZED: Object Detection and Distance Measurement

A work project required recognizing and tracking objects. YOLO + DeepSORT alone is not enough for this, because it provides no range information, so a ZED stereo camera is also used to obtain depth.

Main steps:

1. Install a compatible CUDA version

2. Download the matching ZED SDK

3. Install the required libraries

4. Install the ZED-OPENCV-API

5. Run the script

Detailed instructions are on the Stereolabs GitHub. A quick sanity check of the setup is sketched below.
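Before running the full script it is worth confirming the environment. The following is a minimal sketch, not from the original project, assuming the pyzed bindings that ship with the ZED SDK are installed and a camera is connected:

import cv2
import pyzed.sl as sl

# OpenCV must provide the DNN module used by the main script
print("OpenCV version:", cv2.__version__)

# try to open the ZED camera with default parameters
cam = sl.Camera()
status = cam.open(sl.InitParameters())
print("ZED open status:", status)  # expect SUCCESS
if status == sl.ERROR_CODE.SUCCESS:
    cam.close()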

The full code is as follows:

import math
import time
from collections import deque

import cv2
import numpy as np
import pyzed.sl as sl



# ZED camera configuration
init_params = sl.InitParameters()
init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE  # Use PERFORMANCE depth mode
init_params.coordinate_units = sl.UNIT.METER  # Use meter units (for depth measurements)
init_params.camera_resolution = sl.RESOLUTION.HD720
cam = sl.Camera()
status = cam.open(init_params)
if status != sl.ERROR_CODE.SUCCESS:
    print("Failed to open the ZED camera:", status)
    exit(1)
runtime_parameters = sl.RuntimeParameters()
runtime_parameters.sensing_mode = sl.SENSING_MODE.STANDARD  # Use STANDARD sensing mode
# Setting the depth confidence parameters
runtime_parameters.confidence_threshold = 100
runtime_parameters.textureness_confidence_threshold = 100
mat1 = sl.Mat()         # left camera image
depth = sl.Mat()        # depth map
point_cloud = sl.Mat()  # XYZRGBA point cloud

# YOLOv3 setup
weightsPath = "./yolov3.weights"
configPath = "./cfg/yolov3.cfg"
labelsPath = "./data/coco.names"
mybuffer = 50
pts = deque(maxlen=mybuffer)  # recent detection centers, used for the trajectory
LABELS = open(labelsPath).read().strip().split("\n")  # class names
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")  # one color per class

net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
# Output-layer names, computed once outside the main loop. flatten() keeps
# this working across OpenCV versions, where getUnconnectedOutLayers()
# returns either an Nx1 array or a flat one.
ln = net.getLayerNames()
ln = [ln[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
fps = 0.0

videowrite = cv2.VideoWriter('./output/MySaveVideo.avi', cv2.VideoWriter_fourcc('I', '4', '2', '0'),
                             30, (1280, 720))  # HD720 left image is 1280x720
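# Main loop: grab a frame, run YOLOv3 on the left image, and read the 3D
# point at each detection center to estimate its distance from the camera.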
while True:
    t1 = time.time()
    boxes = []
    confidences = []
    classIDs = []
    err = cam.grab(runtime_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        continue

    cam.retrieve_image(mat1, sl.VIEW.LEFT)
    # The ZED returns a 4-channel BGRA image whose last channel is always 255,
    # so keep only the first three channels; ascontiguousarray makes the slice
    # writable by OpenCV's drawing functions.
    frame = np.ascontiguousarray(mat1.get_data()[:, :, :3])

    (H, W) = frame.shape[:2]
    # Build a blob from the frame and run a forward pass through the network,
    # which yields bounding boxes and class probabilities.
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    layerOutputs = net.forward(ln)
    # Retrieve the depth map and the XYZRGBA point cloud for this frame;
    # the point cloud gives the full 3D position of every pixel in meters.
    cam.retrieve_measure(depth, sl.MEASURE.DEPTH)
    cam.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)

    for output in layerOutputs:
        # loop over each detection
        for detection in output:
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            # keep only confident detections
            if confidence > 0.5:
                # YOLO predicts the box center plus width/height, normalized
                # to [0, 1]; scale them back to pixel coordinates
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                # top-left corner of the box
                xx = int(centerX - (width / 2))
                yy = int(centerY - (height / 2))
                boxes.append([xx, yy, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)
    # non-maximum suppression (score threshold 0.2, IoU threshold 0.3)
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.2, 0.3)
    if len(idxs) > 0:
        for i in idxs.flatten():
            (xx, yy) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            center = (xx + int(w / 2), yy + int(h / 2))
            pts.appendleft(center)
            # 3D point (in meters) at the center of this detection
            err, point_cloud_value = point_cloud.get_value(center[0], center[1])
            # Euclidean distance from the camera: sqrt(X^2 + Y^2 + Z^2)
            distance = math.sqrt(point_cloud_value[0] ** 2 +
                                 point_cloud_value[1] ** 2 +
                                 point_cloud_value[2] ** 2)
            # draw the box, the class label and the distance on the frame;
            # the distance is NaN when the depth is invalid (e.g. the camera
            # is too close to the scene), in which case it is not drawn
            color = [int(c) for c in COLORS[classIDs[i]]]
            frame = cv2.rectangle(frame, (xx, yy), (xx + w, yy + h), color, 2)
            if math.isfinite(distance):
                cv2.putText(frame, 'distance: ' + str(round(distance, 2)) + ' m',
                            (xx, yy - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
            cv2.putText(frame, text, (xx, yy - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
    # draw arrowed segments between consecutive detection centers to
    # visualize the recent trajectory
    for i in range(1, len(pts)):
        if pts[i - 1] is None or pts[i] is None:
            continue
        cv2.arrowedLine(frame, pts[i], pts[i - 1], (0, 0, 255), 2, tipLength=0.5)

    # overlay the (smoothed) frame rate, then record and display the frame
    frame = cv2.putText(frame, "FPS: %.1f" % fps, (20, 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 3)
    videowrite.write(frame)
    cv2.imshow("Image", frame)
    fps = (fps + (1. / (time.time() - t1))) / 2  # running average of the FPS
    key = cv2.waitKey(1)
    if key == ord("q"):
        break


# release resources on exit
videowrite.release()
cam.close()
cv2.destroyAllWindows()

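To run the script, place yolov3.weights, cfg/yolov3.cfg and data/coco.names next to it (matching the paths above) and create the output directory beforehand, since cv2.VideoWriter silently writes nothing when the target directory does not exist. Press q in the preview window to quit. The 0.5 confidence cutoff and the 0.2/0.3 NMS thresholds are ordinary defaults and can be tuned for your scene.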