This project calls the ZED camera directly to perform 3D ranging, with no stereo calibration required. Related posts:
1. YOLOv5 + stereo ranging
2. YOLOv4 calling the ZED camera directly for 3D ranging
3. A demo of the results has been published on Bilibili; click this link to view it
Environment
python==3.7
Windows
PyCharm
ZED Python API (see the ZED API setup steps)
The ZED code itself is fairly concise, but the environment absolutely has to be set up correctly first; a quick sanity check is sketched below. Later I plan to calibrate the ZED as an ordinary stereo rig, compare and analyse the results, and round out the related functionality.
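One quick way to confirm the environment is working is to open the camera and print its resolution. The short script below is only a sketch: it uses the same calls that appear in the listings further down (sl.Camera, sl.InitParameters, get_camera_information), and the file name check_zed.py is just a suggestion.

# check_zed.py -- minimal sanity check for the pyzed installation (a sketch, not part of the project)
import pyzed.sl as sl

def check_zed():
    zed = sl.Camera()
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720
    status = zed.open(init_params)
    if status != sl.ERROR_CODE.SUCCESS:
        # Typical causes: camera not plugged in, or SDK / pyzed version mismatch
        print("Failed to open the ZED camera:", repr(status))
        return
    res = zed.get_camera_information().camera_resolution
    print("ZED opened, resolution:", res.width, "x", res.height)
    zed.close()

if __name__ == "__main__":
    check_zed()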
Main code
import argparse
from threading import Lock
from time import sleep

import torch
import pyzed.sl as sl

# State shared with the YOLOv5 detection thread (the thread itself is not shown in this listing)
lock = Lock()
run_signal = False
exit_signal = False


def main():
    global image_net, exit_signal, run_signal

    zed = sl.Camera()

    input_type = sl.InputType()
    if opt.svo is not None:
        input_type.set_from_svo_file(opt.svo)

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters(input_t=input_type, svo_real_time_mode=True)
    init_params.camera_resolution = sl.RESOLUTION.HD720
    init_params.coordinate_units = sl.UNIT.METER
    init_params.depth_mode = sl.DEPTH_MODE.ULTRA  # QUALITY
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP
    init_params.depth_maximum_distance = 5

    runtime_params = sl.RuntimeParameters()
    status = zed.open(init_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    image_left_tmp = sl.Mat()
    print("Initialized Camera")

    positional_tracking_parameters = sl.PositionalTrackingParameters()
    zed.enable_positional_tracking(positional_tracking_parameters)

    # Object detection fed with external (custom) bounding boxes, tracking enabled
    obj_param = sl.ObjectDetectionParameters()
    obj_param.detection_model = sl.DETECTION_MODEL.CUSTOM_BOX_OBJECTS
    obj_param.enable_tracking = True
    zed.enable_object_detection(obj_param)

    objects = sl.Objects()
    obj_runtime_param = sl.ObjectDetectionRuntimeParameters()

    point_cloud_render = sl.Mat()
    point_cloud = sl.Mat()
    image_left = sl.Mat()
    depth = sl.Mat()

    # Utilities for 2D display
    while True and not exit_signal:
        if zed.grab(runtime_params) == sl.ERROR_CODE.SUCCESS:
            # -- Get the image
            lock.acquire()
            zed.retrieve_image(image_left_tmp, sl.VIEW.LEFT)
            image_net = image_left_tmp.get_data()
            zed.retrieve_measure(depth, sl.MEASURE.DEPTH)
            zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)
            lock.release()
            run_signal = True

            # -- Detection running on the other thread
            while run_signal:
                sleep(0.001)

            # Wait for detections
            lock.acquire()
            # -- Ingest detections
            lock.release()

            zed.retrieve_objects(objects, obj_runtime_param)
            zed.retrieve_image(image_left, sl.VIEW.LEFT)
        else:
            exit_signal = True

    exit_signal = True
    zed.close()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
    parser.add_argument('--svo', type=str, default=None, help='optional svo file')
    parser.add_argument('--img_size', type=int, default=416, help='inference size (pixels)')
    parser.add_argument('--conf_thres', type=float, default=0.6, help='object confidence threshold')
    opt = parser.parse_args()

    with torch.no_grad():
        main()
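The main() above hands image_net to a separate YOLOv5 inference thread through lock, run_signal and exit_signal, but that thread is not part of the snippet. The sketch below shows roughly what such a worker could look like, loosely following the structure of the Stereolabs custom-detector sample; it reuses the imports and globals from the listing above, the name torch_thread, the torch.hub model load and the detections variable are assumptions, and the conversion of YOLOv5 boxes into sl.CustomBoxObjectData for zed.ingest_custom_box_objects() (which would go in the "-- Ingest detections" section of main()) is left out.

# Sketch of the YOLOv5 worker thread that main() synchronizes with (assumed; adapt to your own model code).
# In the full script, main() would start it before the grab loop with threading.Thread, e.g.
#     Thread(target=torch_thread, kwargs={'weights': opt.weights,
#            'img_size': opt.img_size, 'conf_thres': opt.conf_thres}).start()
import cv2  # only needed here, for the BGRA -> RGB conversion

def torch_thread(weights, img_size, conf_thres=0.6):
    global image_net, exit_signal, run_signal, detections

    # Hypothetical model load via torch.hub; replace with your own YOLOv5 loading code
    model = torch.hub.load('ultralytics/yolov5', 'custom', path=weights)
    model.conf = conf_thres

    while not exit_signal:
        if run_signal:
            lock.acquire()
            # image_net comes from the ZED as BGRA; YOLOv5 expects RGB
            img = cv2.cvtColor(image_net, cv2.COLOR_BGRA2RGB)
            results = model(img, size=img_size)
            # results.xyxy[0]: one row per detection (x1, y1, x2, y2, conf, cls).
            # Converting these rows into sl.CustomBoxObjectData and calling
            # zed.ingest_custom_box_objects() is omitted in this sketch.
            detections = results.xyxy[0]
            lock.release()
            run_signal = False
        sleep(0.01)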
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The code below is for reference only: I tried to add a tracking module and ran into problems!
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import sys
import numpy as np
import pyzed.sl as sl
import cv2
help_string = "[s] Save side by side image [d] Save Depth, [n] Change Depth format, [p] Save Point Cloud, [m] Change Point Cloud format, [q] Quit"
prefix_point_cloud = "Cloud_"
prefix_depth = "Depth_"
path = "./"
count_save = 0
mode_point_cloud = 0
mode_depth = 0
point_cloud_format_ext = ".ply"
depth_format_ext = ".png"
def point_cloud_format_name():
    global mode_point_cloud
    if mode_point_cloud > 3:
        mode_point_cloud = 0
    switcher = {
        0: ".xyz",
        1: ".pcd",
        2: ".ply",
        3: ".vtk",
    }
    return switcher.get(mode_point_cloud, "nothing")
def depth_format_name():
    global mode_depth
    if mode_depth > 2:
        mode_depth = 0
    switcher = {
        0: ".png",
        1: ".pfm",
        2: ".pgm",
    }
    return switcher.get(mode_depth, "nothing")
def save_point_cloud(zed, filename):
    print("Saving Point Cloud...")
    tmp = sl.Mat()
    zed.retrieve_measure(tmp, sl.MEASURE.XYZRGBA)
    saved = (tmp.write(filename + point_cloud_format_ext) == sl.ERROR_CODE.SUCCESS)
    if saved:
        print("Done")
    else:
        print("Failed... Please check that you have permissions to write on disk")


def save_depth(zed, filename):
    print("Saving Depth Map...")
    tmp = sl.Mat()
    zed.retrieve_measure(tmp, sl.MEASURE.DEPTH)
    saved = (tmp.write(filename + depth_format_ext) == sl.ERROR_CODE.SUCCESS)
    if saved:
        print("Done")
    else:
        print("Failed... Please check that you have permissions to write on disk")
def save_sbs_image(zed, filename):
    image_sl_left = sl.Mat()
    zed.retrieve_image(image_sl_left, sl.VIEW.LEFT)
    image_cv_left = image_sl_left.get_data()

    image_sl_right = sl.Mat()
    zed.retrieve_image(image_sl_right, sl.VIEW.RIGHT)
    image_cv_right = image_sl_right.get_data()

    # Concatenate left and right views side by side and save with OpenCV
    sbs_image = np.concatenate((image_cv_left, image_cv_right), axis=1)
    cv2.imwrite(filename, sbs_image)
def process_key_event(zed, key):
    global mode_depth
    global mode_point_cloud
    global count_save
    global depth_format_ext
    global point_cloud_format_ext

    if key == 100 or key == 68:  # 'd' & 'D'
        save_depth(zed, path + prefix_depth + str(count_save))
        count_save += 1
    elif key == 110 or key == 78:  # 'n' & 'N'
        mode_depth += 1
        depth_format_ext = depth_format_name()
        print("Depth format: ", depth_format_ext)
    elif key == 112 or key == 80:  # 'p' & 'P'
        save_point_cloud(zed, path + prefix_point_cloud + str(count_save))
        count_save += 1
    elif key == 109 or key == 77:  # 'm' & 'M'
        mode_point_cloud += 1
        point_cloud_format_ext = point_cloud_format_name()
        print("Point Cloud format: ", point_cloud_format_ext)
    elif key == 104 or key == 72:  # 'h' & 'H'
        print(help_string)
    elif key == 115:  # 's'
        save_sbs_image(zed, "ZED_image" + str(count_save) + ".png")
        count_save += 1
    else:
        pass
def print_help():
    print(" Press 's' to save Side by side images")
    print(" Press 'p' to save Point Cloud")
    print(" Press 'd' to save Depth image")
    print(" Press 'm' to switch Point Cloud format")
    print(" Press 'n' to switch Depth format")
def main():
    # Create a ZED camera object
    zed = sl.Camera()

    # Set configuration parameters
    # Define the video source (live camera or SVO file) and camera parameters
    input_type = sl.InputType()
    if len(sys.argv) >= 2:
        input_type.set_from_svo_file(sys.argv[1])
    init = sl.InitParameters(input_t=input_type)
    init.camera_resolution = sl.RESOLUTION.HD720
    init.camera_fps = 30
    init.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init.coordinate_units = sl.UNIT.MILLIMETER

    # Open the camera
    err = zed.open(init)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        zed.close()
        exit(1)

    # Display help in console
    print_help()

    # Set runtime parameters after opening the camera
    runtime = sl.RuntimeParameters()
    # Depth sensing mode: STANDARD here; FILL returns a fully filled depth map
    # (the filled result was not very good in testing)
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD

    # Prepare the image size used for retrieval (the half-resolution lines are
    # commented out, so the full camera resolution is used here)
    image_size = zed.get_camera_information().camera_resolution
    # image_size.width = image_size.width / 2
    # image_size.height = image_size.height / 2

    # Declare your sl.Mat matrices
    image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
    depth_image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
    point_cloud = sl.Mat()

    key = ' '
    while key != 113:  # 113 == ord('q')
        err = zed.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            # Retrieve the left image and depth image at the requested resolution
            zed.retrieve_image(image_zed, sl.VIEW.LEFT, sl.MEM.CPU, image_size)
            zed.retrieve_image(depth_image_zed, sl.VIEW.DEPTH, sl.MEM.CPU, image_size)
            # Retrieve the RGBA point cloud at the requested resolution
            zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA, sl.MEM.CPU, image_size)

            # To recover data from sl.Mat to use it with opencv, use the get_data() method
            # It returns a numpy array that can be used as a matrix with opencv
            image_ocv = image_zed.get_data()
            depth_image_ocv = depth_image_zed.get_data()

            cv2.imshow("Image", image_ocv)
            cv2.imshow("Depth", depth_image_ocv)

            key = cv2.waitKey(10)
            process_key_event(zed, key)

    cv2.destroyAllWindows()
    zed.close()
    print("\nFINISH")


if __name__ == "__main__":
    main()
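Neither listing actually reads out a distance, which is the point of the 3D-ranging part. Once the XYZRGBA point cloud has been retrieved with zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA), the 3D position of any pixel can be read back with point_cloud.get_value(x, y) and turned into a Euclidean distance. The helper below is a minimal sketch of that step; the function name and the bounding-box example are assumptions, and the units are whatever was set in InitParameters.coordinate_units (metres in the first listing, millimetres in the second).

import math
import pyzed.sl as sl

def distance_at_pixel(point_cloud, x, y):
    """Distance from the left camera to pixel (x, y), or None if the depth there is invalid."""
    err, point = point_cloud.get_value(x, y)  # point = [X, Y, Z, RGBA]
    if err != sl.ERROR_CODE.SUCCESS:
        return None
    X, Y, Z = point[0], point[1], point[2]
    if not (math.isfinite(X) and math.isfinite(Y) and math.isfinite(Z)):
        return None
    return math.sqrt(X * X + Y * Y + Z * Z)

# Example: distance to the centre of a YOLOv5 box (x1, y1, x2, y2 assumed to come from the detector)
# cx, cy = int((x1 + x2) / 2), int((y1 + y2) / 2)
# dist = distance_at_pixel(point_cloud, cx, cy)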