Today, running test-191204-单个摄像头调用multiprocessing线程队列queue识别.py (a single camera feeding a multiprocessing queue for detection) failed with the following error:
D:\20191031_tensorflow_yolov3\python\python.exe D:/20191031_tensorflow_yolov3/tensorflow-yolov3/test-191204-单个摄像头调用multiprocessing线程队列queue识别.py
2019-12-05 14:40:38.472262: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
2019-12-05 14:40:39.549686: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1411] Found device 0 with properties:
name: GeForce GTX 1080 Ti major: 6 minor: 1 memoryClockRate(GHz): 1.6575
pciBusID: 0000:0e:00.0
totalMemory: 11.00GiB freeMemory: 9.10GiB
2019-12-05 14:40:39.731129: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1411] Found device 1 with properties:
name: GeForce GT 710 major: 3 minor: 5 memoryClockRate(GHz): 0.954
pciBusID: 0000:05:00.0
totalMemory: 2.00GiB freeMemory: 1.67GiB
2019-12-05 14:40:39.731780: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1461] Ignoring visible gpu device (device: 1, name: GeForce GT 710, pci bus id: 0000:05:00.0, compute capability: 3.5) with Cuda compute capability 3.5. The minimum required Cuda capability is 3.7.
2019-12-05 14:40:39.732402: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1490] Adding visible gpu devices: 0
2019-12-05 14:40:41.527990: I tensorflow/core/common_runtime/gpu/gpu_device.cc:971] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-12-05 14:40:41.528315: I tensorflow/core/common_runtime/gpu/gpu_device.cc:977] 0 1
2019-12-05 14:40:41.528511: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 0: N N
2019-12-05 14:40:41.528706: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 1: N N
2019-12-05 14:40:41.529236: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1103] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 8789 MB memory) -> physical GPU (device: 0, name: GeForce GTX 1080 Ti, pci bus id: 0000:0e:00.0, compute capability: 6.1)
Traceback (most recent call last):
File "D:/20191031_tensorflow_yolov3/tensorflow-yolov3/test-191204-单个摄像头调用multiprocessing线程队列queue识别.py", line 223, in <module>
YoloTest().dontla_evaluate_detect()
File "D:/20191031_tensorflow_yolov3/tensorflow-yolov3/test-191204-单个摄像头调用multiprocessing线程队列queue识别.py", line 201, in dontla_evaluate_detect
process.start()
File "D:\20191031_tensorflow_yolov3\python\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "D:\20191031_tensorflow_yolov3\python\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "D:\20191031_tensorflow_yolov3\python\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "D:\20191031_tensorflow_yolov3\python\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "D:\20191031_tensorflow_yolov3\python\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
AttributeError: Can't pickle local object 'YoloTest.dontla_evaluate_detect.<locals>.predict_result'
Process finished with exit code 1
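
The key part is the last line of the traceback: on Windows, multiprocessing starts child processes with the spawn method, which pickles the Process target (and its arguments) and sends them to the child. predict_result is defined inside dontla_evaluate_detect, so it is a "local object" that pickle cannot serialize, hence the AttributeError. A minimal sketch that reproduces the same class of error without TensorFlow or RealSense (the names outer and inner are made up purely for illustration):

import multiprocessing


def outer():
    def inner(q):  # nested (local) function, not importable by the child process
        q.put('hello')

    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=inner, args=(q,))
    p.start()  # on Windows: AttributeError: Can't pickle local object 'outer.<locals>.inner'
    p.join()


if __name__ == '__main__':
    outer()

The full script that produced the traceback above: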
# -*- coding: utf-8 -*-
"""
@File : test-191204-单个摄像头调用multiprocessing线程队列queue识别.py
@Time : 2019/12/5 13:50
@Author : Dontla
@Email : [email protected]
@Software: PyCharm
"""
import multiprocessing
import cv2
import numpy as np
import tensorflow as tf
import core.utils as utils
from core.config import cfg
from core.yolov3 import YOLOV3
import pyrealsense2 as rs
import functools

class YoloTest(object):
    def __init__(self):
        # D·C 191111: __C.TEST.INPUT_SIZE = 544
        self.input_size = cfg.TEST.INPUT_SIZE
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        # Dontla 191106 note: initialize the dict attribute holding the contents of the class.names file
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        # D·C 191115: number-of-classes attribute
        self.num_classes = len(self.classes)
        self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        # D·C 191111: __C.TEST.SCORE_THRESHOLD = 0.3
        self.score_threshold = cfg.TEST.SCORE_THRESHOLD
        # D·C 191120: __C.TEST.IOU_THRESHOLD = 0.45
        self.iou_threshold = cfg.TEST.IOU_THRESHOLD
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        # D·C 191120: __C.TEST.ANNOT_PATH = "./data/dataset/Dontla/20191023_Artificial_Flower/test.txt"
        self.annotation_path = cfg.TEST.ANNOT_PATH
        # D·C 191120: __C.TEST.WEIGHT_FILE = "./checkpoint/f_g_c_weights_files/yolov3_test_loss=15.8845.ckpt-47"
        self.weight_file = cfg.TEST.WEIGHT_FILE
        # D·C 191115: write flag (bool)
        self.write_image = cfg.TEST.WRITE_IMAGE
        # D·C 191115: __C.TEST.WRITE_IMAGE_PATH = "./data/detection/" (path where images are written after boxes and label text are drawn)
        self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
        # D·C 191116: TEST.SHOW_LABEL is set to True
        self.show_label = cfg.TEST.SHOW_LABEL

        # D·C 191120: create the name scope 'input'
        with tf.name_scope('input'):
            # D·C 191120: create variables (placeholders that reserve space for the inputs)
            self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
            self.trainable = tf.placeholder(dtype=tf.bool, name='trainable')

        model = YOLOV3(self.input_data, self.trainable)
        self.pred_sbbox, self.pred_mbbox, self.pred_lbbox = model.pred_sbbox, model.pred_mbbox, model.pred_lbbox

        # D·C 191120: create the name scope for the exponential moving average
        with tf.name_scope('ema'):
            ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)

        # D·C 191120: launch the graph in a session that allows soft device placement and records placement
        # decisions (not sure what that really means...); allow_soft_placement=True lets TF automatically pick an available GPU/CPU
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        # D·C 191120: variables_to_restore() maps the shadow (moving-average) variables directly onto the variables themselves when loading the model
        self.saver = tf.train.Saver(ema_obj.variables_to_restore())
        # D·C 191120: restore the trained model from the weight file
        self.saver.restore(self.sess, self.weight_file)
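        # NOTE: self.sess is a live tf.Session held on the instance; this matters for the multiprocessing
        # experiment further down, because anything that must be pickled and sent to a child process would
        # drag this (unpicklable) session along with it.
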
    def predict(self, image):
        # D·C 191107: work on a copy of the image so the original isn't modified in place
        org_image = np.copy(image)
        # D·C 191107: get the image dimensions
        org_h, org_w, _ = org_image.shape
        # D·C 191108: this function combines the source image with input_size and turns it into the square
        # image fed to the network (544x544 by default: the shrunken source in the middle, grey padding around it):
        image_data = utils.image_preprocess(image, [self.input_size, self.input_size])
        # D·C 191108: print the shape to check:
        # print(image_data.shape)
        # (544, 544, 3)
        # D·C 191108: add a new axis; this is the batch dimension the network expects
        image_data = image_data[np.newaxis, ...]
        # D·C 191108: print the shape to check:
        # print(image_data.shape)
        # (1, 544, 544, 3)
        # D·C 191110: the three outputs hold the predicted boxes (lots of them: useful, useless and
        # overlapping ones all mixed together), although the raw printed values are hard to interpret
        pred_sbbox, pred_mbbox, pred_lbbox = self.sess.run(
            [self.pred_sbbox, self.pred_mbbox, self.pred_lbbox],
            feed_dict={
                self.input_data: image_data,
                self.trainable: False
            }
        )
        # D·C 191110: print the types, shapes and values of the three outputs to check:
        # print(type(pred_sbbox))
        # print(type(pred_mbbox))
        # print(type(pred_lbbox))
        # all three are <class 'numpy.ndarray'>
        # print(pred_sbbox.shape)
        # print(pred_mbbox.shape)
        # print(pred_lbbox.shape)
        # (1, 68, 68, 3, 6)
        # (1, 34, 34, 3, 6)
        # (1, 17, 17, 3, 6)
        # print(pred_sbbox)
        # print(pred_mbbox)
        # print(pred_lbbox)
        # D·C 191110: reshape(-1, 5 + num_classes) means "however many rows it takes, just give me that many
        # columns"; concatenate then stacks the three arrays into one big array with 6 columns
        # (the last self.num_classes columns appear to hold the per-class probabilities of each box)
        pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + self.num_classes)),
                                    np.reshape(pred_mbbox, (-1, 5 + self.num_classes)),
                                    np.reshape(pred_lbbox, (-1, 5 + self.num_classes))], axis=0)
        # D·C 191111: print pred_bbox and its shape to check:
        # print(pred_bbox)
        # print(pred_bbox.shape)
        # (18207, 6)   -> 18207 = (68*68 + 34*34 + 17*17) * 3 anchors per scale
        # D·C 191111: presumably the first filtering pass, dropping boxes scored below score_threshold
        # (far fewer boxes remain afterwards):
        # D·C 191115: bboxes has shape [n, 6]: the first four columns are coordinates, the fifth is the score,
        # the sixth is the class index
        bboxes = utils.postprocess_boxes(pred_bbox, (org_h, org_w), self.input_size, self.score_threshold)
        # D·C 191111: presumably the second filtering pass, non-max suppression with iou_threshold:
        bboxes = utils.nms(bboxes, self.iou_threshold)
        return bboxes

    def dontla_evaluate_detect(self):
        pipeline1 = rs.pipeline()
        config1 = rs.config()
        ctx = rs.context()
        # get the serial number of the connected camera programmatically
        serial1 = ctx.devices[0].get_info(rs.camera_info.serial_number)
        config1.enable_device(serial1)
        config1.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        config1.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        pipeline1.start(config1)
        # create an align object (align depth to the color stream)
        align1 = rs.align(rs.stream.color)
        try:
            while True:
                frames1 = pipeline1.wait_for_frames()
                # get the aligned frameset
                aligned_frames1 = align1.process(frames1)
                # get the aligned depth frame and the color frame
                aligned_depth_frame1 = aligned_frames1.get_depth_frame()
                color_frame1 = aligned_frames1.get_color_frame()
                # get the color frame intrinsics
                color_profile1 = color_frame1.get_profile()
                cvsprofile1 = rs.video_stream_profile(color_profile1)
                color_intrin1 = cvsprofile1.get_intrinsics()
                color_intrin_part1 = [color_intrin1.ppx, color_intrin1.ppy, color_intrin1.fx, color_intrin1.fy]
                # if not aligned_depth_frame1 or not color_frame1:
                #     continue
                # if not aligned_depth_frame2 or not color_frame2:
                #     continue
                color_image1 = np.asanyarray(color_frame1.get_data())
                # D·C 191121: show the frame to check
                # cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
                # cv2.imshow('RealSense', color_frame)
                # cv2.waitKey(1)
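                # NOTE: this nested function is the 'local object' that the pickler in the traceback chokes on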
                def predict_result(color_image, queue):
                    queue.put(self.predict(color_image))

                queue = multiprocessing.Queue()
                jobs = []
                for i in range(1):
                    process = multiprocessing.Process(target=predict_result, args=(color_image1, queue))
                    jobs.append(process)
                    process.start()
                # wait for the processes to finish
                for process in jobs:
                    process.join()
                results = [queue.get() for j in jobs]
                bboxes_pr1 = results[0]
                # bboxes_pr1 = self.predict(color_image1)
                # bboxes_pr2 = self.predict(color_image2)
                image1 = utils.draw_bbox(color_image1, bboxes_pr1, aligned_depth_frame1, color_intrin_part1,
                                         show_label=self.show_label)
                # cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
                cv2.imshow('window1', image1)
                cv2.waitKey(1)
        finally:
            pipeline1.stop()

if __name__ == '__main__':
    YoloTest().dontla_evaluate_detect()
Reference I looked at: a write-up titled "AttributeError: Can't pickle local object 解决办法" (i.e. how to fix this error).
It didn't solve it for me, anyway...
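
As far as I can piece together: moving predict_result out to module level would get rid of this particular AttributeError, but the worker still calls self.predict, so spawn would then have to pickle the whole YoloTest instance, tf.Session included, which is unlikely to go through either. One direction that avoids pickling altogether is to run the prediction in a thread instead of a separate process, since threads share the parent's memory. The sketch below is untested, and predict_worker, detect_in_thread and detector are names I made up, not part of the script above:

import threading
import queue


def predict_worker(detector, image, out_queue):
    # runs in a thread inside the same process, so detector (and its tf.Session)
    # is shared directly instead of being pickled
    out_queue.put(detector.predict(image))


def detect_in_thread(detector, image):
    out_queue = queue.Queue()  # plain thread-safe queue, no serialization involved
    worker = threading.Thread(target=predict_worker, args=(detector, image, out_queue))
    worker.start()
    worker.join()  # wait for the prediction to finish
    return out_queue.get()

# inside the capture loop this would replace the multiprocessing block, roughly:
#     bboxes_pr1 = detect_in_thread(self, color_image1)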