REID.py
# TODO KCF 1: imports
import sys
sys.path.append('/home/hp/zjc/nk_PyCharm/PyCharm_project/nk_DeepSortYolo/deep_sort_yolov3-master/KCFcpp_py_wrapper')
import cv2
import os
os.environ["CUDA_VISIBLE_DEVICE"] = "0" # os.environ['环境变量名称']='环境变量值' #其中key和value均为string类型
sys.path.append('/home/hp/zjc/Tensorflow/Deeplearning/SSD-Tensorflow-master/')
from tools import generate_detections as gdet
feature_flag = True
model_filename = 'model_data/mars-small128.pb'
if __name__ == '__main__':
    if feature_flag:
        encoder = gdet.create_box_encoder(model_filename, batch_size=1)
    cap = cv2.VideoCapture('/home/hp/zjc/nk_PyCharm/PyCharm_project/nk_fishbox/nk_background_sub/fish_video.avi')
    while True:
        ret, frame = cap.read()
        if not ret:  # stop when the video ends instead of passing None to imshow
            break
        # features = encoder(frame, boxs)
        cv2.imshow('', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
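
# A minimal sketch (not part of the original script) of how the commented-out
# feature extraction above could be wired in once a detector supplies boxes.
# `boxs` is a hypothetical list of detections in (x, y, w, h) top-left format,
# e.g. from the YOLO detector used elsewhere in this project.
def extract_features(encoder, frame, boxs):
    # encoder comes from gdet.create_box_encoder(model_filename, batch_size=1)
    # and returns one 128-d appearance feature per box.
    features = encoder(frame, boxs)  # shape: (len(boxs), 128)
    return features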
tools/generate_detections.py
# -*- coding=utf-8 -*-
# Author: zjc
# Creation Date:19-8-27
# vim: expandtab:ts=4:sw=4
import os
import errno
import argparse
import numpy as np
import cv2
import tensorflow as tf
from PIL import Image
# from PIL import Image is used by the TODO scaling variants
def _run_in_batches(f, data_dict, out, batch_size):  # TODO 4
    data_len = len(out)
    # print(data_len)  # equals the number of detected people
    num_batches = int(data_len / batch_size)

    s, e = 0, 0
    for i in range(num_batches):
        s, e = i * batch_size, (i + 1) * batch_size
        batch_data_dict = {k: v[s:e] for k, v in data_dict.items()}
        out[s:e] = f(batch_data_dict)
    if e < len(out):
        batch_data_dict = {k: v[e:] for k, v in data_dict.items()}
        out[e:] = f(batch_data_dict)
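
# A minimal sketch (not part of the original file) illustrating how
# _run_in_batches fills `out`: full batches first, then one remainder batch.
# The doubling "model" below is an assumption used purely for illustration.
def _demo_run_in_batches():
    data = np.arange(10, dtype=np.float32).reshape(10, 1)
    out = np.zeros_like(data)
    _run_in_batches(lambda d: d["x"] * 2.0, {"x": data}, out, batch_size=4)
    # Two full batches of 4 plus a remainder batch of 2; out == data * 2.
    return out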
# The patch extraction below crops the person-shaped patch directly to (64, 128)
# width-by-height without rescaling, because we introduced a new scaling method
# (above), def scaling_extract_image_patch, which rescales the animal's full body
# to (64, 128) so that all of its features are included.
# TODO 5 Method 3: cropping, x direction
def extract_image_patch(image, bbox, patch_shape):
    # patch_shape = image_shape[:2] = [128, 64]
    """Extract image patch from bounding box.

    Parameters
    ----------
    image : ndarray
        The full image.
    bbox : array_like
        The bounding box in format (x, y, width, height); (x, y) is the
        top-left corner.
    patch_shape : Optional[array_like]
        This parameter can be used to enforce a desired patch shape
        (height, width). First, the `bbox` is adapted to the aspect ratio
        of the patch shape, then it is clipped at the image boundaries.
        If None, the shape is computed from :arg:`bbox`.

    Returns
    -------
    ndarray | NoneType
        An image patch showing the :arg:`bbox`, optionally reshaped to
        :arg:`patch_shape`.
        Returns None if the bounding box is empty or fully outside of the
        image boundaries.

    """
    # print('bbox:', bbox)  # list
    bbox = np.array(bbox)
    # bbox1 = bbox
    # bbox1[2:] += bbox1[:2]
    # bbox1 = bbox1.astype(np.int)
    # sx1, sy1, ex1, ey1 = bbox1
    # print('bbox:', bbox)  # ndarray
    # Only the width is changed to match the target aspect ratio.
    if patch_shape is not None:  # patch_shape is image_shape[:2] = [128, 64] (height, width)
        # correct aspect ratio to patch shape
        target_aspect = float(patch_shape[1]) / patch_shape[0]  # width / height; bbox[3] (the height) stays fixed, only the width is adjusted
        new_width = target_aspect * bbox[3]  # bbox[3] is the height
        # new width = (width / height) * bbox height
        bbox[0] -= (new_width - bbox[2]) / 2  # shift x to keep the box centred; the new width is usually smaller than the old one, so the subtraction ends up adding
        # bbox is (x, y, width, height)
        # bbox[2] is the original width: x - (new width - original width) / 2
        bbox[2] = new_width
        # bbox now holds the new (x, y, width, height)

    # convert (x, y, width, height) to (left, top, right, bottom), i.e. ltrb
    bbox[2:] += bbox[:2]
    bbox = bbox.astype(np.int)

    # clip at image boundaries
    bbox[:2] = np.maximum(0, bbox[:2])  # clamp left, top to be non-negative
    bbox[2:] = np.minimum(np.asarray(image.shape[:2][::-1]) - 1, bbox[2:])  # keep right, bottom inside the image
    if np.any(bbox[:2] >= bbox[2:]):  # left/top must be strictly smaller than right/bottom
        return None
    sx, sy, ex, ey = bbox  # left, top, right, bottom
    image = image[sy:ey, sx:ex]  # slice rows (y) first, then columns (x)
    # image1 = image[sy1:ey1, sx1:ex1]
    cv2.imwrite('/home/hp/zjc/nk_PyCharm/PyCharm_project/nk_DeepSortYolo/train/' + '-cut-x1' + ".jpg", image)
    # cv2.imwrite('/home/hp/zjc/nk_PyCharm/PyCharm_project/nk_DeepSortYolo/train/' + '-2' + ".jpg", image1)
    image = cv2.resize(image, tuple(patch_shape[::-1]))
    cv2.imwrite('/home/hp/zjc/nk_PyCharm/PyCharm_project/nk_DeepSortYolo/train/' + '-cut-x2-zoom' + ".jpg", image)
    # cv2.resize expects (width, height); image_shape is [128, 64, 3] and
    # image_shape[:2] = patch_shape is [height, width]
    return image
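
# A short worked example (not part of the original file) of the aspect-ratio
# correction above, using made-up numbers. With patch_shape = [128, 64] the
# target aspect is 64 / 128 = 0.5, so a box (x=100, y=50, w=40, h=120) gets
# new_width = 0.5 * 120 = 60, x shifts by -(60 - 40) / 2 = -10, and the box
# becomes (90, 50, 60, 120) before the ltrb conversion and clipping.
def _demo_extract_image_patch():
    image = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy frame
    patch = extract_image_patch(image, [100, 50, 40, 120], [128, 64])
    # patch has shape (128, 64, 3), matching the encoder's input tensor.
    return patch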
# TODO
class ImageEncoder(object):  # TODO ImageEncoder

    def __init__(self, checkpoint_filename, input_name="images",
                 output_name="features"):
        self.session = tf.Session()
        with tf.gfile.GFile(checkpoint_filename, "rb") as file_handle:  # TODO 2: study this TensorFlow part
            graph_def = tf.GraphDef()
            # tf.gfile.GFile(filename, mode) returns a file handle, analogous to
            # Python's built-in open(): filename is the file to open and mode is
            # how it is read or written.
            graph_def.ParseFromString(file_handle.read())
        tf.import_graph_def(graph_def, name="net")
        # Import the graph from graph_def into the current default graph.
        # graph_def: a GraphDef proto containing the operations to import.
        self.input_var = tf.get_default_graph().get_tensor_by_name(
            "net/%s:0" % input_name)
        print('self.input_var:', self.input_var)
        # input_var: Tensor("net/images:0", shape=(?, 128, 64, 3), dtype=uint8)
        self.output_var = tf.get_default_graph().get_tensor_by_name(
            "net/%s:0" % output_name)
        print('self.output_var:', self.output_var)
        # output_var: Tensor("net/features:0", shape=(?, 128), dtype=float32)

        assert len(self.output_var.get_shape()) == 2
        assert len(self.input_var.get_shape()) == 4
        self.feature_dim = self.output_var.get_shape().as_list()[-1]
        self.image_shape = self.input_var.get_shape().as_list()[1:]
        # print('self.feature_dim', self.feature_dim)  # 128
        # print('self.image_shape', self.image_shape)  # [128, 64, 3] (height, width, channels)

    def __call__(self, data_x, batch_size=32):
        out = np.zeros((len(data_x), self.feature_dim), np.float32)
        # print(data_x)  # 4-D array of shape (1, 128, 64, 3); the leading 1 becomes 2 for two people
        # print(data_x.shape)
        # print(len(data_x))  # 1
        # print('zeros:', out.shape)  # (1, 128): one person, 128 features, e.g. [[0.17.., ..., 0.066..]]
        _run_in_batches(
            lambda x: self.session.run(self.output_var, feed_dict=x),
            {self.input_var: data_x}, out, batch_size)
        # _run_in_batches(f, data_dict, out, batch_size):
        #   f           - the lambda above
        #   data_dict   - {self.input_var: data_x}
        #   out         - shape (1, 128): one person, 128 features
        #   batch_size  - 1 here
        # TODO 3 print('ImageEncoder output out:', '\n', out, '\n', out.shape)
        return out
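
# A minimal sketch (not part of the original file) of calling ImageEncoder
# directly on a batch of pre-cropped patches. The random patches are only
# placeholders; real inputs come from extract_image_patch above, and the
# checkpoint path is the one used in REID.py.
def _demo_image_encoder(checkpoint_filename='model_data/mars-small128.pb'):
    enc = ImageEncoder(checkpoint_filename)
    patches = np.random.randint(0, 255, (2, 128, 64, 3), dtype=np.uint8)
    features = enc(patches, batch_size=1)  # shape (2, 128): two patches, one 128-d feature each
    return features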
# todo->>>
# TODO Feature 2: the original single x-direction feature
def create_box_encoder(model_filename, input_name="images",
                       output_name="features", batch_size=32):  # TODO 1 called as create_box_encoder(model_filename, batch_size=1)
    image_encoder = ImageEncoder(model_filename, input_name, output_name)  # TODO ImageEncoder
    # calling image_encoder returns out, e.g. [[0.17.., ..., 0.066..], [...]] with shape (2, 128): two people, 128 features each
    # print('image_encoder:', image_encoder)
    image_shape = image_encoder.image_shape
    # print('image_shape:', image_shape)  # [128, 64, 3] (height, width, channels), i.e. self.image_shape

    def encoder(image, boxes):
        print('--> single-feature create_box_encoder')
        image_patches = []
        # image_patches_y = []  # todo y
        # image_patches_y_270 = []  # todo y270
        for box in boxes:  # each target is processed in turn
            patch = extract_image_patch(image, box, image_shape[:2])  # TODO 5 x scaling method
            # patch_y = scaling_y_extract_image_patch(image, box, image_shape[:2])  # todo y
            # patch_y_270 = scaling_y_270_extract_image_patch(image, box, image_shape[:2])  # todo y270
            # TODO 5 extract_image_patch & scaling_x_extract_image_patch
            # print('type:', type(patch))
            # image_shape[:2] = [128, 64]
            # type(patch) is numpy.ndarray
            # TODO patch is the normal vertical ReID patch; patch2 and patch3 are the horizontal ones
            if patch is None:
                print("WARNING: Failed to extract image patch: %s." % str(box))
                patch = np.random.uniform(
                    0., 255., image_shape).astype(np.uint8)  # random values in [0, 255] with shape [128, 64, 3]
            '''
            if patch_y is None:  # todo y
                print("WARNING: Failed to extract image patch: %s." % str(box))
                patch_y = np.random.uniform(
                    0., 255., image_shape).astype(np.uint8)  # random values in [0, 255] with shape [128, 64, 3]
            if patch_y_270 is None:  # todo y
                print("WARNING: Failed to extract image patch: %s." % str(box))
                patch_y_270 = np.random.uniform(
                    0., 255., image_shape).astype(np.uint8)  # random values in [0, 255] with shape [128, 64, 3]
            '''
            image_patches.append(patch)  # one box and one patch per detected person
            # image_patches_y.append(patch_y)  # todo y
            # image_patches_y_270.append(patch_y_270)  # todo y270
        # type(image_patches) is a list
        image_patches = np.asarray(image_patches)
        # image_patches_y = np.asarray(image_patches_y)  # todo y
        # image_patches_y_270 = np.asarray(image_patches_y_270)  # todo y270
        # type(image_patches) is now numpy.ndarray
        # print('batch_size', batch_size)  # always 1 here
        # print('image_patches', image_patches)
        return image_encoder(image_patches, batch_size)
        # image_encoder(image_patches_y, batch_size), image_encoder(image_patches_y_270, batch_size)

    return encoder