Source code improvements
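The modified source below wraps YOLOv5 inference in a FastAPI service: a POST request to /distinguish/deposit carries a UUID and a base64-encoded image, the image is written to the source folder and run through the detector, the detection summary is pushed to a Kafka topic, and the same summary is returned to the caller as JSON.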
import argparse
import time
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'  # tolerate duplicate OpenMP runtimes (common MKL/PyTorch workaround)
from pathlib import Path
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import matplotlib
import shutil
matplotlib.use('TkAgg')  # GUI backend; use 'Agg' instead on a headless server
import datetime
import json
import uuid
from kafka import KafkaProducer
from kafka.errors import KafkaError
app = FastAPI()
import cv2 as zj  # OpenCV, aliased as zj throughout this file
import torch
import torch.backends.cudnn as cudnn
from numpy import random
import numpy as np
from models.experimental import attempt_load
from utils.zhoujie import zhoujie_data, zhoujie_source, zhoujie_conf
from utils.original import LoadStreams, LoadImages, check_img_size, check_requirements, non_max_suppression, apply_classifier, scale_coords, \
    xyxy2xywh, strip_optimizer, set_logging, increment_path, plot_one_box, select_device1, select_device2, load_classifier, time_synchronized
producer = KafkaProducer(bootstrap_servers='h01:9092')  # detection results are pushed to this broker
topic = 'test01'
# Handle cross-origin (CORS) requests
origins = [
"*",
"http://localhost.tiangolo.com",
"https://localhost.tiangolo.com",
"http://localhost",
"http://localhost:8888",
"http://localhost:8888/distinguish/deposit",
"http://172.16.11.167:8888/distinguish/deposit",
"http://外网ip:端口号/distinguish/deposit",
"http://外网ip:端口号",
"http://172.16.15.18:8888/distinguish/deposit",
"http://172.16.15.18:8888",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class Message(BaseModel):
    uuid: str
    img_base: str  # base64-encoded image
@app.post("/distinguish/deposit")
async def json_send(obj: Message):
    req_uuid = obj.uuid  # renamed to avoid shadowing the uuid module
    img_base = obj.img_base
    # Decode the uploaded image to disk and remember the original source setting
    defaultpath, source = zhoujie_source(req_uuid, img_base, opt.source)
    opt.source = defaultpath
    msg_string = None
    try:
        result = detect()  # do not pass model positionally; it would bind to save_img
        msg_string = result['msg_string']
        producer.send(topic, json.dumps(msg_string).encode())
        print("send:" + json.dumps(msg_string))
        time.sleep(0.5)
    except Exception:
        msg_string = None
    finally:
        opt.source = source  # restore the original source path in every case
    return {'status': '10000', 'data': msg_string, 'return_path': req_uuid}
def detect(save_img=True):
    alert_list = []
    result = {'msg_string': None}  # returned even when nothing is detected
    global model
    output, saveput, source, weights, view_img, save_txt, imgsz = opt.output, opt.saveput, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
('rtsp://', 'rtmp://', 'http://'))
# Directories
    save_dir = Path(opt.saveput)
    if save_dir.exists():
        shutil.rmtree(save_dir)  # clear results from the previous run
    os.makedirs(save_dir)
    if save_txt:
        (save_dir / 'labels').mkdir(parents=True, exist_ok=True)  # label files are written here
out_dir = Path(opt.output)
# Initialize
set_logging()
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
# Second-stage classifier
classify = False
    if classify:
        modelc = load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])
        modelc.to(device).eval()  # load_state_dict returns a status tuple, so move/eval separately
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = True
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz)
else:
save_img = True
dataset = LoadImages(source, img_size=imgsz)
# Get names and colors
names = model.module.names if hasattr(model, 'module') else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
# Run inference
t0 = time.time()
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
# Apply NMS
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
t2 = time_synchronized()
# Apply Classifier
if classify:
pred = apply_classifier(pred, modelc, img, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
if webcam: # batch_size >= 1
p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
else:
p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # img.jpg
out_path = str(out_dir / p.name) # img.jpg
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
s += '%gx%g ' % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if det is not None and len(det):
# det xyxy,conf,class
res = det.cpu().numpy()
labels = res[:, -1].astype(int)
                labels_list = [names[item] for item in labels]
                labels_arr = np.array(labels_list)
# alert_list
alert_list.append((res[:, 0:4], res[:, -2], res[:, -1], labels_arr))
                labels_name = [str(i) for i in labels_arr]  # class names for the current image
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
# Write results
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
                # Drop low-confidence targets (project helper)
                zhoujie_conf(det, names)
                # Draw boxes and build the outgoing message
                for *xyxy, conf, cls in reversed(det):
                    if save_img or view_img:  # add bbox to image
                        label = f'{names[int(cls)]} {conf:.2f}'
                        if label.split(' ')[0] == 'trash':
                            plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
                data = det.cpu().numpy()
                string = zhoujie_data(data, labels_name)
                result = {'msg_string': string}
# Print time (inference + NMS)
print(f'{s}Done. ({t2 - t1:.3f}s)')
# Stream results
            if view_img:
                zj.imshow(str(p), im0)
                zj.waitKey(1)  # needed for the OpenCV window to refresh
# Save results (image with detections)
if save_img:
if dataset.mode == 'image':
zj.imwrite(save_path, im0)
if len(alert_list):
zj.imwrite(out_path, im0)
else: # 'video'
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, zj.VideoWriter):
vid_writer.release() # release previous video writer
fourcc = 'mp4v' # output video codec
fps = vid_cap.get(zj.CAP_PROP_FPS)
w = int(vid_cap.get(zj.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(zj.CAP_PROP_FRAME_HEIGHT))
vid_writer = zj.VideoWriter(save_path, zj.VideoWriter_fourcc(*fourcc), fps, (w, h))
vid_writer.write(im0)
if save_txt or save_img:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
    return result
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', nargs='+', type=str, default='weights/gaokong.pt', help='model.pt path(s)')
parser.add_argument('--source', type=str, default='ZhouJie/images', help='source') # file/folder, 0 for webcam
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
parser.add_argument('--saveput', type=str, default='ZhouJie/saveput', help='saveput folder') # saveput folder
parser.add_argument('--output', type=str, default='ZhouJie/output', help='output folder') # output folder
parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--update', action='store_true', help='update all models')
parser.add_argument('--project', default='runs/detect', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
opt = parser.parse_args()
check_requirements()
with torch.no_grad():
device = select_device1(opt.device)
        model = attempt_load(opt.weights, map_location=device)  # load the FP32 model once at startup
half = device.type != 'cpu' # half precision only supported on CUDA
if half:
model.half() # to FP16
        uvicorn.run(app=app, host="172.16.15.18", port=8888)  # bind address also appears in the CORS whitelist
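Once the service is up, it can be exercised end to end with a small client. The following is a minimal sketch, not part of the service itself: it assumes the server is reachable at the address configured above and that test.jpg is any local image file.

import base64
import uuid
import requests

# Read a local image and base64-encode it for the img_base field
with open('test.jpg', 'rb') as f:
    img_base = base64.b64encode(f.read()).decode()

resp = requests.post(
    'http://172.16.15.18:8888/distinguish/deposit',
    json={'uuid': str(uuid.uuid4()), 'img_base': img_base},
)
print(resp.json())  # expected shape: {'status': '10000', 'data': ..., 'return_path': ...}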
Starting the service with a shell script
#!/bin/bash
PROG_NAME="output1.py"
function check() {
    PID=$(ps aux | grep "$1" | grep -v grep | awk '{print $2}')
    if [[ -n "${PID}" ]]; then
        echo "Process already exists"
        exit 1
    fi
}
function check_start() {
    PID=$(ps aux | grep "$1" | grep -v grep | awk '{print $2}')
    if [[ -n "${PID}" ]]; then
        echo "Service started successfully: $1"
    fi
}
function stop_service() {
    PID=$(ps aux | grep "$1" | grep -v grep | awk '{print $2}')
    if [[ -n "${PID}" ]]; then
        # Kill the PIDs collected above instead of re-parsing ps output
        echo "${PID}" | xargs kill -9
        test $? -eq 0 && echo "Process has been killed: $1"
    else
        echo "Process is not running: $1"
    fi
}
function start_service() {
check ${PROG_NAME}
nohup python ${PROG_NAME} >/dev/null 2>&1 &
check_start ${PROG_NAME}
}
case $1 in
start)
start_service
;;
stop)
stop_service ${PROG_NAME}
;;
restart)
stop_service ${PROG_NAME}
sleep 1
start_service
;;
*)
echo "Usage: bash $0 start | stop | restart"
;;
esac
How to start it
conda activate zhoujie
./tbyoung.sh start
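If the script is not executable yet, run chmod +x tbyoung.sh once beforehand. To confirm that detection results actually reach Kafka, a throwaway consumer can be attached to the topic. This is a minimal sketch using the same kafka-python package and the broker address from the source above:

from kafka import KafkaConsumer

# Subscribe to the topic the service publishes to and print each message
consumer = KafkaConsumer('test01', bootstrap_servers='h01:9092')
for msg in consumer:
    print(msg.value.decode())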