Full project download link: https://download.csdn.net/download/weixin_46570668/86954697?spm=1001.2014.3001.5503
We use PyTorch's built-in pretrained model fasterrcnn_resnet50_fpn, which was pretrained on the COCO dataset.
COCO is a large, richly annotated dataset for object detection and segmentation. It covers 80 object categories and contains more than 330,000 images, of which about 200,000 are annotated, with over 1.5 million labeled object instances in total. Note that although COCO 2017 has 80 categories, the category IDs are not contiguous: the largest ID is 90.
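Because the IDs skip values, predictions are usually decoded by indexing into a 91-entry name list with placeholders on the unused IDs. A minimal sketch (the list is abbreviated here; torchvision's object-detection tutorial spells out all 91 entries):
# Index 0 is the background; unused COCO IDs hold an 'N/A' placeholder.
COCO_INSTANCE_CATEGORY_NAMES = [
    '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
    'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A',
    'stop sign',
    # ... remaining entries omitted ...
]
print(COCO_INSTANCE_CATEGORY_NAMES[3])  # a raw label ID of 3 means 'car'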
The code for loading the pretrained model is as follows:
import torch
import torchvision
# num_classes=91: the 90 possible COCO label IDs plus one background class
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
    pretrained=True, progress=True, num_classes=91, pretrained_backbone=True)
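The model must be switched to evaluation mode for inference; the script below does this inside the detection function and runs everything on the CPU. If you have a GPU, here is a sketch of moving the model over (my addition, not part of the original code):
# Optional GPU placement (assumption: the original script is CPU-only)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device).eval()
# Every input tensor must then be moved too, e.g. model([transformed_img.to(device)])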
Use OpenCV to open the camera; the code is as follows:
import cv2
capture = cv2.VideoCapture(0)
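VideoCapture(0) can fail silently (wrong device index, camera already in use), so it is worth adding a guard that the original omits:
if not capture.isOpened():
    raise RuntimeError('Cannot open camera 0; try a different device index')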
Obtain the labels and their corresponding class names (as far as I recall, this file can be downloaded from the COCO or the PyTorch website; in any case I have bundled it with the code in the uploaded resource):
import os

classes_path = 'classes.txt'
classes_path = os.path.expanduser(classes_path)
with open(classes_path) as f:
    class_names = f.readlines()
class_names = [c.strip() for c in class_names]
num_classes = len(class_names)
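For class_names[int(c)] below to line up with the model's raw label IDs, classes.txt should hold one name per line in COCO ID order, 91 lines in total, with a placeholder on every unused ID. The first few lines would look like this (my reconstruction of the bundled file):
__background__
person
bicycle
car
motorcycle
...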
The function that runs object detection and draws the results:
def detect_image(image):
    start_time = time.time()
    image_shape = np.array(np.shape(image)[0:2])
    old_width = image_shape[1]
    old_height = image_shape[0]
    old_image = copy.deepcopy(image)
    # Resize so that the shorter side is 600 px while keeping the aspect ratio
    width, height = get_new_img_size(old_width, old_height)
    image = image.resize([width, height])
    with torch.no_grad():
        model.eval()
        # Convert the resized PIL image to a CHW float tensor in [0, 1]
        transformed_img = torchvision.transforms.ToTensor()(image.convert("RGB"))
        result = model([transformed_img])
    # Keep only detections above the confidence threshold
    bbox = []
    label = []
    conf = []
    for i in range(len(result[0]['scores'])):
        if result[0]['scores'][i] > confidence:
            bbox.append(result[0]['boxes'][i].tolist())
            label.append(result[0]['labels'][i].tolist())
            conf.append(result[0]['scores'][i].tolist())
    if len(bbox) == 0:
        # Nothing above the threshold: return the frame unchanged
        print("time:", time.time() - start_time)
        return old_image
    bbox = np.array(bbox)
    label = np.array(label)
    conf = np.array(conf)
    # Map the boxes from the resized image back to the original resolution
    bbox[:, 0::2] = bbox[:, 0::2] / width * old_width
    bbox[:, 1::2] = bbox[:, 1::2] / height * old_height
    bbox = np.array(bbox, np.int32)
    image = old_image
    # Line width for the boxes, scaled to the frame size
    thickness = (np.shape(old_image)[0] + np.shape(old_image)[1]) // old_width * 2
    font = ImageFont.truetype(font='simhei.ttf',
                              size=np.floor(3e-2 * np.shape(image)[1] + 0.5).astype('int32'))
    for i, c in enumerate(label):
        predicted_class = class_names[int(c)]
        score = conf[i]
        left, top, right, bottom = bbox[i]
        # Pad the box by 5 px, then clip it to the image bounds
        top = max(0, np.floor(top - 5 + 0.5).astype('int32'))
        left = max(0, np.floor(left - 5 + 0.5).astype('int32'))
        bottom = min(np.shape(image)[0], np.floor(bottom + 5 + 0.5).astype('int32'))
        right = min(np.shape(image)[1], np.floor(right + 5 + 0.5).astype('int32'))
        # Draw the box and its caption
        caption = '{} {:.2f}'.format(predicted_class, score)
        draw = ImageDraw.Draw(image)
        # textsize was removed in Pillow 10; use draw.textbbox on newer versions
        caption_size = draw.textsize(caption, font)
        print(caption)
        if top - caption_size[1] >= 0:
            text_origin = np.array([left, top - caption_size[1]])
        else:
            text_origin = np.array([left, top + 1])
        for t in range(thickness):
            draw.rectangle(
                [left + t, top + t, right - t, bottom - t],
                outline=colors[int(c)])
        draw.rectangle(
            [tuple(text_origin), tuple(text_origin + caption_size)],
            fill=colors[int(c)])
        draw.text(text_origin, caption, fill=(0, 0, 0), font=font)
        del draw
    print("time:", time.time() - start_time)
    return image
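Since detect_image takes and returns a PIL image, it is easy to sanity-check it on a still photo before wiring up the camera (the file names here are placeholders):
img = Image.open('test.jpg').convert('RGB')  # placeholder path
out = detect_image(img)
out.save('test_out.jpg')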
Read frames one by one, convert the format, and feed each one into the model for detection:
while True:
    t1 = time.time()
    # Grab one frame
    ref, frame = capture.read()
    if not ref:  # the camera returned no frame
        break
    # OpenCV delivers BGR; convert to RGB
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Wrap the array in a PIL Image for detect_image
    frame = Image.fromarray(np.uint8(frame))
    frame = np.array(detect_image(frame))
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    # Running average of the instantaneous frame rate
    fps = (fps + (1. / (time.time() - t1))) / 2
    print("fps= %.2f" % fps)
    cv2.imshow("video", frame)
    c = cv2.waitKey(30) & 0xff
    if c == 27:  # Esc quits
        break
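When the loop exits, release the camera and close the preview window (the original script never closes the window; these two lines are my addition):
capture.release()       # safe to call even if already released
cv2.destroyAllWindows() # close the "video" window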
The complete code is below. Give it a try!
import numpy as np
import time
import torch
import torchvision
import os
import cv2
import copy
import colorsys
from PIL import Image, ImageFont, ImageDraw
# Open the camera
capture = cv2.VideoCapture(0)
fps = 0.0
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
    pretrained=True, progress=True, num_classes=91, pretrained_backbone=True)
def get_new_img_size(width, height, img_min_side=600):
    # Scale so that the shorter side becomes img_min_side, keeping the aspect ratio
    if width <= height:
        f = float(img_min_side) / width
        resized_height = int(f * height)
        resized_width = int(img_min_side)
    else:
        f = float(img_min_side) / height
        resized_width = int(f * width)
        resized_height = int(img_min_side)
    return resized_width, resized_height
# classes_path = 'voc_classes.txt'
classes_path = 'classes.txt'
classes_path = os.path.expanduser(classes_path)
with open(classes_path) as f:
    class_names = f.readlines()
class_names = [c.strip() for c in class_names]
num_classes = len(class_names)
# Unused box-regression normalization constants, apparently left over from
# training code; they require CUDA, so they are commented out to keep the
# script runnable on CPU.
# mean = torch.Tensor([0, 0, 0, 0]).cuda().repeat(num_classes + 1)[None]
# std = torch.Tensor([0.1, 0.1, 0.2, 0.2]).cuda().repeat(num_classes + 1)[None]
# Assign a distinct color to each class for drawing the boxes
hsv_tuples = [(x / len(class_names), 1., 1.)
              for x in range(len(class_names))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(
    map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
        colors))
#---------------------------------------------------#
#   Confidence threshold for keeping detections
#---------------------------------------------------#
confidence = 0.5
def detect_image(image):
    start_time = time.time()
    image_shape = np.array(np.shape(image)[0:2])
    old_width = image_shape[1]
    old_height = image_shape[0]
    old_image = copy.deepcopy(image)
    # Resize so that the shorter side is 600 px while keeping the aspect ratio
    width, height = get_new_img_size(old_width, old_height)
    image = image.resize([width, height])
    with torch.no_grad():
        model.eval()
        # Convert the resized PIL image to a CHW float tensor in [0, 1]
        transformed_img = torchvision.transforms.ToTensor()(image.convert("RGB"))
        result = model([transformed_img])
    # Keep only detections above the confidence threshold
    bbox = []
    label = []
    conf = []
    for i in range(len(result[0]['scores'])):
        if result[0]['scores'][i] > confidence:
            bbox.append(result[0]['boxes'][i].tolist())
            label.append(result[0]['labels'][i].tolist())
            conf.append(result[0]['scores'][i].tolist())
    if len(bbox) == 0:
        # Nothing above the threshold: return the frame unchanged
        print("time:", time.time() - start_time)
        return old_image
    bbox = np.array(bbox)
    label = np.array(label)
    conf = np.array(conf)
    # Map the boxes from the resized image back to the original resolution
    bbox[:, 0::2] = bbox[:, 0::2] / width * old_width
    bbox[:, 1::2] = bbox[:, 1::2] / height * old_height
    bbox = np.array(bbox, np.int32)
    image = old_image
    # Line width for the boxes, scaled to the frame size
    thickness = (np.shape(old_image)[0] + np.shape(old_image)[1]) // old_width * 2
    font = ImageFont.truetype(font='simhei.ttf',
                              size=np.floor(3e-2 * np.shape(image)[1] + 0.5).astype('int32'))
    for i, c in enumerate(label):
        predicted_class = class_names[int(c)]
        score = conf[i]
        left, top, right, bottom = bbox[i]
        # Pad the box by 5 px, then clip it to the image bounds
        top = max(0, np.floor(top - 5 + 0.5).astype('int32'))
        left = max(0, np.floor(left - 5 + 0.5).astype('int32'))
        bottom = min(np.shape(image)[0], np.floor(bottom + 5 + 0.5).astype('int32'))
        right = min(np.shape(image)[1], np.floor(right + 5 + 0.5).astype('int32'))
        # Draw the box and its caption
        caption = '{} {:.2f}'.format(predicted_class, score)
        draw = ImageDraw.Draw(image)
        # textsize was removed in Pillow 10; use draw.textbbox on newer versions
        caption_size = draw.textsize(caption, font)
        print(caption)
        if top - caption_size[1] >= 0:
            text_origin = np.array([left, top - caption_size[1]])
        else:
            text_origin = np.array([left, top + 1])
        for t in range(thickness):
            draw.rectangle(
                [left + t, top + t, right - t, bottom - t],
                outline=colors[int(c)])
        draw.rectangle(
            [tuple(text_origin), tuple(text_origin + caption_size)],
            fill=colors[int(c)])
        draw.text(text_origin, caption, fill=(0, 0, 0), font=font)
        del draw
    print("time:", time.time() - start_time)
    return image
while True:
    t1 = time.time()
    # Grab one frame
    ref, frame = capture.read()
    if not ref:  # the camera returned no frame
        break
    # OpenCV delivers BGR; convert to RGB
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Wrap the array in a PIL Image for detect_image
    frame = Image.fromarray(np.uint8(frame))
    frame = np.array(detect_image(frame))
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    # Running average of the instantaneous frame rate
    fps = (fps + (1. / (time.time() - t1))) / 2
    print("fps= %.2f" % fps)
    cv2.imshow("video", frame)
    c = cv2.waitKey(30) & 0xff
    if c == 27:  # Esc quits
        break
capture.release()
cv2.destroyAllWindows()
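Two files are assumed to sit next to the script: classes.txt (the class list) and simhei.ttf (the font used for the captions). If SimHei is not installed on your system, a quick fallback (my substitution, not part of the original) is to replace the ImageFont.truetype call in detect_image with Pillow's built-in bitmap font:
font = ImageFont.load_default()  # crude but available everywhere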