Make a Finger Heart, Set Off the Most Beautiful Fireworks [Source Code Included]

During the New Year, we used the AidLux platform together with AI models and effect rendering to turn a creative idea into reality: making a finger-heart gesture sets off fireworks. Let's walk through how it was implemented!

1. Final Result

A demo of the finger-heart detection and fireworks effect can be viewed on Bilibili:

New Year fireworks, blooming at your fingertips! A hardcore makeover that gives an idle phone a new trick

2. Implementation

The finger-heart fireworks pipeline works like this: an object-detection model first checks whether the image contains any hand regions; each detected hand region is cropped out and fed to a gesture-recognition network to classify the gesture; once the finger-heart ("heartSingle") gesture is recognized, the fireworks effect is rendered with HTML+CSS+JS. Full code: download.
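
Before going through the full code, here is a minimal, self-contained sketch of that flow; detect_hands, classify_gesture and trigger_fireworks are hypothetical stand-ins for the real TFLite models and the socket.io notification used in heart.py below:

import numpy as np

def detect_hands(frame):
    # Stand-in for the hand detector: return a list of (x1, y1, x2, y2) boxes.
    return [(10, 10, 100, 100)]

def classify_gesture(crop):
    # Stand-in for the gesture classifier: return a label such as "heartSingle".
    return "heartSingle"

def trigger_fireworks():
    # In heart.py this emits a "show_firework" socket.io event to the web page.
    print("fireworks!")

def process(frame):
    for x1, y1, x2, y2 in detect_hands(frame):
        crop = frame[y1:y2 + 1, x1:x2 + 1]
        if classify_gesture(crop) == "heartSingle":
            trigger_fireworks()

process(np.zeros((480, 640, 3), dtype=np.uint8))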

2.1 Code walkthrough

2.1.1 Python AI part

Deploy the code to the AidLux platform and run heart.py. The code of heart.py is as follows:

import cv2
import numpy as np
import torch
import aidlite_gpu
from utils import *
from cvs import *
import time

import os
from flask import Flask, render_template, Response, make_response, request
from flask_socketio import SocketIO
from flask_cors import CORS, cross_origin
import json
import signal
import sys
import base64


app = Flask(__name__, static_folder='./', static_url_path='', template_folder='')
cors = CORS(app)
socketio = SocketIO(app, cors_allowed_origins='*')

# Gesture classes the classifier can recognize
resultLabel = ["one","five","fist","OK","heartSingle",
         "yearh","three","four","six","love you","gun","thumbUp","nine","pink"]

# Hand-detection model: input/output buffer sizes (float32, hence the *4 bytes)
aidlite = aidlite_gpu.aidlite()
imgW=320
imgH=320
inShape = [1*imgW*imgH*3*4,]
outShape = [1*6300*6*4,]

# Gesture-classification model: input/output buffer sizes, plus both model paths
recoInShape = [1*192*192*3*4,]
recoOutShape = [14*4]
model_path = "model/best-fp16.tflite"
reco_path = "model/handclassification.tflite"

aidlite.set_g_index(0)
print('gpu:',aidlite.ANNModel(model_path,inShape,outShape,3,0))

aidlite.set_g_index(1)
print('gpu:',aidlite.ANNModel(reco_path,recoInShape,recoOutShape,3,0))

colors = Colors()
labels=['hand']
conf_thres = 0.25
iou_thres = 0.45
line_thickness = 1
classes = None
agnostic_nms = False
image_path = "000260_005.jpg"


# Gesture classification: preprocess the crop and run the 14-class classifier
def handRecognition(image):
    image = transformer(image)
    aidlite.set_g_index(1)
    aidlite.setTensor_Fp32(image,192,192)
    aidlite.invoke()
    predict = aidlite.getOutput_Float32(0)
    out = np.reshape(predict,(14,))
    index = np.argmax(out)
    # print("index:",index)
    return resultLabel[index]

# Open the phone camera
camid=0
cap=cvs.VideoCapture(camid)


# Camera streaming generator (yields MJPEG frames)
def gen():
    while True:
        # Grab a camera frame; retry if none comes back
        image0 = cap.read()
        if image0 is None:
            continue
        image0 = np.array(image0)
        H,W,_ = image0.shape
        image = image0.copy()
        image1 = image0.copy()
        image = cv2.resize(image,(320,320),interpolation=cv2.INTER_LINEAR)
        image = image[:,:,::-1]
        image = image/255.0
        image = np.expand_dims(image,axis=0)
        image = image.astype(dtype=np.float32)


        # Feed the image to the hand-detection model
        start = time.time()
        aidlite.set_g_index(0)
        aidlite.setTensor_Fp32(image,imgW,imgH)
        aidlite.invoke()

        # Get the hand box predictions
        pred = np.array(aidlite.getOutput_Float32(0))
        # print(pred)
        pred = np.reshape(pred,(1,6300,6))
        middle = time.time()
        # print("inference:",(middle-start))
        pred[..., 0] *= imgH  # x
        pred[..., 1] *= imgW  # y
        pred[..., 2] *= imgH  # w
        pred[..., 3] *= imgW  # h
        pred = torch.tensor(pred)
        # Non-maximum suppression over the candidate boxes
        pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms,max_det=10)
        # Detections for the hand regions
        det = pred[0]
        det[:, :4] = scale_coords(image.shape[1:-1], det[:, :4], image0.shape).round()
        # print(det[:,4:])
        results=""
        for *box, conf, cls in reversed(det):
            # Draw the detected hand box on the original frame
            c = int(cls)
            label = labels[c]
            p1,p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
            # Crop the detected hand region from the original frame and classify the gesture
            handimage = image1[int(box[1]):int(box[3])+1,int(box[0]):int(box[2])+1,:]
            result = handRecognition(handimage)
            results = results+","+result
            cv2.rectangle(image0, p1, p2, colors(c,True), thickness=line_thickness, lineType=cv2.LINE_AA)
            tf = max(line_thickness- 1, 1)  # font thickness
            w, h = cv2.getTextSize(label, 0, fontScale=line_thickness, thickness=tf)[0]  # text width, height
            outside = p1[1] - h - 3 >= 0  # label fits outside box
            p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
            cv2.rectangle(image0, p1, p2, colors(c,True), -1, cv2.LINE_AA)  # filled
            cv2.putText(image0, result, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, line_thickness, (255,255,255),
                        thickness=tf, lineType=cv2.LINE_AA)
        # If a finger-heart ("heartSingle") gesture was detected, trigger the fireworks
        if 'heartSingle' in results:
            showFireWork()
  
        # Encode the annotated frame as JPEG bytes for the front end
        ret, jpeg = cv2.imencode('.jpg', image0)
        frame = jpeg.tobytes()
        time.sleep(0.03)
  
        yield (b'--frame\r\n'
              b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
# Camera feed route (MJPEG stream)
@app.route('/video_feed')
def video_feed():
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

# Page showing the live camera view
@app.route('/')
@cross_origin(supports_credentials=True)
def cur_camera():
    return render_template('index.html')


@socketio.on('firework')
def showFireWork():
    socketio.emit('show_firework', { 'data': True })


# Client connected
@socketio.on('connect')
def connect():
    socketio.emit('connect', { 'data': 'Connected' })


# Client disconnected
@socketio.on('disconnect')
def disconnect():
    print('Client disconnected')

# Exit on signal
def exit(signum, frame):
    sys.exit(0)

if __name__ == '__main__':
  
    signal.signal(signal.SIGINT, exit)
    signal.signal(signal.SIGHUP, exit)
    signal.signal(signal.SIGTERM, exit)
    app.run(host='0.0.0.0', debug=False, port=63532)

The helper functions that heart.py depends on are defined in utils.py:

import numpy as np
import cv2
import torchvision
import torch
import time


# Image preprocessing: resize to 192x192, normalize, and reorder to CHW
def transformer(image):
    # image = cv2.cvtColor(image,cv2.COLOR_RGB2BGR)
    image = cv2.resize(image,(192,192),interpolation=cv2.INTER_CUBIC)
    image = (image-128.)/256.
    image =image.transpose(2,0,1)
    image = image.astype(np.float32)
    return image

# Clip box coordinates to the image boundaries
def clip_coords(boxes, shape):
    # Clip bounding xyxy bounding boxes to image shape (height, width)
    if isinstance(boxes, torch.Tensor):  # faster individually
        boxes[:, 0].clamp_(0, shape[1])  # x1
        boxes[:, 1].clamp_(0, shape[0])  # y1
        boxes[:, 2].clamp_(0, shape[1])  # x2
        boxes[:, 3].clamp_(0, shape[0])  # y2
    else:  # np.array (faster grouped)
        boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1])  # x1, x2
        boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0])  # y1, y2

# Compute the scale between the resized and original images and map coordinates back to the original image
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain  = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords

# Convert boxes from center x/y plus width/height to top-left and bottom-right corners
def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y

# Assign a distinct display color to each class
class Colors:
    # Ultralytics color palette https://ultralytics.com/
    def __init__(self):
        # hex = matplotlib.colors.TABLEAU_COLORS.values()
        hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
               '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
        self.palette = [self.hex2rgb('#' + c) for c in hex]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        c = self.palette[int(i) % self.n]
        return (c[2], c[1], c[0]) if bgr else c

    @staticmethod
    def hex2rgb(h):  # rgb order (PIL)
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
  
# Compute the IoU between two sets of boxes
def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)

# Non-maximum suppression to filter out redundant candidate boxes
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
                        labels=(), max_det=300):
    """Runs Non-Maximum Suppression (NMS) on inference results

    Returns:
         list of detections, on (n,6) tensor per image [xyxy, conf, cls]
    """

    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Checks
    assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
    assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break  # time limit exceeded

    return output
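
As a quick sanity check, the NMS helper can be exercised on a handmade prediction tensor (one image, three candidate boxes, one class); the numbers below are made up purely for illustration:

import torch
from utils import non_max_suppression

# Each row: [center_x, center_y, width, height, obj_conf, cls_conf]
pred = torch.tensor([[
    [100., 100., 50., 50., 0.90, 0.95],   # confident detection
    [102., 101., 52., 49., 0.85, 0.90],   # overlapping duplicate, suppressed by NMS
    [300., 200., 40., 40., 0.10, 0.50],   # below the confidence threshold
]])
out = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
print(out[0])  # one row per kept box: [x1, y1, x2, y2, conf, cls]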

2.1.2 HTML+CSS+JS fireworks effect

The front end is responsible for overlaying the fireworks effect on the algorithm's output. When it receives a notification from the backend that the gesture was recognized, it plays the fireworks animation for 3.5 s; any further notifications received during that window are ignored, and a notification received after the 3.5 s have elapsed starts the animation again.

The fireworks animation itself is rendered with fireworks.js.
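
The actual front end is written in JavaScript, but the 3.5 s throttling logic described above is simple enough to sketch in a few lines of Python for illustration (the function names here are made up, not the project's real API):

import time

FIREWORK_DURATION = 3.5  # seconds the animation plays

_playing_until = 0.0

def start_fireworks_animation():
    # In the real front end this starts the fireworks.js animation.
    print("fireworks for", FIREWORK_DURATION, "seconds")

def on_show_firework():
    # Called whenever a "show_firework" notification arrives from the backend.
    global _playing_until
    now = time.monotonic()
    if now < _playing_until:
        return  # animation still playing: ignore this notification
    _playing_until = now + FIREWORK_DURATION
    start_fireworks_animation()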

The complete front-end code (index.html) is included in the source download (see section 2.3).


2.2 Requirements

Running the code requires installing the dependencies on AidLux, mainly:

flask, flask_socketio, flask_cors, signal, sys, cv2, numpy, aidlite_gpu, cvs, time — these are already bundled with AidLux.

You need to install the following yourself: pip install torch==1.10.1 and pip install torchvision==0.11.2.

!> Note: the torch and torchvision versions must match, otherwise torchvision will raise an error during non-maximum suppression.
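
If you are unsure whether the installed versions match, a quick check like the following can save some debugging (torch 1.10.1 pairs with torchvision 0.11.2, as noted above):

import torch
import torchvision

# Mismatched builds typically fail inside torchvision.ops.nms with an
# operator/version error, so print and compare the versions first.
print(torch.__version__, torchvision.__version__)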

2.3 Getting the code

Click the code download link to get the source; for how to deploy and run it on the platform, refer to this post. Run heart.py, then open http://x.x.x.x:63532 in a browser to see the finger-heart detection in action, where x.x.x.x is the IP address of the AidLux device.

3. Results

The final effect is shown in the figure below.

[Figure 1: the finger-heart gesture setting off the fireworks effect]
