Python Implementation of Silent Liveness Detection

  This article implements silent (passive) liveness detection: a binary classification network decides whether the face in an image is a real, live face or a fake face presented via a photo or video. The full implementation is given below.
The face_detector model files can be downloaded from Baidu Netdisk (password: hgu4).
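
The script below expects two pretrained artifacts next to it, liveness.model (the binary real/fake classifier) and le.pickle (its label encoder), in addition to the face_detector directory. As a quick sanity check before running it, a minimal sketch like the following (not part of the original script; the expected shapes and class names are assumptions) can confirm the classifier takes 32x32 face crops and the encoder holds exactly two classes:

# sanity-check sketch (illustrative only): inspect the pretrained artifacts
import pickle
from keras.models import load_model

model = load_model("liveness.model")                # binary real/fake classifier
le = pickle.loads(open("le.pickle", "rb").read())   # its label encoder

print(model.input_shape)  # expected (None, 32, 32, 3) for 32x32 face crops
print(le.classes_)        # expected to hold the two classes, e.g. fake / real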

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2020/3/25 12:03 PM
# @Author  : sarah feng
# @File    : Slient_liveness_detect.py
# @attention: Run the detection in well-lit conditions if possible; it usually fails when the lighting is dim. Also make sure the eyes are looking straight ahead.

from imutils.video import VideoStream
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import numpy as np
import argparse
import imutils
import pickle
import time
import cv2
import os


class SilenceDetect(object):
    def __init__(self):
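        # paths to the OpenCV DNN face detector (Caffe prototxt and weights)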
        self.protoPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
        self.modelPath = os.path.sep.join(["face_detector",
                                           "res10_300x300_ssd_iter_140000.caffemodel"])
        self.net = cv2.dnn.readNetFromCaffe(self.protoPath, self.modelPath)

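        # the trained liveness classifier (real vs. fake) and its label encoder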
        self.model = load_model("liveness.model")
        self.le = pickle.loads(open("le.pickle", "rb").read())
        self.confidence = 0.5

    def liveness_detect(self, frame):
        labels, confidences, start_box, end_box, areas = [], [], [], [], []
        # preprocess the image; see https://blog.csdn.net/weixin_42216109/article/details/103010206 for details on blobFromImage
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
                                     (300, 300), (104.0, 177.0, 123.0))

        self.net.setInput(blob)
        detections = self.net.forward()
        # print("detect", detections.shape) # (1, 1, 200, 7)
        for i in range(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]

            # filter out weak detections
            if confidence > self.confidence:  # confidence threshold for the face detector
                # extract the face bounding box
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # clamp the bounding box so it stays inside the frame
                startX = max(0, startX)
                startY = max(0, startY)
                endX = min(w, endX)
                endY = min(h, endY)

                # crop the face and preprocess it the same way as during training
                face = frame[startY:endY, startX:endX]
                face = cv2.resize(face, (32, 32))
                face = face.astype("float") / 255.0
                face = img_to_array(face)
                face = np.expand_dims(face, axis=0)

                # feed the face crop to the network to decide whether it is a real or a fake face
                preds = self.model.predict(face)[0]
                j = np.argmax(preds)
                label = self.le.classes_[j]
                labels.append(label)
                confidences.append(preds[j])
                start_box.append([startX, startY])
                end_box.append([endX, endY])
                areas.append((endY - startY) * (endX - startX))
        return labels, confidences, start_box, end_box, areas


if __name__ == '__main__':
    print("[INFO] starting video stream thread...")
    vs = VideoStream(src=0).start()
    time.sleep(1.0)
    silence_detect = SilenceDetect()
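    # require fake_frame consecutive "fake" classifications before treating the face as fake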
    fake_num = 0
    fake_frame = 5
    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=600)
        labels, confidences, start_bbox, end_bbox, areas = silence_detect.liveness_detect(frame)
        # only use the closest face (the one with the largest area)
        if labels:
            index_num = np.argmax(areas)
            label, confidence, start_box, end_box = labels[index_num], confidences[index_num], \
                                                    start_bbox[index_num], end_bbox[index_num]
            # only report "fake" after fake_frame consecutive fake detections
            if label == b'fake' and confidence > 0.8:
                fake_num += 1
            else:
                fake_num = 0
            if fake_num < fake_frame:
                label = b'real'

            # draw the classification result and the face bounding box
            text = "{}: {:.4f}".format(label, confidence)
            cv2.putText(frame, text, (start_box[0], start_box[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            cv2.rectangle(frame, (start_box[0], start_box[1]), (end_box[0], end_box[1]),
                          (0, 0, 255), 2)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    cv2.destroyAllWindows()
    vs.stop()
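
The same SilenceDetect class can also be run on a single image instead of the webcam stream. A minimal usage sketch, assuming a local test image (the filename test.jpg is only a placeholder):

# single-image usage sketch (test.jpg is a placeholder filename)
import cv2
import imutils

detector = SilenceDetect()
image = cv2.imread("test.jpg")
image = imutils.resize(image, width=600)
labels, confidences, start_box, end_box, areas = detector.liveness_detect(image)
for label, conf in zip(labels, confidences):
    print(label, conf)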
