Python MASK and Pedestrian safety distance detection and bioassay

参考这一篇博客:https://blog.csdn.net/m0_37690102/article/details/106975159,今天,接着实现上面一篇博客没有实现的活体检测功能,今天,实现这部分功能,界面设计等功能就不在阐述了,主要实现活体检测的功能,使用dlib的库。

  • Referring to the blog above (https://blog.csdn.net/m0_37690102/article/details/106975159), today we implement the liveness-detection feature that the previous post left out. Interface design and the other features are not covered again here; the focus is the liveness-detection function itself, built with the dlib library.

首先,安装dlib的库,你可以选择离线安装,你也可以选择在线安装。在此,选择在线安装的方式。

  • First, install the dlib library. You can install it either offline or online; here we choose the online installation.
pip install dlib

直接看看测试的效果吧

  • Let's look at the test results directly:

    Python MASK and Pedestrian safety distance detection and bioassay_第1张图片

接下来就是功能的实现,参考下面的代码,我只实现了其中的一部分,一部分是参考网上他人博客,出处很遗憾没有找到。不过您可以用下面的代码;

  • Next comes the implementation of the feature; refer to the code below. I implemented only part of it myself — another part is adapted from someone else's blog, whose original source I unfortunately could not find. You are welcome to use the following code:
def eye_aspect_ratio(eye):
	"""Return the eye aspect ratio (EAR) for one eye.

	The EAR is the mean of the two vertical landmark distances divided by
	the horizontal landmark distance; it drops toward zero as the eye
	closes, which is what the blink detector keys on.
	"""
	# Vertical distances between the upper/lower eyelid landmark pairs
	# (e.g. dlib points 43-47 and 44-46 for the left eye).
	vertical_a = dist.euclidean(eye[1], eye[5])
	vertical_b = dist.euclidean(eye[2], eye[4])

	# Horizontal distance between the two eye corners (e.g. points 42-45).
	horizontal = dist.euclidean(eye[0], eye[3])

	# Average vertical opening normalized by the eye width.
	return (vertical_a + vertical_b) / (2.0 * horizontal)

def mouth_aspect_ratio(mouth):
	"""Return the mouth aspect ratio used to detect an open mouth.

	Averages five vertical lip-landmark distances and normalizes by the
	horizontal mouth width; larger values mean a wider-open mouth.
	"""
	# Each upper-lip landmark paired with its mirrored lower-lip landmark.
	vertical_pairs = ((1, 11), (2, 10), (3, 9), (4, 8), (5, 7))
	vertical_sum = sum(dist.euclidean(mouth[i], mouth[j])
	                   for i, j in vertical_pairs)

	# Horizontal distance between the two mouth corners.
	width = dist.euclidean(mouth[0], mouth[6])

	return vertical_sum / (5.0 * width)

def left_right_face_ratio(face):
	"""Return the right-to-left ratio of mirrored face-landmark distances.

	Three landmark pairs on the right side of the face are compared with
	their mirrored counterparts on the left (measured against the nose
	bridge, nose tip, and mouth corners).  A roughly frontal face yields
	about 1.0; turning left pushes the ratio above 2.0, turning right
	drops it below 0.5.
	"""
	right_a = dist.euclidean(face[0], face[27])
	right_b = dist.euclidean(face[2], face[30])
	right_c = dist.euclidean(face[4], face[48])

	left_a = dist.euclidean(face[16], face[27])
	left_b = dist.euclidean(face[14], face[30])
	left_c = dist.euclidean(face[12], face[54])

	# Mean of the three side-to-side ratios.
	return (right_a / left_a + right_b / left_b + right_c / left_c) / 3

初始化的代码:

  • Initialization code: 
# =========================================================================================
        # --- Liveness-detection state initialization (fragment of an unseen __init__) ---
        # define two constants, one for the eye aspect ratio to indicate
        # blink and then a second constant for the number of consecutive
        # frames the eye must be below the threshold
        self.EYE_AR_THRESH = 0.25  # EAR below this value counts as "eye closed"
        self.EYE_AR_CONSEC_FRAMES = 2  # consecutive closed-eye frames required for one blink

        # initialize the frame counters and the total number of blinks
        self.COUNTER = 0  # consecutive frames with EAR below the threshold
        self.TOTAL = 0  # completed blink count
        self.OPEN_MOUTH_COUNTER = 0  # consecutive frames with the mouth-open ratio above threshold
        self.MOUTH_TOTAL = 0  # completed open/close-mouth action count
        self.TRUN_LEFT_TOTAL = 0  # completed turn-left action count ("TRUN" typo preserved — callers use these names)
        self.TRUN_RIGHT_TOTAL = 0  # completed turn-right action count
        self.TRUN_LEFT_COUNTER = 0  # consecutive frames with the head turned left
        self.TRUN_RIGHT_COUNTER = 0  # consecutive frames with the head turned right

        # Randomly pick one of the two challenge orders (1 or 2) for the liveness test.
        self.random_number = random.randint(1, 2)

        # initialize dlib's face detector (HOG-based) and then create
        # the facial landmark predictor
        print("[INFO] loading facial landmark predictor...")
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(r'./shape_predictor_68_face_landmarks.dat')  # change this path to your own model file

        # grab the indexes of the facial landmarks for the left and
        # right eye, respectively
        (self.lStart, self.lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]  # (42, 48)
        (self.rStart, self.rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]  # (36, 42)
        (self.mStart, self.mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]  # (48, 68)

        # start the video stream thread
        print("[INFO] starting video stream thread...")
        # vs = FileVideoStream(args["video"]).start()
        self.fileStream = True
        # vs = VideoStream(src=0).start()
        # video_capture = cv2.VideoCapture(0)
        # NOTE(review): immediately overwrites the True above — the first assignment is dead.
        self.fileStream = False
        self.textColor = (255, 0, 0)  # BGR color for the on-screen status text
        # =========================================================================================
            if self.bio_assay == 1:
                # --- Per-frame liveness-detection pass (fragment of an unseen method) ---
                # NOTE(review): `success` and `img` are assumed to be defined earlier in
                # the enclosing method (presumably the return values of a capture read) — confirm.
                try:
                    # flag, self.image = self.cap.read()
                    # show = cv2.resize(self.image, (640, 480))
                    # show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
                    # showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0], QtGui.QImage.Format_RGB888)
                    # self.label_show_camera.setPixmap(QtGui.QPixmap.fromImage(showImage))
                    # buffers for fitting a straight line through the eyebrow landmarks
                    line_brow_x = []
                    line_brow_y = []
                    ret = success
                    img = imutils.resize(img)  # , width=450)
                    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

                    # detect faces in the grayscale frame
                    rects = self.detector(gray, 0)
                    # wait 1 ms per frame; a delay of 0 would block on a static frame
                    k = cv2.waitKey(1)

                    # loop over the face detections
                    cv2.putText(img, "Faces: " + str(len(rects)), (500, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                (0, 0, 255), 1, cv2.LINE_AA)
                    for rect in rects:
                        # NOTE(review): this inner loop re-iterates ALL detections for every
                        # outer `rect` (O(n^2) for n faces) and rebinds `k` from waitKey above.
                        for k, d in enumerate(rects):
                            # draw a red rectangle around the detected face
                            # cv2.rectangle(img, (x - 10, y - 10), (x + w + 10, y + h + 10), color, 2)
                            cv2.rectangle(img, (d.left(), d.top()), (d.right(), d.bottom()), (0, 0, 255), 2)
                            # width of the face detection box, used to normalize all distances below
                            self.face_width = d.right() - d.left()
                            # run the predictor to obtain the 68 facial landmark coordinates
                            shape = self.predictor(img, d)
                            # draw a small circle at every landmark
                            for i in range(68):
                                cv2.circle(img, (shape.part(i).x, shape.part(i).y), 2, (0, 255, 0), -1, 8)
                                # cv2.putText(im_rd, str(i), (shape.part(i).x, shape.part(i).y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                #            (255, 255, 255))
                            # =============================================================================================================
                            # analyze the relative positions of selected landmarks as the basis for expression recognition
                            mouth_width = (shape.part(54).x - shape.part(48).x) / self.face_width  # how wide the mouth is stretched
                            mouth_higth = (shape.part(66).y - shape.part(62).y) / self.face_width  # how far the mouth is open
                            # print("嘴巴宽度与识别框宽度之比:",mouth_width_arv)
                            # print("嘴巴高度与识别框高度之比:",mouth_higth_arv)

                            # use the 10 landmarks on the two eyebrows to measure brow raising and frowning
                            brow_sum = 0  # sum of eyebrow heights
                            frown_sum = 0  # sum of the horizontal gaps between the two eyebrows
                            for j in range(17, 21):
                                brow_sum += (shape.part(j).y - d.top()) + (shape.part(j + 5).y - d.top())
                                frown_sum += shape.part(j + 5).x - shape.part(j).x
                                line_brow_x.append(shape.part(j).x)
                                line_brow_y.append(shape.part(j).y)

                            # self.brow_k, self.brow_d = self.fit_slr(line_brow_x, line_brow_y)  # 计算眉毛的倾斜程度
                            tempx = np.array(line_brow_x)
                            tempy = np.array(line_brow_y)
                            z1 = np.polyfit(tempx, tempy, 1)  # fit a first-degree (straight) line
                            self.brow_k = -round(z1[0], 3)  # fitted slope is opposite to the visual eyebrow tilt (image y-axis points down)

                            brow_hight = (brow_sum / 10) / self.face_width  # eyebrow height as a fraction of face width
                            brow_width = (frown_sum / 5) / self.face_width  # eyebrow separation as a fraction of face width
                            # print("眉毛高度与识别框高度之比:",round(brow_arv/self.face_width,3))
                            # print("眉毛间距与识别框高度之比:",round(frown_arv/self.face_width,3))

                            # degree of eye opening (sum of four vertical eyelid distances)
                            eye_sum = (shape.part(41).y - shape.part(37).y + shape.part(40).y - shape.part(38).y +
                                       shape.part(47).y - shape.part(43).y + shape.part(46).y - shape.part(44).y)
                            eye_hight = (eye_sum / 4) / self.face_width
                            # print("眼睛睁开距离与识别框高度之比:",round(eye_open/self.face_width,3))

                            # classify the expression case by case
                            # mouth open: could be happy or amazed
                            # BUG(review): round() wraps a boolean here, so this is effectively
                            # just "if mouth_higth >= 0.03:" — probably meant round(mouth_higth, 3) >= 0.03.
                            if round(mouth_higth >= 0.03):
                                if eye_hight >= 0.056:
                                    cv2.putText(img, "amazing", (d.left(), d.bottom() + 20),
                                                cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                                                (0, 0, 255), 2, 4)
                                else:
                                    cv2.putText(img, "happy", (d.left(), d.bottom() + 20), cv2.FONT_HERSHEY_SIMPLEX,
                                                0.8,
                                                (0, 0, 255), 2, 4)

                            # mouth closed: could be neutral or angry
                            else:
                                if self.brow_k <= -0.3:
                                    cv2.putText(img, "angry", (d.left(), d.bottom() + 20), cv2.FONT_HERSHEY_SIMPLEX,
                                                0.8,
                                                (0, 0, 255), 2, 4)
                                else:
                                    cv2.putText(img, "nature", (d.left(), d.bottom() + 20), cv2.FONT_HERSHEY_SIMPLEX,
                                                0.8,
                                                (0, 0, 255), 2, 4)
                            # =============================================================================================================
                        # re-run the predictor on the grayscale frame and convert to a NumPy array
                        shape = self.predictor(gray, rect)
                        shape = face_utils.shape_to_np(shape)
                        # Translation of the note below: first check whether the eye aspect
                        # ratio is below the blink threshold; if so, increment the counter of
                        # consecutive "closed" frames.  Otherwise, check whether enough
                        # consecutive frames were below the threshold to count one blink; if
                        # so, increment the blink total, then reset COUNTER.
                        '''
                        第一步检查眼睛纵横比是否低于我们的眨眼阈值,如果是,我们递增指示正在发生眨眼的连续帧数。否则,我们将处理眼高宽比不低于眨眼阈值的情况,我们对其进行检查,
                        看看是否有足够数量的连续帧包含低于我们预先定义的阈值的眨眼率。如果检查通过,我们增加总的闪烁次数。然后我们重新设置连续闪烁次数 COUNTER。
                        '''
                        # ==========================================================================================
                        # extract the left and right eye coordinates, then use the
                        # coordinates to compute the eye aspect ratio for both eyes
                        leftEye = shape[self.lStart:self.lEnd]
                        rightEye = shape[self.rStart:self.rEnd]
                        mouth = shape[self.mStart:self.mEnd]

                        leftEAR = eye_aspect_ratio(leftEye)
                        rightEAR = eye_aspect_ratio(rightEye)
                        mouthRatio = mouth_aspect_ratio(mouth)
                        leftRightRatio = left_right_face_ratio(shape)

                        # average the eye aspect ratio together for both eyes
                        ear = (leftEAR + rightEAR) / 2.0  # euclidean vertical/horizontal ratio

                        # compute the convex hull for the left and right eye, then
                        # visualize each of the eyes
                        leftEyeHull = cv2.convexHull(leftEye)
                        rightEyeHull = cv2.convexHull(rightEye)
                        mouthHull = cv2.convexHull(mouth)
                        cv2.drawContours(img, [leftEyeHull], -1, (0, 255, 0), 1)
                        cv2.drawContours(img, [rightEyeHull], -1, (0, 255, 0), 1)
                        cv2.drawContours(img, [mouthHull], -1, (0, 255, 0), 1)

                        print("leftRightRatio:", leftRightRatio)  # debug trace
                        # mouth challenge: ratio > 0.7 means open; an open stretch of >= 2
                        # frames followed by a close counts as one open-mouth action
                        if mouthRatio > 0.7:
                            self.OPEN_MOUTH_COUNTER += 1
                        else:
                            if self.OPEN_MOUTH_COUNTER >= 2:
                                self.MOUTH_TOTAL += 1
                            self.OPEN_MOUTH_COUNTER = 0
                        # head-turn challenge: ratio >= 2.0 means turned left, <= 0.5 turned right
                        if leftRightRatio >= 2.0:
                            self.TRUN_LEFT_COUNTER += 1
                        elif leftRightRatio <= 0.5:
                            self.TRUN_RIGHT_COUNTER += 1
                        else:
                            if self.TRUN_LEFT_COUNTER >= 2:
                                self.TRUN_LEFT_TOTAL += 1
                            if self.TRUN_RIGHT_COUNTER >= 2:
                                self.TRUN_RIGHT_TOTAL += 1
                            self.TRUN_LEFT_COUNTER = 0
                            self.TRUN_RIGHT_COUNTER = 0

                            # check to see if the eye aspect ratio is below the blink
                            # threshold, and if so, increment the blink frame counter
                        if ear < self.EYE_AR_THRESH:
                            self.COUNTER += 1
                            # otherwise, the eye aspect ratio is not below the blink
                            # threshold
                        else:
                            # if the eyes were closed for a sufficient number of frames
                            # (closed-eye time >= 2 frames) then increment the total number of blinks
                            if self.COUNTER >= self.EYE_AR_CONSEC_FRAMES:
                                self.TOTAL += 1
                                # once every challenge has been completed, reset all action
                                # totals and draw a new random challenge order
                                if self.TRUN_RIGHT_TOTAL >= 1 and self.TRUN_LEFT_TOTAL >= 1 and self.MOUTH_TOTAL >= 1:
                                    self.TRUN_LEFT_TOTAL = 0
                                    self.TRUN_RIGHT_TOTAL = 0
                                    self.MOUTH_TOTAL = 0
                                    self.random_number = random.randint(1, 2)

                                    # reset the eye frame counter
                            self.COUNTER = 0
                        # ============================================================================
                        cv2.putText(img, "COUNTER: {}".format(self.COUNTER), (150, 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                        cv2.putText(img, "Blinks: {}".format(self.TOTAL), (10, 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                        cv2.putText(img, "EAR: {:.2f}".format(ear), (300, 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                        # ============================================================================
                        # Challenge order 1: turn left -> turn right -> open mouth.
                        # NOTE(review): "Trun" in the on-screen strings is a typo for "Turn",
                        # preserved here because these are runtime strings.
                        # NOTE(review): consecutive MsgTE.setText calls overwrite each other,
                        # so only the last message in each branch is actually visible.
                        if self.random_number == 1:
                            if self.TRUN_LEFT_TOTAL > 0:
                                if self.TRUN_RIGHT_TOTAL > 0:
                                    if self.MOUTH_TOTAL > 0:
                                        cv2.putText(img, "live detection finish", (50, 150),
                                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, self.textColor, 2)
                                        self.MsgTE.setText("活体检测结果:" + "活体")
                                        # self.living = True
                                        self.MsgTE.setText("闭合嘴动作:" + "开始")
                                        self.MsgTE.setText("转右脸动作:" + "开始")
                                        self.MsgTE.setText("转左脸动作:" + "开始")
                                    else:
                                        cv2.putText(img, "Open Mouth", (50, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                                    self.textColor, 2)
                                        self.MsgTE.setText("闭合嘴动作:" + "开始")
                                        # Speak("请张合嘴巴")
                                        # self.living = False
                                else:
                                    cv2.putText(img, "Trun Right Face", (50, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                                self.textColor, 2)
                                    self.MsgTE.setText("转右脸动作:" + "开始")
                                    # Speak("请转动右脸")
                                    # self.living = False
                                    self.MOUTH_TOTAL = 0
                            else:
                                cv2.putText(img, "Trun Left Face", (50, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                            self.textColor, 2)
                                self.MsgTE.setText("转左脸动作:" + "开始")
                                # Speak("请转动左脸")
                                # self.living = False
                                self.MOUTH_TOTAL = 0
                                self.TRUN_RIGHT_TOTAL = 0
                        # Challenge order 2: open mouth -> turn right -> turn left.
                        elif self.random_number == 2:
                            if self.MOUTH_TOTAL > 0:
                                if self.TRUN_RIGHT_TOTAL > 0:
                                    if self.TRUN_LEFT_TOTAL > 0:
                                        cv2.putText(img, "live detection finish", (50, 150),
                                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, self.textColor, 2)
                                        self.MsgTE.setText("活体检测结果:" + "活体")
                                        # self.living = True
                                        self.MsgTE.setText("闭合嘴动作:" + "完成")
                                        self.MsgTE.setText("转右脸动作:" + "完成")
                                        self.MsgTE.setText("转左脸动作:" + "完成")
                                    else:
                                        cv2.putText(img, "Trun Left Face", (50, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                                    self.textColor, 2)
                                        self.MsgTE.setText("转左脸动作:" + "开始")
                                        # Speak("请转动左脸")
                                        # self.living = False
                                else:
                                    cv2.putText(img, "Trun Right Face", (50, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                                self.textColor, 2)
                                    self.MsgTE.setText("转右脸动作:" + "开始")
                                    # Speak("请转动右脸")
                                    self.TRUN_LEFT_TOTAL = 0
                                    self.living = False
                            else:
                                cv2.putText(img, "Open Mouth", (50, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                            self.textColor, 2)
                                self.MsgTE.setText("闭合嘴动作:" + "开始")
                                # Speak("请张合嘴巴")
                                # self.living = False
                                self.TRUN_LEFT_TOTAL = 0
                                self.TRUN_RIGHT_TOTAL = 0
                    if len(rects) == 0:
                        # no face in this frame: show a warning and clear all status labels
                        cv2.putText(img, "No Face Detected!", (50, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255),
                                    2)
                        self.MsgTE.setText("人脸检测结果:" + "未检测")
                        # self.living = False
                        self.MsgTE.setText("活体检测结果:")
                        # self.living == True
                        self.MsgTE.setText("闭合嘴动作:")
                        self.MsgTE.setText("转右脸动作:")
                        self.MsgTE.setText("转左脸动作:")
                    else:
                        self.MsgTE.setText("人脸检测结果:" + "已检测")
                        # show the frame
                        # NOTE(review): self-assignment below is a no-op.
                        img = img
                    # log the successful liveness-detection pass
                    logger.info("活体检测 Success")
                # NOTE(review): broad except that only logs — errors in this pass are
                # swallowed so the video loop keeps running; narrow if debugging.
                except Exception as ee:
                    logger.error(ee)

 I hope this helps you. If you have any questions, please comment on this blog or send me a private message; I will reply in my free time.

你可能感兴趣的:(English,blog,Python)