import os
import time
from collections import OrderedDict

import cv2
import dlib
import numpy as np
import pygame
import tkinter
from imutils import face_utils
from PIL import Image, ImageTk
from scipy.spatial import distance
from tkinter import *
from tkinter import filedialog
upload_flag = 0      # 1 once a video file has been uploaded
eyes_blink = 0.2     # EAR threshold below which the eye counts as closed
eyes_ratio = 30      # consecutive closed-eye frames before raising an alarm
count = 0            # running count of consecutive closed-eye frames
eye_close = False    # True while the closed-eye warning is active
yawn = False         # True while the yawn warning is active
yawn_flag = 0        # frames elapsed since the current yawn was detected
t = 0                # guards the alarm sound so it plays once per event
thresh_mouth = 0.65  # MAR threshold above which the mouth counts as a yawn
num = 1              # snapshot file counter
zt = 1               # pose-estimation frame counter
switch = 1           # main detection loop runs while this is 1
txt = ''             # path of the uploaded video file
(mStart, mEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["mouth"]
pygame.mixer.init()
SHAPE_68_INDEX = OrderedDict([
("left_eye", (42, 48)),
("right_eye", (36, 42))
])
(lstart,lend) = SHAPE_68_INDEX['left_eye']
(rstart,rend) = SHAPE_68_INDEX['right_eye']
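# In dlib's 68-point facial landmark layout the left eye occupies indices
# 42-47, the right eye 36-41, and the mouth 48-67, so the (start, end)
# slices above pick the six landmarks of each eye out of the shape array.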
if not os.path.exists("./bianli/input"):
os.makedirs("./bianli/input")
if not os.path.exists("./bianli/output"):
os.makedirs("./bianli/output")
class general_pose_model(object):
    # Thin wrapper around OpenCV's DNN module for running OpenPose Caffe
    # models (BODY25, COCO, or MPI) on single images.
    def __init__(self, modelpath, mode="MPI"):
        self.inWidth = 368    # network input width expected by OpenPose
        self.inHeight = 368   # network input height expected by OpenPose
        self.threshold = 0.1  # minimum keypoint confidence to keep a point
if mode == "BODY25":
self.pose_net = self.general_body25_model(modelpath)
elif mode == "COCO":
self.pose_net = self.general_coco_model(modelpath)
elif mode == "MPI":
self.pose_net = self.get_mpi_model(modelpath)
    def get_mpi_model(self, modelpath):
        self.points_name = {
            "Head": 0, "Neck": 1,
            "RShoulder": 2, "RElbow": 3, "RWrist": 4,
            "LShoulder": 5, "LElbow": 6, "LWrist": 7,
            "RHip": 8, "RKnee": 9, "RAnkle": 10,
            "LHip": 11, "LKnee": 12, "LAnkle": 13,
            "Chest": 14, "Background": 15}
self.num_points = 15
self.point_pairs = [[0, 1], [1, 2], [2, 3],
[3, 4], [1, 5], [5, 6],
[6, 7], [1, 14],[14, 8],
[8, 9], [9, 10], [14, 11],
[11, 12], [12, 13]
]
        # Model files are resolved relative to modelpath, following the
        # standard OpenPose models directory layout.
        prototxt = os.path.join(
            modelpath, "mpi", "pose_deploy_linevec.prototxt")
        caffemodel = os.path.join(
            modelpath, "mpi", "pose_iter_160000.caffemodel")
mpi_model = cv2.dnn.readNetFromCaffe(prototxt, caffemodel)
return mpi_model
def general_coco_model(self, modelpath):
self.points_name = {
"Nose": 0, "Neck": 1,
"RShoulder": 2, "RElbow": 3, "RWrist": 4,
"LShoulder": 5, "LElbow": 6, "LWrist": 7,
"RHip": 8, "RKnee": 9, "RAnkle": 10,
"LHip": 11, "LKnee": 12, "LAnkle": 13,
"REye": 14, "LEye": 15,
"REar": 16, "LEar": 17,
"Background": 18}
self.num_points = 18
self.point_pairs = [[1, 0], [1, 2], [1, 5],
[2, 3], [3, 4], [5, 6],
[6, 7], [1, 8], [8, 9],
[9, 10], [1, 11], [11, 12],
[12, 13], [0, 14], [0, 15],
[14, 16], [15, 17]]
        prototxt = os.path.join(
            modelpath, "coco", "pose_deploy_linevec.prototxt")
        caffemodel = os.path.join(
            modelpath, "coco", "pose_iter_440000.caffemodel")
coco_model = cv2.dnn.readNetFromCaffe(prototxt, caffemodel)
return coco_model
def general_body25_model(self, modelpath):
self.num_points = 25
self.point_pairs = [[1, 0], [1, 2], [1, 5],
[2, 3], [3, 4], [5, 6],
[6, 7], [0, 15], [15, 17],
[0, 16], [16, 18], [1, 8],
[8, 9], [9, 10], [10, 11],
[11, 22], [22, 23], [11, 24],
[8, 12], [12, 13], [13, 14],
[14, 19], [19, 20], [14, 21]]
        prototxt = os.path.join(
            modelpath, "body_25", "pose_deploy.prototxt")
        caffemodel = os.path.join(
            modelpath, "body_25", "pose_iter_584000.caffemodel")
body25_model = cv2.dnn.readNetFromCaffe(prototxt, caffemodel)
return body25_model
def predict(self, imgfile):
img_cv2 = cv2.imread(imgfile)
img_height, img_width, _ = img_cv2.shape
inpBlob = cv2.dnn.blobFromImage(img_cv2,
1.0 / 255,
(self.inWidth, self.inHeight),
(0, 0, 0),
swapRB=False,
crop=False)
self.pose_net.setInput(inpBlob)
self.pose_net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
self.pose_net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
output = self.pose_net.forward()
H = output.shape[2]
W = output.shape[3]
print(output.shape)
points = []
for idx in range(self.num_points):
probMap = output[0, idx, :, :]
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
x = (img_width * point[0]) / W
y = (img_height * point[1]) / H
if prob > self.threshold:
points.append((int(x), int(y)))
else:
points.append(None)
        print("Detected points:", points)
return points
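    # The forward pass yields a tensor of shape (1, C, H, W): one confidence
    # heatmap per keypoint channel (the COCO and BODY25 models also emit
    # part-affinity channels, which predict() simply never indexes).
    # cv2.minMaxLoc locates each heatmap's peak, which is then rescaled from
    # heatmap coordinates back to the original image size.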
def vis_pose(self, imgfile, points):
img_cv2 = cv2.imread(imgfile)
img_cv2_copy = np.copy(img_cv2)
for idx in range(len(points)):
if points[idx]:
cv2.circle(img_cv2_copy,
points[idx],
8,
(0, 255, 255),
thickness=-1,
lineType=cv2.FILLED)
cv2.putText(img_cv2_copy,
"{}".format(idx),
points[idx],
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 0, 255),
2,
lineType=cv2.LINE_AA)
cv2.imwrite("./bianli/output/"+str(zt)+"_pose.jpg",img_cv2_copy)
for pair in self.point_pairs:
partA = pair[0]
partB = pair[1]
if points[partA] and points[partB]:
cv2.line(img_cv2,
points[partA],
points[partB],
(0, 255, 255), 3)
cv2.circle(img_cv2,
points[partA],
8,
(0, 0, 255),
thickness=-1,
lineType=cv2.FILLED)
cv2.imwrite("./bianli/output/"+str(zt)+"_skeleton.jpg",img_cv2)
    def handup(self, imgfile, point):
        # Flag a raised hand: a wrist (COCO point 4 or 7) above the neck
        # (point 1) has a smaller y coordinate in image space.
        global canva_r, text
        img_cv2 = cv2.imread(imgfile)
        if point[4] and point[1] and point[4][1] < point[1][1]:
            cv2.putText(img_cv2, 'HANDS UP!', (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            print("Hands-up detected!")
            text = canva_r.create_text(200, 200, text="Hands up!", font=("Lucida Console", 15), fill="red")
            show_text_for_seconds(2)
            cv2.imwrite("./bianli/output/" + str(zt) + "_pose.jpg", img_cv2)
        if point[7] and point[1] and point[7][1] < point[1][1]:
            cv2.putText(img_cv2, 'HANDS UP!', (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            print("Hands-up detected!")
            text = canva_r.create_text(200, 200, text="Hands up!", font=("Lucida Console", 15), fill="red")
            show_text_for_seconds(2)
            cv2.imwrite("./bianli/output/" + str(zt) + "_pose.jpg", img_cv2)
    def duanzheng(self, imgfile, point):
        # Flag an upright sitting posture: shoulders (2, 5), elbows (3, 6)
        # and a wrist (4 or 7) roughly level with each other vertically.
        img_cv2 = cv2.imread(imgfile)
        if (point[2] and point[5] and point[3] and point[6] and point[4]
                and abs(point[2][1] - point[5][1]) < 100
                and abs(point[3][1] - point[6][1]) < 100
                and abs(point[4][1] - point[3][1]) < 100
                and abs(point[4][1] - point[6][1]) < 100):
            cv2.putText(img_cv2, 'sitting upright!', (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            cv2.imwrite("./bianli/output/" + str(zt) + "_pose.jpg", img_cv2)
            print("Upright posture detected!")
            text = canva_r.create_text(200, 200, text="Sitting upright!", font=("Lucida Console", 15), fill="red")
            show_text_for_seconds(2)
        if (point[2] and point[5] and point[3] and point[6] and point[7]
                and abs(point[2][1] - point[5][1]) < 100
                and abs(point[3][1] - point[6][1]) < 100
                and abs(point[7][1] - point[3][1]) < 100
                and abs(point[7][1] - point[6][1]) < 100):
            cv2.putText(img_cv2, 'sitting upright!', (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            cv2.imwrite("./bianli/output/" + str(zt) + "_pose.jpg", img_cv2)
            print("Upright posture detected!")
            text = canva_r.create_text(200, 200, text="Sitting upright!", font=("Lucida Console", 15), fill="red")
            show_text_for_seconds(2)
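# A minimal usage sketch of general_pose_model, assuming the OpenPose COCO
# model files sit under ./myopenpose/models/pose/ (both paths below are
# illustrative, not fixed by this script):
#
#     model = general_pose_model("./myopenpose/models/pose/", mode="COCO")
#     points = model.predict("./bianli/input/1.jpg")
#     model.vis_pose("./bianli/input/1.jpg", points)
#     model.handup("./bianli/input/1.jpg", points)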
def main_detect(cap):
    global t, eye_close, count, yawn, yawn_flag
    print("Starting fatigue detection!")
    eye_flag = 0  # frames elapsed since the current closed-eye event
    while switch == 1:
        start = cv2.getTickCount()
        canva_r.delete("all")
        ret, frame = cap.read()
        if not ret:
            canva_r.create_text(200, 200, text="Video playback finished", font=("Lucida Console", 15), fill="red")
            print("Video playback finished")
            break
frame = frame[0:1080, 0:1440]
frame = cv2.resize(frame, (int(frame.shape[1] / 2.25), int(frame.shape[0] / 2.25)))
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = detection(gray,0)
for face in faces:
shape = predictor(gray, face)
shape = face_utils.shape_to_np(shape)
leftEye = shape[lstart:lend]
rightEye = shape[rstart:rend]
leftEyeDistance = calculate_EAR(leftEye)
rightEyeDistance = calculate_EAR(rightEye)
ER = (leftEyeDistance+rightEyeDistance) / 2
leftEyeHull = cv2.convexHull(leftEye)
rightEyeHull = cv2.convexHull(rightEye)
cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
mouth = shape[mStart:mEnd]
mouthRatio = mouth_aspect_ratio(mouth)
mymouth = cv2.convexHull(mouth)
cv2.drawContours(frame, [mymouth], -1, (0, 255, 0), 1)
            if mouthRatio > thresh_mouth:
                yawn = True
                yawn_flag = 0
            if yawn and yawn_flag < 40:
                canva_r.create_text(200, 200, text="Yawn detected,\nplease do not drive while drowsy!", font=("Lucida Console", 15), fill="red")
                if t == 0:
                    t = 1
                    pygame.mixer.music.stop()
                    pygame.mixer.music.load('sound\\yawn.mp3')
                    pygame.mixer.music.play()
                yawn_flag = yawn_flag + 1
            elif yawn and yawn_flag == 40:
                yawn = False
                yawn_flag = 0
                t = 0
            if ER < eyes_blink:
                count += 1
                if count >= eyes_ratio:
                    eye_close = True
                    eye_flag = 0
            else:
                count = 0
            if eye_close and eye_flag < 40:
                canva_r.create_text(200, 200, text="Warning!\nYour eyes are closed,\nplease do not drive while drowsy!", justify=LEFT,
                                    font=("Lucida Console", 15), fill="red")
                if t == 0:
                    t = 1
                    pygame.mixer.music.stop()
                    pygame.mixer.music.load('sound\\eyes.mp3')
                    pygame.mixer.music.play()
                eye_flag = eye_flag + 1
            elif eye_close and eye_flag == 40:
                eye_close = False
                eye_flag = 0
                t = 0
end = cv2.getTickCount()
during1 = (end - start) / cv2.getTickFrequency()
FPS.set("FPS:" + str(round(1 / during1, 2)))
Showimage(frame, canva_l, "fit")
root.update()
def load_dlib():
    # Load dlib's frontal face detector and 68-point landmark predictor.
    global detection, predictor
    detection = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')
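# shape_predictor_68_face_landmarks.dat is dlib's pre-trained 68-point
# landmark model; it ships separately from dlib itself and must be
# downloaded (e.g. from http://dlib.net/files/) and placed next to this
# script before load_dlib() will succeed.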
def calculate_EAR(eye):
    # Eye aspect ratio (EAR): vertical landmark distances over the
    # horizontal one; the value drops sharply when the eye closes.
    A = distance.euclidean(eye[1], eye[5])
    B = distance.euclidean(eye[2], eye[4])
    C = distance.euclidean(eye[0], eye[3])
    ear_aspect_ratio = (A + B) / (2.0 * C)
    return ear_aspect_ratio
def mouth_aspect_ratio(mouth):
    # Mouth aspect ratio (MAR): analogous to EAR; a wide-open (yawning)
    # mouth produces a large value.
    A = distance.euclidean(mouth[2], mouth[10])
    B = distance.euclidean(mouth[4], mouth[8])
    C = distance.euclidean(mouth[0], mouth[6])
    ratio = (A + B) / (2.0 * C)
    return ratio
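# For the six eye landmarks p1..p6 selected by the slices above, the EAR is
#
#     EAR = (||p2 - p6|| + ||p3 - p5||) / (2 * ||p1 - p4||)
#
# An open eye typically gives an EAR of roughly 0.25-0.3 and the value falls
# toward zero as the eye closes, which is why eyes_blink = 0.2 serves as the
# closed-eye threshold here (the open-eye range is a rough rule of thumb,
# not something measured by this script).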
def upload_file():
    global cap, txt, upload_flag, ret, frame
    selectFile = tkinter.filedialog.askopenfilename()
    entry1.insert(0, selectFile)
    txt = entry1.get()
    print("Uploaded file path:")
    print(txt)
    cap = cv2.VideoCapture(txt)
    print("Video captured successfully")
    upload_flag = 1
    if upload_flag == 1:
        canva_l.delete('all')
        canva_l.create_text(200, 200, text="Video opened successfully", font=("Lucida Console", 15), fill="red")
        main_detect(cap=cap)
def swi():
    # Save the current video frame to ./pic/ as a numbered snapshot.
    global num
    if cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            canva_r.create_text(200, 200, text="Video playback finished", font=("Lucida Console", 15), fill="red")
            print("Video playback finished")
            return
        cv2.imwrite('./pic/' + str(num) + ".jpg", frame)
        print("Snapshot saved")
        num += 1
def open_camera():
    global cap
    canva_l.delete('all')
    canva_l.create_text(200, 200, text="Camera opened successfully", font=("Lucida Console", 15), fill="red")
    cap = cv2.VideoCapture(0)
    main_detect(cap=cap)
def Showimage(imgCV_in, canva, layout="null"):
    # Render an OpenCV BGR image onto a tkinter canvas; "fill" stretches the
    # image to the canvas, "fit" scales it while preserving aspect ratio.
    global imgTK
canvawidth = int(canva.winfo_reqwidth())
canvaheight = int(canva.winfo_reqheight())
sp = imgCV_in.shape
cvheight = sp[0]
cvwidth = sp[1]
if (layout == "fill"):
imgCV = cv2.resize(imgCV_in, (canvawidth, canvaheight), interpolation=cv2.INTER_AREA)
elif (layout == "fit"):
if (float(cvwidth / cvheight) > float(canvawidth / canvaheight)):
imgCV = cv2.resize(imgCV_in, (canvawidth, int(canvawidth * cvheight / cvwidth)),
interpolation=cv2.INTER_AREA)
else:
imgCV = cv2.resize(imgCV_in, (int(canvaheight * cvwidth / cvheight), canvaheight),
interpolation=cv2.INTER_AREA)
else:
imgCV = imgCV_in
imgCV2 = cv2.cvtColor(imgCV, cv2.COLOR_BGR2RGBA)
current_image = Image.fromarray(imgCV2)
imgTK = ImageTk.PhotoImage(image=current_image)
canva.create_image(0, 0, anchor=NW, image=imgTK)
def show_text_for_seconds(seconds):
    # Keep the current canvas text visible for the given number of seconds.
    # Note that time.sleep blocks the tkinter event loop while it waits.
    for i in range(seconds):
        time.sleep(1)
        canva_r.update()
def GUI_init():
global entry1,canva_l,canva_r,FPS,root,cut
root = Tk()
    root.title("Posture Detection System")
root.minsize(710, 410)
canva_l = Canvas(root, width=480, height=360, bg="white")
canva_l.grid(row=0, column=0)
canva_r = Canvas(root, width=350, height=360, bg="white")
canva_r.grid(row=0, column=1)
FPS = tkinter.StringVar()
FPS_show = tkinter.Label(root, textvariable=FPS, bg="white", font=("Lucida Console", 10))
FPS_show.grid(row=1, column=0)
    if upload_flag == 0:
        canva_l.create_text(200, 200, text="Welcome to the system", font=("Lucida Console", 15), fill="red")
    cut = tkinter.Button(root, text="Snapshot", font=("Lucida Console", 14), command=swi)
cut.grid(row=2,column=1)
cut.place(x=350, y=366)
    upload = tkinter.Button(root, text='Upload file', command=upload_file)
upload.grid(row=1,column=1)
entry1 = tkinter.Entry(root, width='40')
entry1.grid(row=2,column=1)
    camera = tkinter.Button(root, text="Open camera", command=open_camera)
camera.grid(row=3,column=1)
    estimate = tkinter.Button(root, text="Pose estimation", font=("Lucida Console", 14), command=zitai)
estimate.grid(row=2,column=0)
root.mainloop()
def zitai():
    # Grab the current frame, run OpenPose on it, and save the annotated
    # keypoint and skeleton images.
    global zt
    if cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            return
        cv2.imwrite('./bianli/input/' + str(zt) + ".jpg", frame)
        print("Saved one frame for OpenPose")
    print("Starting OpenPose detection!")
    img_file = "./bianli/input/" + str(zt) + ".jpg"
    modelpath = "./myopenpose/models/pose/"
    pose_model = general_pose_model(modelpath, mode="COCO")
    res_points = pose_model.predict(img_file)
    pose_model.vis_pose(img_file, res_points)
    pose_model.handup(img_file, res_points)
    pose_model.duanzheng(img_file, res_points)
    print("Saved skeleton and keypoint images:", zt)
    print("OpenPose detection finished!")
    zt += 1
if __name__ == '__main__':
load_dlib()
GUI_init()