Gesture-Controlled Mouse (Source Code)

A note before we start: the program itself is not difficult; it mostly just stitches together a number of libraries. Once the libraries are installed, you can simply run it.

pip install the following:
mediapipe==0.8.9
numpy
autopy
opencv-python
pyautogui
pillow

If anything else is missing, just install the corresponding library according to the error message.

Gestures: a single raised index finger moves the mouse; all five fingers raised scroll up, and four fingers (all but the thumb) scroll down; pinching the index and middle fingers triggers a right click, pinching the thumb and index finger triggers a left click, and raising only the middle, ring and little fingers drags.

I won't show a demo recording here (I'd rather not show my face); try it yourself and you will see how it works.
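
To run it, keep the three files below (demo_windows.py, handProcess.py, utils.py) in the same directory, together with a ./fonts/simsun.ttc font file that utils.py needs for the on-screen Chinese text. Then start the demo with python demo_windows.py; it opens camera 0 and exits when you press Esc.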

demo_windows.py

"""
功能:手势操作电脑鼠标
1、使用OpenCV读取摄像头视频流;
2、识别手掌关键点像素坐标;
3、根据坐标计算不同的手势模式
4、控制对应的鼠标操作:移动、单击、双击、右击、向上滑、向下滑、拖拽
"""

# Import other dependencies
import time

import autopy
# Import OpenCV
import cv2
import numpy as np
import pyautogui

# Import the hand-processing helper
import handProcess
from utils import Utils


# Gesture recognition and mouse control class
class VirtualMouse:
    def __init__(self):
        
        # Current frame, stored on the instance so other methods can access it
        self.image=None
    
    # Main loop
    def recognize(self):

        handprocess = handProcess.HandProcess(False,1)
        utils = Utils()
        
        fpsTime = time.time()
        cap = cv2.VideoCapture(0)
        # Working video resolution
        resize_w = 960
        resize_h = 720

        # Active-area margin: only fingertip positions inside this inner rectangle are mapped to the screen
        frameMargin = 100
        
        # Screen size
        screenWidth, screenHeight = pyautogui.size() 

        # Smoothing parameters for cursor movement
        stepX, stepY = 0, 0
        finalX, finalY = 0, 0
        smoothening = 7

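        # Debounce timestamps for the click actions ('double_click' is defined but not used in this demo)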
        action_trigger_time = {
            'single_click':0,
            'double_click':0,
            'right_click':0
        }

        mouseDown = False

        # fps = cap.get(cv2.CAP_PROP_FPS)
        # fps = 18
        # videoWriter = cv2.VideoWriter('./record_video/out'+str(time.time())+'.mp4', cv2.VideoWriter_fourcc(*'H264'), fps, (618,720))


        while cap.isOpened():
            action_zh = ''
            success, self.image = cap.read()
            if not success:
                print("Empty frame")
                continue

            # Resize the frame
            self.image = cv2.resize(self.image, (resize_w, resize_h))

            # Mark the frame read-only to improve performance
            self.image.flags.writeable = False
            # Convert BGR to RGB for MediaPipe
            self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
            # Mirror the image; adjust depending on how the camera is mounted
            self.image = cv2.flip(self.image, 1)
            # Detect and draw the hand
            self.image = handprocess.processOneHand(self.image)

            # Draw the active-area rectangle
            cv2.rectangle(self.image, (frameMargin, frameMargin), (resize_w - frameMargin, resize_h - frameMargin),(255, 0, 255), 2)

            # Get the recognized action
            self.image,action,key_point = handprocess.checkHandAction(self.image,drawKeyFinger=True)

            action_zh = handprocess.action_labels[action]

            if key_point:
                # Map the fingertip position inside the margin box to screen coordinates
                x3 = np.interp(key_point[0], (frameMargin, resize_w - frameMargin), (0, screenWidth))
                y3 = np.interp(key_point[1], (frameMargin, resize_h - frameMargin), (0, screenHeight))
                
                # Smooth the cursor position
                finalX = stepX + (x3 - stepX) / smoothening
                finalY = stepY + (y3 - stepY) / smoothening
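                # finalX/finalY advance only 1/smoothening of the remaining distance per
                # frame, a simple exponential moving average that damps hand jitter.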

                now = time.time() 

                if action_zh == '鼠标拖拽':
                    # Original approach:
                    # pyautogui.dragTo(finalX, finalY)
                    # Work around dragTo being unreliable and laggy on Windows
                    if not mouseDown:
                        pyautogui.mouseDown(button='left')
                        mouseDown = True
                    autopy.mouse.move(finalX, finalY)
                else:
                    
                    if mouseDown:
                        pyautogui.mouseUp(button='left')

                    mouseDown = False


                if action_zh == '鼠标移动':
                    # pyautogui.moveTo(finalX, finalY)
                    # autopy.mouse.move avoids the stutter of pyautogui.moveTo
                    autopy.mouse.move(finalX, finalY)

                elif action_zh == '单击准备':
                    pass
                elif action_zh == '触发单击' and (now - action_trigger_time['single_click'] > 0.3):
                    pyautogui.click()
                    action_trigger_time['single_click'] = now
              
                    
                elif action_zh == '右击准备':
                    pass
                elif action_zh == '触发右击' and (now - action_trigger_time['right_click'] > 2):
                    pyautogui.click(button='right')  
                    action_trigger_time['right_click'] = now

                elif action_zh == '向上滑页':
                    pyautogui.scroll(30)
                elif action_zh == '向下滑页':
                    pyautogui.scroll(-30)

                

                
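                # Remember the smoothed position for the next frame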
                stepX, stepY = finalX, finalY


            self.image.flags.writeable = True
            self.image = cv2.cvtColor(self.image, cv2.COLOR_RGB2BGR)
            
            
            # Compute and display the frame rate (FPS)
            cTime = time.time()
            fps_text = 1/(cTime-fpsTime)
            fpsTime = cTime
         
            self.image = utils.cv2AddChineseText(self.image, "帧率: " + str(int(fps_text)),  (10, 30), textColor=(255, 0, 255), textSize=50)
            

            # Show the frame
            # videoWriter.write(self.image) 
            self.image = cv2.resize(self.image, (resize_w//2, resize_h//2))
            cv2.imshow('virtual mouse', self.image)
            if cv2.waitKey(5) & 0xFF == 27:
                break
        cap.release()
        cv2.destroyAllWindows()


# Entry point
if __name__ == '__main__':
    control = VirtualMouse()
    control.recognize()

handProcess.py

"""
! author: enpei
! date: 2021-12-15
功能:封装手部识别常用功能,简化Demo代码复杂度
"""
# Import OpenCV
import cv2
# Import MediaPipe
import mediapipe as mp
import time
import math
import numpy as np
from utils import Utils


class HandProcess:

    def __init__(self,static_image_mode=False,max_num_hands=2):
        # MediaPipe hands configuration
        self.mp_drawing = mp.solutions.drawing_utils
        self.mp_drawing_styles = mp.solutions.drawing_styles
        self.mp_hands = mp.solutions.hands
        self.hands = self.mp_hands.Hands(static_image_mode=static_image_mode,
                                         min_detection_confidence=0.7,
                                         min_tracking_confidence=0.5,
                                         max_num_hands=max_num_hands)

        self.landmark_list = []

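        # Map internal action keys to the Chinese labels drawn on the frame;
        # demo_windows.py compares action_zh against these exact strings.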
        self.action_labels = {
            'none': '无',
            'move': '鼠标移动',
            'click_single_active': '触发单击',
            'click_single_ready': '单击准备',
            'click_right_active': '触发右击',
            'click_right_ready': '右击准备',
            'scroll_up': '向上滑页',
            'scroll_down': '向下滑页',
            'drag': '鼠标拖拽'
        }
        self.action_deteted = ''


    # Determine which hand (Left/Right) each detection index corresponds to
    def checkHandsIndex(self,handedness):
        # Check how many hands were detected
        if len(handedness) == 1:
            handedness_list = [handedness[0].classification[0].label]
        else:
            handedness_list = [handedness[0].classification[0].label,handedness[1].classification[0].label]
        
        return handedness_list
    
    # Distance between two points
    def getDistance(self,pointA,pointB):
        return math.hypot((pointA[0]-pointB[0]),(pointA[1]-pointB[1]))

    # Get the (x, y) pixel coordinates of a landmark
    def getFingerXY(self,index):
        return (self.landmark_list[index][1],self.landmark_list[index][2])

    # Draw visual feedback for the current action
    def drawInfo(self,img,action):
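        # Landmark ids 4, 8 and 12 are the thumb, index and middle fingertips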
        thumbXY,indexXY,middleXY = map(self.getFingerXY,[4,8,12])

        if action == 'move':
            img = cv2.circle(img,indexXY,20,(255,0,255),-1)

        elif action == 'click_single_active':
            middle_point = int(( indexXY[0]+ thumbXY[0])/2),int((  indexXY[1]+ thumbXY[1] )/2)
            img = cv2.circle(img,middle_point,30,(0,255,0),-1)

        elif action == 'click_single_ready':
            img = cv2.circle(img,indexXY,20,(255,0,255),-1)
            img = cv2.circle(img,thumbXY,20,(255,0,255),-1)
            img = cv2.line(img,indexXY,thumbXY,(255,0,255),2)
        

        elif action == 'click_right_active':
            middle_point = int(( indexXY[0]+ middleXY[0])/2),int((  indexXY[1]+ middleXY[1] )/2)
            img = cv2.circle(img,middle_point,30,(0,255,0),-1)

        elif action == 'click_right_ready':
            img = cv2.circle(img,indexXY,20,(255,0,255),-1)
            img = cv2.circle(img,middleXY,20,(255,0,255),-1)
            img = cv2.line(img,indexXY,middleXY,(255,0,255),2)


        return img

    # Classify the hand gesture and return the corresponding action
    def checkHandAction(self,img,drawKeyFinger=True):
        upList = self.checkFingersUp()
        action = 'none'

        if len(upList) == 0:
            return img,action,None
        
        # Pinch threshold in pixels: fingertips closer than this count as a pinch
        dete_dist = 100
        # Default key point: the index fingertip (landmark 8)
        key_point = self.getFingerXY(8)
    
        # Move mode: only the index finger is up; the cursor follows the fingertip (smoothed to reduce jitter)
        if (upList == [0,1,0,0,0]):
            action = 'move'

        # Single click: thumb and index finger are up; pinching them together triggers the click
        if (upList == [1,1,0,0,0]):
            l1 = self.getDistance(self.getFingerXY(4),self.getFingerXY(8))
            action = 'click_single_active'  if l1 < dete_dist else  'click_single_ready'                
            

        # Right click: index and middle fingers are up; pinching them together triggers the click
        if (upList == [0,1,1,0,0]):
            l1 = self.getDistance(self.getFingerXY(8),self.getFingerXY(12))
            action = 'click_right_active' if l1 < dete_dist else 'click_right_ready'
            
        # Scroll up: all five fingers up
        if (upList == [1,1,1,1,1]):
            action = 'scroll_up'

        # Scroll down: four fingers up, thumb down
        if (upList == [0,1,1,1,1]):
            action = 'scroll_down'

        # Drag: middle, ring and little fingers up (thumb and index down)
        if (upList == [0,0,1,1,1]):
            # Use the middle fingertip as the key point instead
            key_point = self.getFingerXY(12)
            action = 'drag'

        # Draw the key points for the detected action
        img = self.drawInfo(img,action) if drawKeyFinger else img

        self.action_deteted = self.action_labels[action]

        return img,action,key_point
    
    # Return a list of 1/0 flags for [thumb, index, middle, ring, pinky]; e.g. [0,1,0,0,0] means only the index finger is up
    def checkFingersUp(self):

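        # Fingertip landmark ids for thumb, index, middle, ring and little fingers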
        fingerTipIndexs = [4,8,12,16,20]
        upList = []
        if len(self.landmark_list) == 0:
            return upList

        # Thumb: compare x coordinates, since the thumb extends sideways rather than upwards
        if self.landmark_list[fingerTipIndexs[0]][1] < self.landmark_list[fingerTipIndexs[0]-1][1]:
            upList.append(1)
        else:
            upList.append(0)

        # Other fingers: the finger is up when the tip's y coordinate is above the joint two landmarks below it
        for i in range(1,5):
            if self.landmark_list[fingerTipIndexs[i]][2] < self.landmark_list[fingerTipIndexs[i]-2][2]:
                upList.append(1)
            else:
                upList.append(0)
        
        return upList

    # Run hand detection on one frame, draw the landmarks and cache their pixel coordinates
    def processOneHand(self,img,drawBox=True,drawLandmarks=True):
        utils = Utils()

        results = self.hands.process(img)
        self.landmark_list = []
        
        if results.multi_hand_landmarks:
            
            for hand_index,hand_landmarks in enumerate(results.multi_hand_landmarks):
                
                if drawLandmarks:
                    self.mp_drawing.draw_landmarks(
                        img,
                        hand_landmarks,
                        self.mp_hands.HAND_CONNECTIONS,
                        self.mp_drawing_styles.get_default_hand_landmarks_style(),
                        self.mp_drawing_styles.get_default_hand_connections_style())

                # Convert each normalized landmark to pixel coordinates
                
                for landmark_id, finger_axis in enumerate(hand_landmarks.landmark):
                    h,w,c = img.shape
                    p_x,p_y = math.ceil(finger_axis.x * w), math.ceil(finger_axis.y * h)

                    self.landmark_list.append([
                        landmark_id, p_x, p_y,
                        finger_axis.z
                    ])

                # Bounding box and action label
                if drawBox:
                    x_min,x_max =  min(self.landmark_list,key=lambda i : i[1])[1], max(self.landmark_list,key=lambda i : i[1])[1]
                    y_min,y_max =  min(self.landmark_list,key=lambda i : i[2])[2], max(self.landmark_list,key=lambda i : i[2])[2]

                    img = cv2.rectangle(img,(x_min-30,y_min-30),(x_max+30,y_max+30),(0, 255, 0),2)
                    img = utils.cv2AddChineseText(img, self.action_deteted,  (x_min-20,y_min-120), textColor=(255, 0, 255), textSize=60)
                        
        return img

utils.py 

# Import PIL (Pillow)
from PIL import Image, ImageDraw, ImageFont
# Import OpenCV
import cv2
import numpy as np


class Utils:
    def __init__(self):
        pass
    # Draw Chinese text on an image (OpenCV's putText cannot render CJK characters, so PIL is used)
    def cv2AddChineseText(self,img, text, position, textColor=(0, 255, 0), textSize=30):
        if (isinstance(img, np.ndarray)):  # if it is an OpenCV image (NumPy array), convert it to PIL
            img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        # Create a drawing context on the image
        draw = ImageDraw.Draw(img)
        # Font: SimSun loaded from the local fonts directory (the .ttc file must be present)
        fontStyle = ImageFont.truetype(
            "./fonts/simsun.ttc", textSize, encoding="utf-8")
        # Draw the text
        draw.text(position, text, textColor, font=fontStyle)
        # Convert back to an OpenCV (BGR) image
        return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
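
As a quick standalone check of the text overlay (a minimal sketch; test.jpg is just a placeholder for any image on disk, and it assumes the fonts/simsun.ttc file is in place):

import cv2
from utils import Utils

frame = cv2.imread("test.jpg")  # placeholder: any BGR image on disk
frame = Utils().cv2AddChineseText(frame, "帧率: 30", (10, 30),
                                  textColor=(255, 0, 255), textSize=50)
cv2.imshow("overlay", frame)
cv2.waitKey(0)
cv2.destroyAllWindows()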
