Recognizing Five Hand Gestures with MediaPipe

This post walks through a project that uses Google's pre-trained MediaPipe gesture recognition model to recognize five specific hand gestures, including thumbs_up, victory, thumbs_down, and pointing_up.

Step 1: Install the MediaPipe library

pip install -q mediapipe==0.10.0
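If you want to confirm the installation before moving on, a quick sanity check is to import the package and print its version. This is just a sketch; the pin above is what this post uses, and newer 0.10.x releases should expose the same Tasks API:

import mediapipe as mp
from mediapipe.tasks.python import vision

print(mp.__version__)                        # expect 0.10.0 to match the pin above
print(hasattr(vision, "GestureRecognizer"))  # True when the Tasks vision API is available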

Step 2: Import the libraries

import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision
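The recognizer created in the complete code below loads a pre-trained model bundle, gesture_recognizer.task, from the working directory. That file is included in the packaged download at the end of this post; if you prefer to fetch it yourself, a sketch along the following lines works, assuming the public model URL from Google's MediaPipe examples (verify the exact URL against the current MediaPipe documentation):

import os
import urllib.request

# Assumed public location of the pre-trained gesture recognizer bundle;
# check the MediaPipe model documentation if the download fails.
MODEL_URL = ("https://storage.googleapis.com/mediapipe-models/gesture_recognizer/"
             "gesture_recognizer/float16/1/gesture_recognizer.task")

if not os.path.exists("gesture_recognizer.task"):
    urllib.request.urlretrieve(MODEL_URL, "gesture_recognizer.task")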

Step 3: Complete code

# -*- coding: utf-8 -*-
"""
Created on Wed Jul 12 15:31:05 2023

@author: zhaohao
"""

#@markdown We implemented some functions to visualize the gesture recognition results.
#@markdown Run the following cell to activate the functions.
from matplotlib import pyplot as plt
import mediapipe as mp
from mediapipe.framework.formats import landmark_pb2
import math

plt.rcParams.update({
    'axes.spines.top': False,
    'axes.spines.right': False,
    'axes.spines.left': False,
    'axes.spines.bottom': False,
    'xtick.labelbottom': False,
    'xtick.bottom': False,
    'ytick.labelleft': False,
    'ytick.left': False,
    'xtick.labeltop': False,
    'xtick.top': False,
    'ytick.labelright': False,
    'ytick.right': False
})

mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles


def display_one_image(image, title, subplot, titlesize=16):
    """Displays one image along with the predicted category name and score."""
    plt.subplot(*subplot)
    plt.imshow(image)
    if len(title) > 0:
        plt.title(title, fontsize=int(titlesize), color='black',
                  fontdict={'verticalalignment': 'center'}, pad=int(titlesize/1.5))
    return (subplot[0], subplot[1], subplot[2]+1)


def display_batch_of_images_with_gestures_and_hand_landmarks(images, results):
    """Displays a batch of images with the gesture category and its score along with the hand landmarks."""
    # Images and labels.
    images = [image.numpy_view() for image in images]
    gestures = [top_gesture for (top_gesture, _) in results]
    multi_hand_landmarks_list = [multi_hand_landmarks for (_, multi_hand_landmarks) in results]

    # Auto-squaring: this will drop data that does not fit into square or square-ish rectangle.
    rows = int(math.sqrt(len(images)))
    cols = len(images) // rows

    # Size and spacing.
    FIGSIZE = 13.0
    SPACING = 0.1
    subplot = (rows, cols, 1)
    if rows < cols:
        plt.figure(figsize=(FIGSIZE, FIGSIZE/cols*rows))
    else:
        plt.figure(figsize=(FIGSIZE/rows*cols, FIGSIZE))

    # Display gestures and hand landmarks.
    for i, (image, gestures) in enumerate(zip(images[:rows*cols], gestures[:rows*cols])):
        title = f"{gestures.category_name} ({gestures.score:.2f})"
        dynamic_titlesize = FIGSIZE*SPACING/max(rows, cols) * 40 + 3
        annotated_image = image.copy()

        for hand_landmarks in multi_hand_landmarks_list[i]:
            hand_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
            hand_landmarks_proto.landmark.extend([
                landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z)
                for landmark in hand_landmarks
            ])

            mp_drawing.draw_landmarks(
                annotated_image,
                hand_landmarks_proto,
                mp_hands.HAND_CONNECTIONS,
                mp_drawing_styles.get_default_hand_landmarks_style(),
                mp_drawing_styles.get_default_hand_connections_style())

        subplot = display_one_image(annotated_image, title, subplot, titlesize=dynamic_titlesize)

    # Layout.
    plt.tight_layout()
    plt.subplots_adjust(wspace=SPACING, hspace=SPACING)
    plt.show()


# STEP 1: Import the necessary modules.
import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision

# STEP 2: Create a GestureRecognizer object.
base_options = python.BaseOptions(model_asset_path='gesture_recognizer.task')
options = vision.GestureRecognizerOptions(base_options=base_options)
recognizer = vision.GestureRecognizer.create_from_options(options)

IMAGE_FILENAMES = ['thumbs_down.jpg', 'victory.jpg', 'thumbs_up.jpg', 'pointing_up.jpg']

images = []
results = []
for image_file_name in IMAGE_FILENAMES:
    # STEP 3: Load the input image.
    image = mp.Image.create_from_file(image_file_name)

    # STEP 4: Recognize gestures in the input image.
    recognition_result = recognizer.recognize(image)

    # STEP 5: Process the result. In this case, visualize it.
    images.append(image)
    top_gesture = recognition_result.gestures[0][0]
    hand_landmarks = recognition_result.hand_landmarks
    results.append((top_gesture, hand_landmarks))

display_batch_of_images_with_gestures_and_hand_landmarks(images, results)
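The plotting helpers above are only for visualization. If you just need the predicted label and confidence for a single image, the recognition part of the script boils down to a few lines; this sketch assumes gesture_recognizer.task and one of the test images (e.g. thumbs_up.jpg) are in the working directory:

import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision

# Build the recognizer from the bundled model file.
base_options = python.BaseOptions(model_asset_path='gesture_recognizer.task')
options = vision.GestureRecognizerOptions(base_options=base_options)
recognizer = vision.GestureRecognizer.create_from_options(options)

# Recognize one image and print the top gesture for each detected hand.
image = mp.Image.create_from_file('thumbs_up.jpg')
result = recognizer.recognize(image)
for hand_gestures in result.gestures:   # one list of candidates per detected hand
    top = hand_gestures[0]              # first entry is the highest-scoring gesture
    print(top.category_name, f"{top.score:.2f}")

This mirrors STEP 2 through STEP 5 of the full script; only the matplotlib visualization is dropped.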

Results on the test images:

[Figure: predicted gesture, confidence score, and hand landmarks drawn on each test image]

Packaged download: complete code + trained gesture recognizer + test images

Download link 1:

Complete code + trained gesture recognizer + test images (细胞盘)

Download link 2:

Complete code + trained gesture recognizer + test images (CSDN文库)
