[Graduation Project] Real-Time Gesture Recognition with OpenCV

Preface


Senior year is the busiest stretch of the whole undergraduate program: on one hand you are preparing for exams or internships to get ready for the job market or graduate school, and on the other the graduation project demands a great deal of energy. In recent years the project topics required by schools have become harder and harder, and quite a few are at graduate-student difficulty, which makes them a real challenge for undergraduates. To help everyone pass smoothly and save time and energy for the more important job hunting and exams, I am sharing solid topic-selection experience together with project ideas and technical approaches.

If you have any questions about your graduation project, feel free to ask me!

The topic shared this time is

Gesture recognition with OpenCV

Implementation Approach

Dataset

Gesture data collection: an OpenCV program opens the camera and three different gestures are performed in front of it (they correspond to the handone, handtwo and handfive classes labeled later). While OpenCV reads the video stream it saves every frame, and 1000 gesture images are finally selected from those frames.

import cv2 as cv

# Frame-grabbing demo: read a video source frame by frame and run a Haar cascade on each frame.
# image = cv.imread("D:/vcprojects/images/three.png")
capture = cv.VideoCapture("D:/vcprojects/images/visit.mp4")
detector = cv.CascadeClassifier(cv.data.haarcascades + "haarcascade_frontalface_alt.xml")
while True:
    ret, image = capture.read()
    if ret:
        cv.imshow("frame", image)
        faces = detector.detectMultiScale(image, scaleFactor=1.05, minNeighbors=1,
                                          minSize=(30, 30), maxSize=(120, 120))
        for x, y, width, height in faces:
            cv.rectangle(image, (x, y), (x + width, y + height), (0, 0, 255), 2, cv.LINE_8, 0)
        cv.imshow("faces", image)
        c = cv.waitKey(50)
        if c == 27:  # ESC quits
            break
    else:
        break

capture.release()
cv.destroyAllWindows()
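The snippet above is a frame-grabbing demo (it also runs a Haar face cascade); it does not actually write frames to disk. A minimal sketch of the frame-saving step described in the Dataset paragraph might look like the following; the camera index 0, the output folder D:/hand_data/raw/ and the keep-every-5th-frame rate are assumptions, not values from the original project:

import os
import cv2 as cv

save_dir = "D:/hand_data/raw/"      # assumed output folder for raw gesture frames
os.makedirs(save_dir, exist_ok=True)

capture = cv.VideoCapture(0)        # default camera
index = 0
while True:
    ret, frame = capture.read()
    if not ret:
        break
    cv.imshow("frame", frame)
    if index % 5 == 0:              # keep every 5th frame (assumed sampling rate)
        cv.imwrite(os.path.join(save_dir, "hand_%04d.jpg" % index), frame)
    index += 1
    if cv.waitKey(50) == 27:        # ESC stops the capture
        break

capture.release()
cv.destroyAllWindows()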

Data Annotation

Each time an image is annotated and saved, a corresponding XML file is generated. These XML files follow the PASCAL VOC2012 format, which is also the standard annotation format used by the ImageNet dataset.

(Figure 1)
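As a quick sanity check on the labels, a single annotation file can be parsed with the standard library. This is only a sketch: the path is a placeholder, and the tag names (filename, object/name, bndbox) follow the standard PASCAL VOC layout:

import xml.etree.ElementTree as ET

xml_path = "D:/hand_data/VOC2012/Annotations/hand_0001.xml"   # placeholder annotation file

tree = ET.parse(xml_path)
root = tree.getroot()
print("image:", root.find("filename").text)
for obj in root.iter("object"):
    name = obj.find("name").text            # e.g. handone / handtwo / handfive
    box = obj.find("bndbox")
    print(name,
          box.find("xmin").text, box.find("ymin").text,
          box.find("xmax").text, box.find("ymax").text)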

Dataset Preparation and Train/Val List Generation

import os
import xml.etree.ElementTree as ET

def generate_classes_text():
    # Build the per-class train/val image lists in PASCAL VOC "ImageSets/Main" style:
    # each line is "<image id> 1" if the image contains that class, otherwise "<image id> -1".
    print("start to generate classes text...")
    ann_dir = "D:/hand_data/VOC2012/Annotations/"

    handone_train = open("D:/hand_data/VOC2012/ImageSets/Main/handone_train.txt", 'w')
    handone_val = open("D:/hand_data/VOC2012/ImageSets/Main/handone_val.txt", 'w')

    handfive_train = open("D:/hand_data/VOC2012/ImageSets/Main/handfive_train.txt", 'w')
    handfive_val = open("D:/hand_data/VOC2012/ImageSets/Main/handfive_val.txt", 'w')

    handtwo_train = open("D:/hand_data/VOC2012/ImageSets/Main/handtwo_train.txt", 'w')
    handtwo_val = open("D:/hand_data/VOC2012/ImageSets/Main/handtwo_val.txt", 'w')

    files = os.listdir(ann_dir)
    for xml_file in files:
        if os.path.isfile(os.path.join(ann_dir, xml_file)):
            xml_path = os.path.join(ann_dir, xml_file)
            tree = ET.parse(xml_path)
            root = tree.getroot()
            for elem in root.iter('filename'):
                filename = elem.text
            for elem in root.iter('name'):
                name = elem.text

            if name == "handone":
                handone_train.write(filename.replace(".jpg", " ") + str(1) + "\n")
                handone_val.write(filename.replace(".jpg", " ") + str(1) + "\n")

                handfive_train.write(filename.replace(".jpg", " ") + str(-1) + "\n")
                handfive_val.write(filename.replace(".jpg", " ") + str(-1) + "\n")

                handtwo_train.write(filename.replace(".jpg", " ") + str(-1) + "\n")
                handtwo_val.write(filename.replace(".jpg", " ") + str(-1) + "\n")
            if name == "handtwo":
                handone_train.write(filename.replace(".jpg", " ") + str(-1) + "\n")
                handone_val.write(filename.replace(".jpg", " ") + str(-1) + "\n")

                handfive_train.write(filename.replace(".jpg", " ") + str(-1) + "\n")
                handfive_val.write(filename.replace(".jpg", " ") + str(-1) + "\n")

                handtwo_train.write(filename.replace(".jpg", " ") + str(1) + "\n")
                handtwo_val.write(filename.replace(".jpg", " ") + str(1) + "\n")

            if name == "handfive":
                handone_train.write(filename.replace(".jpg", " ") + str(-1) + "\n")
                handone_val.write(filename.replace(".jpg", " ") + str(-1) + "\n")

                handfive_train.write(filename.replace(".jpg", " ") + str(1) + "\n")
                handfive_val.write(filename.replace(".jpg", " ") + str(1) + "\n")

                handtwo_train.write(filename.replace(".jpg", " ") + str(-1) + "\n")
                handtwo_val.write(filename.replace(".jpg", " ") + str(-1) + "\n")

    handone_train.close()
    handone_val.close()
    handfive_train.close()
    handfive_val.close()
    handtwo_train.close()
    handtwo_val.close()
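Training with the TensorFlow Object Detection API also needs a label map, and the inference code below loads one from hand_label_map.pbtxt. A minimal sketch for writing it is shown here; the id assignment 1/2/3 for handone/handtwo/handfive is an assumption and must match the ids used when the TFRecords are generated:

label_map = """item {
  id: 1
  name: 'handone'
}
item {
  id: 2
  name: 'handtwo'
}
item {
  id: 3
  name: 'handfive'
}
"""
with open("D:/tensorflow/handset/data/hand_label_map.pbtxt", "w") as f:
    f.write(label_map)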

Model Export and Use

After training, the model can be exported directly with the export tool that ships with the TensorFlow Object Detection API.
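For a TF1-style pipeline this is usually the export_inference_graph.py script, which produces the frozen_inference_graph.pb loaded below. The command is run from the models/research directory (^ is the Windows cmd line continuation); the pipeline config name and checkpoint number are placeholders, not values from the original project:

python object_detection/export_inference_graph.py ^
    --input_type image_tensor ^
    --pipeline_config_path D:/tensorflow/handset/pipeline.config ^
    --trained_checkpoint_prefix D:/tensorflow/handset/train/model.ckpt-XXXX ^
    --output_directory D:/tensorflow/handset/export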

The following program reads the camera video stream and runs real-time gesture detection on it:

import os
import cv2
import numpy as np
import tensorflow as tf  # TensorFlow 1.x-style frozen-graph loading is assumed below
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

# Frozen graph exported by the Object Detection API.
PATH_TO_CKPT = 'D:/tensorflow/handset/export/frozen_inference_graph.pb'

# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('D:/tensorflow/handset/data', 'hand_label_map.pbtxt')

NUM_CLASSES = 3
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)


def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
      (im_height, im_width, 3)).astype(np.uint8)


# Camera source for real-time detection (index 0 = default camera) and an output video writer.
cap = cv2.VideoCapture(0)
out = cv2.VideoWriter("D:/test.mp4", cv2.VideoWriter_fourcc('D', 'I', 'V', 'X'), 15,
                      (640, 480), True)
with detection_graph.as_default():
  with tf.Session(graph=detection_graph) as sess:
    while True:
      ret, image_np = cap.read()
      if not ret:
          break
      # Expand to shape [1, H, W, 3], the batch layout the detector expects.
      image_np_expanded = np.expand_dims(image_np, axis=0)
      image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
      boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
      scores = detection_graph.get_tensor_by_name('detection_scores:0')
      classes = detection_graph.get_tensor_by_name('detection_classes:0')
      num_detections = detection_graph.get_tensor_by_name('num_detections:0')
      # Actual detection.
      (boxes, scores, classes, num_detections) = sess.run(
          [boxes, scores, classes, num_detections],
          feed_dict={image_tensor: image_np_expanded})
      # Visualization of the results of a detection.
      vis_util.visualize_boxes_and_labels_on_image_array(
          image_np,
          np.squeeze(boxes),
          np.squeeze(classes).astype(np.int32),
          np.squeeze(scores),
          category_index,
          use_normalized_coordinates=True,
          line_thickness=8)
      out.write(image_np)
      cv2.imshow('object detection', image_np)
      c = cv2.waitKey(10)
      if c == 27: # ESC
        cv2.imwrite("D:/tensorflow/run_result.png", image_np)
        cv2.destroyAllWindows()
        break

out.release()
cap.release()
cv2.destroyAllWindows()
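To turn the raw outputs into a single recognized gesture per frame, one simple option (not part of the original code) is to keep the highest-scoring detection above a confidence threshold; the 0.5 threshold and the helper name are assumptions:

import numpy as np

def top_gesture(scores, classes, category_index, min_score=0.5):
    # Return the class name of the best detection, or None if nothing passes the threshold.
    scores = np.squeeze(scores)
    classes = np.squeeze(classes).astype(np.int32)
    best = int(np.argmax(scores))
    if scores[best] < min_score:
        return None
    return category_index[classes[best]]['name']

# Inside the detection loop, after sess.run(...):
#     gesture = top_gesture(scores, classes, category_index)
#     if gesture is not None:
#         print("detected:", gesture)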

Sample Results

(Figure 2: sample detection result)

I'm Hailang (海浪学长). Creating content is not easy, so likes, follows, bookmarks and comments are all welcome.

For help with your graduation project or answers to tricky questions, feel free to reach out!
