Python + OpenCV: real-time face recognition from a camera, with Chinese text labels on the bounding boxes

OpenCV's putText() function cannot render Chinese characters, which has confused many people. After several days of reading documentation and reworking code, I finally managed to get OpenCV to output Chinese text.

The first method is to write a small module that handles the encoding and glyph rendering, wrap it up, and call it from the main script to output Chinese text.

The second method is to convert the image with PIL and draw the text there.

Below is ft2.py, the module that handles the rendering:

# -*- coding: utf-8 -*-
# http://blog.csdn.net/zizi7/article/details/70145150
                                                      
import numpy as np
import freetype
import copy
import pdb

class put_chinese_text(object):
    def __init__(self, ttf):
        self._face = freetype.Face(ttf)

    def draw_text(self, image, pos, text, text_size, text_color):
        '''
        draw chinese(or not) text with ttf
        :param image:     image(numpy.ndarray) to draw text
        :param pos:       where to draw text
        :param text:      the context, for chinese should be unicode type
        :param text_size: text size
        :param text_color:text color
        :return:          image
        '''
        self._face.set_char_size(text_size * 64)
        metrics = self._face.size
        ascender = metrics.ascender/64.0

        #descender = metrics.descender/64.0
        #height = metrics.height/64.0
        #linegap = height - ascender + descender
        ypos = int(ascender)

        if not isinstance(text, str):
            # Python 3 bytes (or Python 2 str) need to be decoded to unicode/str first
            text = text.decode('utf-8')
        img = self.draw_string(image, pos[0], pos[1]+ypos, text, text_color)
        return img

    def draw_string(self, img, x_pos, y_pos, text, color):
        '''
        draw string
        :param x_pos: text x-position on img
        :param y_pos: text y-position on img
        :param text:  text (unicode)
        :param color: text color
        :return:      image
        '''
        prev_char = 0
        pen = freetype.Vector()
        pen.x = x_pos << 6   # 26.6 fixed point: multiply by 64
        pen.y = y_pos << 6

        hscale = 1.0
        matrix = freetype.Matrix(int((hscale)*0x10000), int(0.2*0x10000),\
                                 int(0.0*0x10000), int(1.1*0x10000))
        cur_pen = freetype.Vector()
        pen_translate = freetype.Vector()

        image = copy.deepcopy(img)
        for cur_char in text:
            self._face.set_transform(matrix, pen_translate)

            self._face.load_char(cur_char)
            kerning = self._face.get_kerning(prev_char, cur_char)
            pen.x += kerning.x
            slot = self._face.glyph
            bitmap = slot.bitmap

            cur_pen.x = pen.x
            cur_pen.y = pen.y - slot.bitmap_top * 64
            self.draw_ft_bitmap(image, bitmap, cur_pen, color)

            pen.x += slot.advance.x
            prev_char = cur_char

        return image

    def draw_ft_bitmap(self, img, bitmap, pen, color):
        '''
        draw each char
        :param bitmap: bitmap
        :param pen:    pen
        :param color:  pen color e.g.(0,0,255) - red
        :return:       image
        '''
        x_pos = pen.x >> 6
        y_pos = pen.y >> 6
        cols = bitmap.width
        rows = bitmap.rows

        glyph_pixels = bitmap.buffer

        for row in range(rows):
            for col in range(cols):
                if glyph_pixels[row*cols + col] != 0:
                    img[y_pos + row][x_pos + col][0] = color[0]
                    img[y_pos + row][x_pos + col][1] = color[1]
                    img[y_pos + row][x_pos + col][2] = color[2]


if __name__ == '__main__':
    # just for test
    import cv2

    line = '你好'
    img = np.zeros([300, 300, 3], dtype=np.uint8)

    color_ = (0,255,0) # Green
    pos = (3, 3)
    text_size = 24

    #ft = put_chinese_text('wqy-zenhei.ttc')
    ft = put_chinese_text('msyh.ttf')
    image = ft.draw_text(img, pos, line, text_size, color_)

    cv2.imshow('display', image)
    cv2.waitKey(0)

The module above contains a few helper functions; it needs the font file msyh.ttf in the same folder. A minimal usage sketch follows.
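For reference, here is a short sketch of how the module can be called on a real BGR image from another script; the file name test.jpg is only a placeholder for illustration:

# -*- coding: utf-8 -*-
# Minimal usage sketch for ft2.py; "test.jpg" is a placeholder file name
import cv2
import ft2

img = cv2.imread('test.jpg')                  # any BGR image read by OpenCV
ft = ft2.put_chinese_text('msyh.ttf')         # the font file must sit next to the script
img = ft.draw_text(img, (10, 10), '你好,世界', 24, (0, 255, 0))

cv2.imshow('demo', img)
cv2.waitKey(0)
cv2.destroyAllWindows()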

(If this approach cannot be made to work in your environment, you can fall back to the PIL-based conversion; its drawback is that it needs an extra library, but it is simple and easy to understand.)

Below is the face recognition code:

# -*- coding: utf-8 -*-
# Real-time face recognition on a camera stream
import face_recognition
import cv2
import ft2

source = "rtsp://admin:[email protected]/Streaming/Channels/1"
cam = cv2.VideoCapture(source)

# Local reference image 1
zwh_image = face_recognition.load_image_file("zwh.jpg")
zwh_face_encoding = face_recognition.face_encodings(zwh_image)[0]

# Local reference image 2
chenduling_image = face_recognition.load_image_file("chenduling.jpg")
chenduling_face_encoding = face_recognition.face_encodings(chenduling_image)[0]

# Local reference image 3
liujunbo_image = face_recognition.load_image_file("liujunbo.jpg")
liujunbo_face_encoding = face_recognition.face_encodings(liujunbo_image)[0]

# Create arrays of known face encodings and their names
# The set of known face encodings
known_face_encodings = [
    zwh_face_encoding,
    chenduling_face_encoding,
    liujunbo_face_encoding
]

# The corresponding names
known_face_names = [
    "张文豪",
    "陈都灵",
    "刘军波"
]

face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while(cam.isOpened()):
    # Read a frame from the camera
    ret, frame = cam.read()
    if not ret:
        # Stop if the frame could not be read
        break

    # Shrink the frame; a smaller image means less computation
    small_frame = cv2.resize(frame, (0, 0), fx=0.33, fy=0.33)

    # OpenCV frames are BGR, but face_recognition expects RGB, so convert
    rgb_small_frame = small_frame[:, :, ::-1]

    # Only process every other frame of video to save time
    if process_this_frame:
        # Locate the faces in the frame and compute their encodings
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # Compare the detected face against the known encodings
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.48)
            # Too low a tolerance and known faces fail to match; too high and different people get confused. The default tolerance is 0.6.
            #print(matches)
            name = "Unknown"

            # if match[0]:
            #     name = "michong"
            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]

            face_names.append(name)

    process_this_frame = not process_this_frame

    # Draw the detected faces
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale face locations back up, since detection ran on a frame resized to roughly 1/3
        top *= 3
        right *= 3
        bottom *= 3
        left *= 3

        # Bounding box
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 3)
        ft = ft2.put_chinese_text('msyh.ttf')
        # Load the font via ft2 (this could be moved outside the loop to avoid reloading it for every face)
        # Add the label
        #cv2.rectangle(frame, (left, bottom - 20), (right, bottom), (0, 0, 255), cv2.FILLED)
        #cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.8, (255, 255, 255), 1)  # usable when the label contains no Chinese text

        frame = ft.draw_text(frame, (left+10 , bottom ), name, 20, (255, 255, 255))

         #cv2.imshow("frame",image)会出来两个框一个monitor 一个frame后者显示image但只有frame为true时才会显示
        #def draw_text(self, image, pos, text, text_size, text_color)

    cv2.imshow('monitor', frame)
    if cv2.waitKey(1) & 0xFF == 27:
        break

cam.release()
cv2.destroyAllWindows()

Using PIL instead: because cv2 and PIL store the color channels in different orders, a conversion is needed. The snippet below can replace the freetype-based labelling above.

# Requires: from PIL import Image, ImageDraw, ImageFont  and  import numpy as np
cv2img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # cv2 and PIL store color channels in different orders
pilimg = Image.fromarray(cv2img)
draw = ImageDraw.Draw(pilimg)  # draw on the PIL image
font = ImageFont.truetype("msyh.ttf", 27, encoding="utf-8")  # arg 1: font file path, arg 2: font size
draw.text((left + 10, bottom), name, (220, 20, 60), font=font)  # arg 1: position, arg 2: text, arg 3: text color, arg 4: font

# Convert the PIL image back to a cv2 (BGR) frame
frame = cv2.cvtColor(np.array(pilimg), cv2.COLOR_RGB2BGR)
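To keep the main loop tidy, the same PIL logic can be wrapped in a small helper; this is only a sketch, and the name draw_label_pil is my own, not part of any library:

# -*- coding: utf-8 -*-
# Sketch of a reusable PIL-based labelling helper (draw_label_pil is a made-up name)
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont

def draw_label_pil(frame, text, pos, font_path="msyh.ttf", size=27, color=(220, 20, 60)):
    # Convert BGR -> RGB, draw the (possibly Chinese) text with PIL, convert back to BGR
    pilimg = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(pilimg)
    font = ImageFont.truetype(font_path, size, encoding="utf-8")
    draw.text(pos, text, color, font=font)
    return cv2.cvtColor(np.array(pilimg), cv2.COLOR_RGB2BGR)

# In the main loop this would replace the ft2 call:
# frame = draw_label_pil(frame, name, (left + 10, bottom))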

 

At a minimum you need Python and OpenCV installed. If faces are too far away to be detected, you can shrink each frame less (or not at all) before detection; the cost is a laggy, high-latency feed. To compensate, lower the load by adjusting the camera parameters (resolution, frame rate, bitrate cap, video codec) to reduce the resolution. A sketch of both adjustments follows.
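As a rough sketch of these two knobs, the resize factor in the loop and the capture properties can be tuned as below; whether the CAP_PROP_* settings actually take effect depends on the camera and the RTSP stream, so the values here are only example assumptions:

# Sketch: trading detection range against speed (all values are examples only)
import cv2

cam = cv2.VideoCapture(0)   # or the RTSP source string used above

# Ask the camera for a lower resolution / frame rate (some RTSP cameras ignore these)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
cam.set(cv2.CAP_PROP_FPS, 15)

# Shrinking the frame less keeps distant faces detectable but costs more CPU per frame
SCALE = 0.5   # instead of 0.33; remember to scale face locations back up by 1/SCALE
ret, frame = cam.read()
if ret:
    small_frame = cv2.resize(frame, (0, 0), fx=SCALE, fy=SCALE)

cam.release()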

If different people get confused, or a face that is fairly close still is not recognized, adjust the threshold: too low a tolerance and known faces fail to match, too high and different people get mixed up. The default tolerance is 0.6; for Asian faces a somewhat lower value usually works better. A sketch of matching by the smallest distance follows.
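Rather than taking the first True returned by compare_faces, a common and slightly more robust variant is to pick the known face with the smallest distance and accept it only when that distance is below the tolerance. A minimal sketch, meant as a drop-in replacement for the matching block in the loop above (it reuses known_face_encodings, known_face_names and face_encodings from that script):

# Sketch: choose the closest known face instead of the first match (0.48 is only an example)
import numpy as np
import face_recognition

TOLERANCE = 0.48

face_names = []
for face_encoding in face_encodings:
    # face_distance returns one distance per known encoding; smaller means more similar
    face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
    best_match_index = int(np.argmin(face_distances))
    name = "Unknown"
    if face_distances[best_match_index] <= TOLERANCE:
        name = known_face_names[best_match_index]
    face_names.append(name)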

 

https://github.com/niehen6174/face-recognition-and-put-in-chinese/tree/master/code

The link above contains the complete code, the font file, and a sample photo.

An introduction to other face recognition modules: https://blog.csdn.net/Nirvana_6174/article/details/89599441

If you have questions or suggestions, join QQ group 894243022 or email [email protected]

Please give credit if you use this article or its code.
