Cutting out and pasting heads in Python with a neural network + OpenCV

This noob has been watching Gintama lately, and out of love for it I had a sudden idea: given two images, a photo of a real person and a picture of Gin-san, could I automatically detect the head in each and paste Gin-san's head onto the other image? The rough effect looks like this: [demo image]
It took two days, but I finally got a first working version. My pipeline goes roughly like this:

  • 1. Use OpenCV to detect the head position in the person photo
  • 2. Run person segmentation on the anime image to remove the background
  • 3. Extract the anime head
  • 4. Paste the anime head onto the person photo at the detected position

This is just for reference; if any of you have better ideas, I'd love to hear them. A minimal sketch of the flow follows.
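To keep the roadmap concrete, here is a minimal sketch of that flow. It is runnable only once combined with the functions defined in the rest of this post; the file names are placeholder assumptions, and the segmentation step is shown later inside the two variants of main():

import cv2

imgA = cv2.imread("gintoki.jpg")            # anime image, placeholder path
imgP = cv2.imread("person.jpg")             # real-person photo, placeholder path
headA = locationFace(imgA, False)           # step 1: locate the anime head
headP, lx, ly = locationFace(imgP, True)    # step 1: locate the real head
headA = cv2.resize(headA, (headP.shape[1], headP.shape[0]))
headSyn = synthesisPic(headA, headP)        # steps 3-4: blend the two head crops
pastePic(headSyn, "person.jpg", lx, ly).save("finalPic.png")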

1. Head detection

Of course, the first question was: how do I detect the head in each image, given that one is an anime character and the other a real person? My initial instinct was to use a neural network, but the almighty internet told me that OpenCV's built-in cascade classifiers can detect faces directly, and even better, someone on GitHub has published a cascade file for locating anime faces. So I used OpenCV to locate the heads:

def locationFace(img, people):
    if not people:
        # anime branch: use the community-published anime-face cascade
        classifier = cv2.CascadeClassifier(
            "path to the anime-face detection cascade file")
        grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faceRects = classifier.detectMultiScale(grey,
                                                scaleFactor=1.1,
                                                minNeighbors=0,
                                                minSize=(24, 24))
        if len(faceRects) > 0:  # at least one face was detected
            maxArea = 0
            mx, my, mw, mh = 0, 0, 0, 0
            for faceRect in faceRects:  # keep the largest detection
                x, y, w, h = faceRect
                area = w * h
                if maxArea < area:
                    mx, my, mw, mh = faceRect
                    maxArea = area
            # shift the crop up by 20 px to catch more hair; clamp at 0
            imgH = img[max(my - 20, 0):my + mh - 20, mx:mx + mw]
            return imgH
        else:
            print("No anime face detected")
    else:
        # real-person branch: OpenCV's stock frontal-face cascade
        classifier = cv2.CascadeClassifier(
            "path to the OpenCV face detection cascade file")
        grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faceRects = classifier.detectMultiScale(grey,
                                                scaleFactor=1.2,
                                                minNeighbors=3,
                                                minSize=(32, 32))
        imgH, lx, ly = None, 0, 0  # avoid a NameError when nothing is found
        if len(faceRects) > 0:
            for faceRect in faceRects:  # keeps the last detected face
                x, y, w, h = faceRect
                # pad 30 px up and left to include hair, clamped to the image
                lx = x - 30 if x - 30 > 0 else 0
                ly = y - 30 if y - 30 > 0 else 0
                imgH = img[ly:y + h, lx:x + w]
        else:
            print("No real face detected")
        return imgH, lx, ly
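The anime cascade is the community-published lbpcascade_animeface.xml (the same file used in the full code below); the real-face cascade haarcascade_frontalface_alt2.xml ships with OpenCV. A quick smoke test, as a hedged sketch: the image and XML paths are placeholders you would adjust, and minNeighbors=5 is stricter than the 0 used above, just to keep the visual check clean.

import cv2

img = cv2.imread("gintoki.jpg")                          # placeholder test image
clf = cv2.CascadeClassifier("lbpcascade_animeface.xml")  # placeholder path
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = clf.detectMultiScale(grey, scaleFactor=1.1,
                             minNeighbors=5, minSize=(24, 24))
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)  # red box
cv2.imwrite("detected.png", img)
print("found", len(faces), "face(s)")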

2. Person segmentation (cutout)

Person cutout is the genuinely hard part. This noob originally planned to clone some well-studied project from GitHub, but I'm honestly too much of a noob and couldn't follow the English docs at all (my English is terrible). Just as I was stuck, I discovered that Baidu's PaddleHub publishes a ready-to-use pretrained model, deeplabv3p_xception65_humanseg. A godsend. So I called that model to cut the anime character out of the background:

    input_dict = {"image": image_path_list}
    # load the pretrained humanseg model from its local install directory
    module = hub.Module(directory="C:/Users/.paddlehub/modules/deeplabv3p_xception65_humanseg")
    results = module.segmentation(data=input_dict)
    prediction = results[0]["data"]  # per-pixel mask, >0 where a person was found
    if prediction is not None:
        # zero out the background in each colour channel
        newimg = np.zeros(imgA.shape)
        newimg[:, :, 0] = imgA[:, :, 0] * (prediction > 0)
        newimg[:, :, 1] = imgA[:, :, 1] * (prediction > 0)
        newimg[:, :, 2] = imgA[:, :, 2] * (prediction > 0)
        newimg = newimg.astype(np.uint8)
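If the module has been installed through PaddleHub's command line (hub install deeplabv3p_xception65_humanseg), it can also be loaded by name, which sidesteps hard-coding the install path. A minimal sketch; the image path is a placeholder:

import paddlehub as hub

# load by registered name instead of by directory
module = hub.Module(name="deeplabv3p_xception65_humanseg")
results = module.segmentation(data={"image": ["anime.jpg"]})
prediction = results[0]["data"]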

3. Head compositing

I may have over-complicated this part: I crop the head region out of both images, then composite the two crops with mask (bitwise AND) operations:

def synthesisPic(imgA, imgP):
    # build a mask from the anime head: non-black pixels become foreground
    imgAB = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(imgAB, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    # keep only the background of the person crop, then add the anime head
    img1_bg = cv2.bitwise_and(imgP, imgP, mask=mask_inv)
    dst = cv2.add(img1_bg, imgA)
    return dst
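To make the mask logic concrete, here is a tiny self-contained toy example with invented 4x4 arrays (not from the original post): the thresholded mask selects the anime pixels, its inverse selects the person background, and cv2.add merges the two disjoint regions.

import numpy as np
import cv2

fg = np.zeros((4, 4, 3), np.uint8)
fg[1:3, 1:3] = 200                        # "anime head" blob in the centre
bg = np.full((4, 4, 3), 90, np.uint8)     # "person" crop, uniform grey
grey = cv2.cvtColor(fg, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(grey, 10, 255, cv2.THRESH_BINARY)  # 255 where fg lives
bg_only = cv2.bitwise_and(bg, bg, mask=cv2.bitwise_not(mask))
out = cv2.add(bg_only, fg)                # fg where masked, bg elsewhere
print(out[:, :, 0])                       # 200 in the centre 2x2, 90 around it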

4. Pasting the composited head back into the original photo

This step is exactly what the heading says:

def pastePic(headSyn, picRawPath, lx, ly):
    # hand the OpenCV (BGR) array to PIL through a temporary PNG on disk
    cv2.imwrite("headSyn.png", headSyn)
    icon = Image.open("headSyn.png")
    img = Image.open(picRawPath)
    img = img.convert("RGBA")
    img.paste(icon, (lx, ly))  # paste at the top-left corner found earlier
    return img
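The temp-file round trip works, but the same handoff can stay in memory by converting BGR to RGB directly. A hedged alternative sketch, not what I originally wrote:

from PIL import Image
import cv2

def pastePicInMemory(headSyn, picRawPath, lx, ly):
    # same contract as pastePic above, without the temporary file
    icon = Image.fromarray(cv2.cvtColor(headSyn, cv2.COLOR_BGR2RGB))
    img = Image.open(picRawPath).convert("RGBA")
    img.paste(icon, (int(lx), int(ly)))  # int() guards against numpy ints
    return img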

Full code

from PIL import Image
import numpy as np
import cv2
def locationFace(img, people):
    if not people:
        # anime branch: use the community-published anime-face cascade
        classifier = cv2.CascadeClassifier(
            "D:/machinelearn/faceLocation/lbpcascade_animeface.xml")
        grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faceRects = classifier.detectMultiScale(grey,
                                                scaleFactor=1.1,
                                                minNeighbors=0,
                                                minSize=(24, 24))
        if len(faceRects) > 0:  # at least one face was detected
            maxArea = 0
            mx, my, mw, mh = 0, 0, 0, 0
            for faceRect in faceRects:  # keep the largest detection
                x, y, w, h = faceRect
                area = w * h
                if maxArea < area:
                    mx, my, mw, mh = faceRect
                    maxArea = area
            # shift the crop up by 20 px to catch more hair; clamp at 0
            imgH = img[max(my - 20, 0):my + mh - 20, mx:mx + mw]
            return imgH
        else:
            print("No anime face detected")
    else:
        # real-person branch: OpenCV's stock frontal-face cascade
        classifier = cv2.CascadeClassifier(
            "D:/machinelearn/faceLocation/haarcascade_frontalface_alt2.xml")
        grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faceRects = classifier.detectMultiScale(grey,
                                                scaleFactor=1.2,
                                                minNeighbors=3,
                                                minSize=(32, 32))
        imgH, lx, ly = None, 0, 0  # avoid a NameError when nothing is found
        if len(faceRects) > 0:
            for faceRect in faceRects:  # keeps the last detected face
                x, y, w, h = faceRect
                # pad 30 px up and left to include hair, clamped to the image
                lx = x - 30 if x - 30 > 0 else 0
                ly = y - 30 if y - 30 > 0 else 0
                imgH = img[ly:y + h, lx:x + w]
        else:
            print("No real face detected")
        return imgH, lx, ly
# imgA: anime head crop   imgP: person head crop   returns the blended head
# note: the two crops must already be the same size here
def synthesisPic(imgA, imgP):
    # build a mask from the anime head: non-black pixels become foreground
    imgAB = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(imgAB, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    # keep only the background of the person crop, then add the anime head
    img1_bg = cv2.bitwise_and(imgP, imgP, mask=mask_inv)
    dst = cv2.add(img1_bg, imgA)
    return dst
def pastePic(headSyn, picRawPath, lx, ly):
    # hand the OpenCV (BGR) array to PIL through a temporary PNG on disk
    cv2.imwrite("headSyn.png", headSyn)
    icon = Image.open("headSyn.png")
    img = Image.open(picRawPath)
    img = img.convert("RGBA")
    img.paste(icon, (lx, ly))  # paste at the top-left corner found earlier
    return img


import paddlehub as hub
# Approach 1: segment the silhouette first, then detect the head
def main(pathA, pathP):
    imgA = cv2.imread(pathA)
    imgP = cv2.imread(pathP)
    image_path_list = [pathA]
    input_dict = {"image": image_path_list}
    module = hub.Module(directory="C:/Users/.paddlehub/modules/deeplabv3p_xception65_humanseg")
    results = module.segmentation(data=input_dict)
    prediction = results[0]["data"]  # per-pixel person mask
    if prediction is not None:
        # black out the background of the anime image
        newimg = np.zeros(imgA.shape)
        newimg[:, :, 0] = imgA[:, :, 0] * (prediction > 0)
        newimg[:, :, 1] = imgA[:, :, 1] * (prediction > 0)
        newimg[:, :, 2] = imgA[:, :, 2] * (prediction > 0)
        newimg = newimg.astype(np.uint8)
        cv2.imwrite("tem2.jpg", newimg)
        headA = locationFace(newimg, False)
        headP, lx, ly = locationFace(imgP, True)
        if headP is not None and headA is not None:
            # resize the anime head to the person head's size before blending
            headAR = cv2.resize(headA, (headP.shape[1], headP.shape[0]))
            headSyn = synthesisPic(headAR, headP)
            finalPic = pastePic(headSyn, pathP, lx, ly)
            finalPic.save("finalPic.png")

# Approach 2: detect the head first, then segment the silhouette
def main2(pathA, pathP):
    imgA = cv2.imread(pathA)
    imgP = cv2.imread(pathP)
    headA = locationFace(imgA, False)
    headP, lx, ly = locationFace(imgP, True)
    if headP is not None and headA is not None:
        headAR = cv2.resize(headA, (headP.shape[1], headP.shape[0]))
        cv2.imwrite("tem.jpg", headAR, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
        image_path_list = ["tem.jpg"]
        input_dict = {"image": image_path_list}
        module = hub.Module(directory="C:/Users/.paddlehub/modules/deeplabv3p_xception65_humanseg")
        results = module.segmentation(data=input_dict)
        prediction = results[0]["data"]  # per-pixel person mask
        if prediction is not None:
            # black out the background of the resized anime head crop
            newimg = np.zeros(headAR.shape)
            newimg[:, :, 0] = headAR[:, :, 0] * (prediction > 0)
            newimg[:, :, 1] = headAR[:, :, 1] * (prediction > 0)
            newimg[:, :, 2] = headAR[:, :, 2] * (prediction > 0)
            newimg = newimg.astype(np.uint8)
            cv2.imwrite("tem1.jpg", newimg)
            headSyn = synthesisPic(newimg, headP)
            finalPic = pastePic(headSyn, pathP, lx, ly)
            finalPic.save("finalPic.png")

if __name__ == "__main__":
    main("./silverFace/1.jpg", "./silverFace/2.jpg")

Closing thoughts

Once it was done, I realized the logic is actually simple; the hard parts had already been done by others, and all I had to do was call their interfaces. It still took me two days, which is embarrassing. The two places that ate the most time:

  • 1. When loading the model, Chinese characters in the path kept it from loading. I spent ages researching how to set environment variables and change paths, only to discover that the HUB_HOME environment variable only controls where modules are stored when installing and invoking from the command line; code still looked in the original path. In the end the fix was simply to pass the model's location via the directory argument.
  • 2. cv2.bitwise_and kept throwing errors during head compositing. It turned out my mask was not a grayscale image (a grayscale image is single-channel, i.e. a 2-D array): I had not converted my image to grayscale first, so the mask was not single-channel either. (My head nearly exploded.) The sanity-check sketch below would have caught it immediately.
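A few hedged assertions like these, run right before cv2.bitwise_and, pin down exactly what the mask argument must look like (the helper name check_mask is mine, not from the original code):

import numpy as np

def check_mask(mask, img):
    # preconditions cv2.bitwise_and expects of its mask argument
    assert mask.ndim == 2, "mask must be single-channel (2-D)"
    assert mask.dtype == np.uint8, "mask must be 8-bit"
    assert mask.shape == img.shape[:2], "mask size must match the image"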

To sum up, all of these mistakes came from not understanding, or not being familiar with, the parameter requirements of the functions I was calling. Sharpen the axe before chopping the wood: before using an unfamiliar interface, it pays to look up its parameters and return values first. Also, the PaddleHub model segments real people very accurately, but it is still disappointing on anime characters, many of which it simply fails to detect. That dented my satisfaction quite a bit; not much sense of achievement. My next step is to train my own silhouette-extraction network for Gintama characters and swap it in. Stay tuned. Let's gooo!!!!
