import cv2
import numpy as np
import pandas as pd
import face_recognition
from matplotlib import pyplot as plt


def ladmask(img):
    """Detect a face in img and return its landmark points as an (N, 2) array."""
    face_locations = face_recognition.face_locations(img, number_of_times_to_upsample=0, model='cnn')
    face_feature = face_recognition.face_landmarks(img, face_locations=face_locations)
    face_feature1 = pd.DataFrame(face_feature)

    def listexpend(X):
        # Flatten a DataFrame column of point lists into one list of (x, y) tuples.
        a = []
        for i in list(X):
            a.extend(i)
        return a

    h1 = []
    for j in range(0, 9):  # the nine landmark groups returned by face_recognition
        h1 += listexpend(face_feature1.iloc[:, j])

    def all_tuplelist(X):
        # Turn the list of (x, y) tuples into a list of [x, y] lists.
        a = []
        for i in list(X):
            a.append(list(i))
        return a

    points = np.array(all_tuplelist(h1))
    return points
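
# Usage sketch (illustrative only, not part of the pipeline below): for a single
# detected face, ladmask returns an (N, 2) integer array of x/y coordinates,
# ordered chin first, then eyebrows, nose, eyes and lips; get_face_mask below
# relies on that order when it slices points [0:16] and [26:17:-1].
#
#   sample_points = ladmask(cv2.imread("lif.jpg"))
#   print(sample_points.shape)   # roughly (72, 2) for one face; the exact count
#                                # depends on the face_recognition landmark model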
def transformation_from_points(points1, points2):
    """Estimate the similarity transform (scale, rotation, translation) that
    maps points1 onto points2, returned as a 2x3 affine matrix."""
    # 0 - make sure we work in float64
    points1 = points1.astype(np.float64)
    points2 = points2.astype(np.float64)
    # 1 - remove the effect of translation
    c1 = np.mean(points1, axis=0)
    c2 = np.mean(points2, axis=0)
    points1 -= c1
    points2 -= c2
    # 2 - remove the effect of scale
    s1 = np.std(points1)
    s2 = np.std(points2)
    points1 /= s1
    points2 /= s2
    # 3 - build M = B A^T, take its SVD, and recover the rotation R
    A = points1.T
    B = points2.T
    M = np.dot(B, A.T)
    U, S, Vt = np.linalg.svd(M)
    R = np.dot(U, Vt)
    # 4 - assemble the 2x3 affine transformation matrix [sR | T]
    s = s2 / s1
    sR = s * R
    c1 = c1.reshape(2, 1)
    c2 = c2.reshape(2, 1)
    T = c2 - np.dot(sR, c1)
    trans_mat = np.hstack([sR, T])
    return trans_mat
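
# Sanity-check sketch (added for illustration; the helper name apply_similarity
# is not part of the original code): the returned 2x3 matrix maps points1 into
# the coordinate frame of points2, so applying it to the source landmarks
# should land close to the target landmarks.
def apply_similarity(trans_mat, points):
    # Append a column of ones so rotation/scale and translation apply in one
    # product: (N, 3) @ (3, 2) -> (N, 2).
    homogeneous = np.hstack([points.astype(np.float64),
                             np.ones((points.shape[0], 1))])
    return homogeneous.dot(trans_mat.T)
# Example check:
#   np.abs(apply_similarity(M, landmarks1) - landmarks2).mean()  # small if the fit is good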
def get_affine_image(image1, image2, face_landmarks1, face_landmarks2, M):
    """
    Warp image1 with the affine matrix M.
    :param image1: the image to warp
    :param image2: only used for its size; the output matches it
    :param face_landmarks1: face landmarks of image1 (unused, kept for interface symmetry)
    :param face_landmarks2: face landmarks of image2 (unused, kept for interface symmetry)
    :param M: 2x3 matrix mapping image1 coordinates into image2's frame
    :return: the warped image
    """
    dsize = (image2.shape[1], image2.shape[0])  # (width, height) of image2
    affine_image = cv2.warpAffine(image1, M, dsize)
    return affine_image.astype(np.uint8)
def get_face_mask(img, face_landmarks):
    """
    Build a face mask from the landmarks.
    :param img: image whose size determines the mask size
    :param face_landmarks: the landmark points returned by ladmask
    :return: image_mask, a single-channel mask image
    """
    image_size = (img.shape[0], img.shape[1])
    mask = np.zeros(image_size, dtype=np.uint8)
    # Face outline: the chin points followed by the eyebrow points in reverse order.
    points = np.concatenate([face_landmarks[0:16], face_landmarks[26:17:-1]]).astype(np.int32)
    cv2.fillPoly(img=mask, pts=[points], color=(255, 255, 255))
    return mask
def get_mask_union(mask1, mask2):
    """
    Overlap (intersection) of the regions covered by two masks.
    :param mask1: mask_image, mask 1
    :param mask2: mask_image, mask 2
    :return: the region covered by both masks, with a feathered edge
    """
    mask = np.min([mask1, mask2], axis=0)                            # keep only pixels covered by both masks
    mask = ((cv2.blur(mask, (3, 3)) == 255) * 255).astype(np.uint8)  # shrink the region by a small border
    mask = cv2.blur(mask, (5, 5))                                    # feather the edge
    return mask
def get_mask_center_point(image_mask):
    """
    Center point of the mask's bounding box.
    :param image_mask: mask image
    :return: the (x, y) center of the mask
    """
    image_mask_index = np.argwhere(image_mask > 0)
    miny, minx = np.min(image_mask_index, axis=0)
    maxy, maxx = np.max(image_mask_index, axis=0)
    center_point = ((maxx + minx) // 2, (maxy + miny) // 2)
    return center_point

# --- Demo: swap the face from lif.jpg onto 222.jpg ---
img1 = cv2.imread("lif.jpg")
img2 = cv2.imread("222.jpg")

# Landmarks and face masks for both images.
landmarks1 = ladmask(img1)
landmarks2 = ladmask(img2)
img1_mask = get_face_mask(img1, landmarks1)
img2_mask = get_face_mask(img2, landmarks2)

# Warp image 1 (and its mask) into image 2's coordinate frame.
M = transformation_from_points(landmarks1, landmarks2)
affine_im1 = get_affine_image(img1, img2, landmarks1, landmarks2, M)
affine_im1_mask = get_affine_image(img1_mask, img2, landmarks1, landmarks2, M)

# Blend the warped face into image 2 with Poisson (seamless) cloning.
union_mask = get_mask_union(img2_mask, affine_im1_mask)
point = get_mask_center_point(union_mask)
seamless_im = cv2.seamlessClone(np.uint8(affine_im1), img2, mask=union_mask, p=point, flags=cv2.NORMAL_CLONE)
# Display the result (convert from OpenCV's BGR order to RGB for matplotlib).
plt.imshow(cv2.cvtColor(seamless_im, cv2.COLOR_BGR2RGB))
plt.xticks([]), plt.yticks([])
plt.show()
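
# Optional follow-up (the output filename is an assumption, not in the original
# script): persist the blended result to disk in addition to displaying it.
#   cv2.imwrite("face_swap_result.jpg", seamless_im)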