I. Background
Short-video apps usually implement face-thinning and eye-enlarging effects with a commercial SDK such as Face++. Since Apple's native Vision framework can also detect faces and extract facial landmarks, it should be able to do the same. It turned out to be quite straightforward: drawing on algorithms found online, the effect was up and running in about an hour.
Vision vs. Face++:
1. Vision is a native framework, adds no package size, and is free; Face++ is paid and its SDK is roughly 50 MB.
2. Vision requires iOS 11 or later; Face++ apparently has no such restriction.
3. Vision detects 74 facial landmarks on the iPhone 5s and iPhone 7, and 87 on the iPhone XS; Face++ detects 106.
4. Vision's landmarks drift a little (stability is mediocre) and edge detection is not very accurate; Face++'s landmarks fit the face more closely and are more precise.
Vision official documentation
Face++ official documentation
II. Workflow
1. Capture camera frames with GPUImageVideoCamera.
2. Feed the captured CVPixelBufferRef into Vision and obtain the facial landmarks.
3. Add a custom face-thinning/eye-enlarging filter to the GPUImage filter chain.
4. In the custom filter, override - (void)renderToTextureWithVertices:(const GLfloat *)vertices textureCoordinates:(const GLfloat *)textureCoordinates and pass the landmarks into the fragment shader.
5. In the shader, apply the face-thinning/eye-enlarging algorithms: circle magnification, circle shrinking, and fixed-point stretching. (Algorithm principles explained)
6. Finally, display the result with a GPUImageView, as sketched right after this list.
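A minimal wiring sketch for this capture → filter → display chain (FaceBeautyThinFaceFilter is the custom filter from this article; the view-controller property names below are assumptions, not the demo's actual code):

// Assumed properties: videoCamera, thinFaceFilter, previewView (a GPUImageView).
self.videoCamera = [[GPUImageVideoCamera alloc] initWithSessionPreset:AVCaptureSessionPreset1280x720 cameraPosition:AVCaptureDevicePositionFront];
self.videoCamera.outputImageOrientation = UIInterfaceOrientationPortrait;
self.videoCamera.horizontallyMirrorFrontFacingCamera = YES;
self.videoCamera.delegate = self; // delivers sample buffers for Vision (step 2)
self.thinFaceFilter = [[FaceBeautyThinFaceFilter alloc] init]; // custom filter (step 3)
[self.videoCamera addTarget:self.thinFaceFilter];
[self.thinFaceFilter addTarget:self.previewView]; // GPUImageView in the view hierarchy (step 6)
[self.videoCamera startCameraCapture];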
III. Key Code
1. Send the captured raw image data (CVPixelBufferRef) to Vision for processing.
#pragma mark - GPUImageVideoCameraDelegate
- (void)willOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
{
    // Copy the sample buffer so Vision works on its own buffer, independent of GPUImage
    CMSampleBufferRef imageCopy = NULL;
    CMSampleBufferCreateCopy(CFAllocatorGetDefault(), sampleBuffer, &imageCopy);
    if (!imageCopy) {
        return;
    }
    CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(imageCopy);
    [[FaceBeautyFaceDetector shareInstance] getLandmarksFromPixelBuffer:pixelBuffer orientation:kCGImagePropertyOrientationLeftMirrored complete:^(NSArray * _Nonnull landmarks) {
        // Draw debug overlays for the landmarks here
    }];
    CFRelease(imageCopy); // detection is synchronous, so the copy can be released here
}
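Besides drawing debug overlays, the landmarks delivered in this callback also need to reach the custom filter. One simple option (the landmarks property here is hypothetical, not from the article) is to hand them straight to the filter in the completion block:

// Hypothetical: keep the latest landmarks on the filter so they can be uploaded
// as uniforms when the next frame is rendered (see the override sketch in section 3).
self.thinFaceFilter.landmarks = landmarks;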
2. Extract the facial landmarks with Vision. Pay attention to the coordinate conversion the landmark points require.
- (void)getLandmarksFromPixelBuffer:(CVPixelBufferRef)pixelBuffer orientation:(CGImagePropertyOrientation)orientation complete:(void(^)(NSArray *landmarks))complete
{
    if (@available(iOS 11.0, *)) {
        VNImageRequestHandler *handler = [[VNImageRequestHandler alloc] initWithCVPixelBuffer:pixelBuffer orientation:orientation options:@{}];
        VNDetectFaceLandmarksRequest *landmarksRequest = [[VNDetectFaceLandmarksRequest alloc] initWithCompletionHandler:^(VNRequest * _Nonnull request, NSError * _Nullable error) {
            [self handleWithObservations:request.results complete:complete];
        }];
        [handler performRequests:@[landmarksRequest] error:nil];
    } else {
        // Vision is unavailable below iOS 11; nothing to do
    }
}

- (void)handleWithObservations:(NSArray *)observations complete:(void(^)(NSArray *landmarks))complete
{
    if (@available(iOS 11.0, *)) {
        if (!observations.count) {
            !complete ?: complete(nil);
            return;
        }
        VNFaceObservation *observation = observations.firstObject;
        VNFaceLandmarks2D *landmarks = observation.landmarks;
        CGFloat boxX = observation.boundingBox.origin.x;
        CGFloat boxY = observation.boundingBox.origin.y;
        CGFloat boxW = observation.boundingBox.size.width;
        CGFloat boxH = observation.boundingBox.size.height;
        NSMutableArray *array = [NSMutableArray array];
        [self getAllkeyWithClass:VNFaceLandmarks2D.class isProperty:YES block:^(NSString *key) {
            if ([key isEqualToString:@"allPoints"] ||
                [key isEqualToString:@"constellation"] ||
                [key isEqualToString:@"occlusionFlagsPerPoint"] ||
                [key isEqualToString:@"precisionEstimatesPerPoint"]) {
                return;
            }
            VNFaceLandmarkRegion2D *region2D = [landmarks valueForKey:key];
            for (int i = 0; i < region2D.pointCount; i++) {
                // Region points are normalized to the face bounding box; map them
                // into whole-image normalized coordinates via the bounding box.
                CGPoint point = region2D.normalizedPoints[i];
                CGPoint converted = CGPointMake(boxX + point.x * boxW,
                                                boxY + point.y * boxH);
                // Depending on the camera orientation and how GPUImage samples the
                // texture, the y axis may still need to be flipped downstream.
                [array addObject:[NSValue valueWithCGPoint:converted]];
            }
        }];
        !complete ?: complete(array.copy);
    }
}
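The code above calls a helper getAllkeyWithClass:isProperty:block: that is not shown in the article; a minimal sketch of how it might be implemented with the Objective-C runtime (an assumption, not the author's actual code):

#import <objc/runtime.h>

// Enumerate the declared property names of a class and pass each one to the block.
- (void)getAllkeyWithClass:(Class)cls isProperty:(BOOL)isProperty block:(void (^)(NSString *key))block
{
    if (!isProperty || !block) {
        return;
    }
    unsigned int count = 0;
    objc_property_t *properties = class_copyPropertyList(cls, &count);
    for (unsigned int i = 0; i < count; i++) {
        NSString *key = [NSString stringWithUTF8String:property_getName(properties[i])];
        block(key);
    }
    free(properties);
}

With this approach the landmark array is simply the concatenation of the individual regions (face contour, eyes, eyebrows, nose, lips, pupils) in whatever order the properties are enumerated, so the index constants used in the shader below assume a particular concatenation order.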
3. Handle the landmarks in the FaceBeautyThinFaceFilter filter.
- (void)setUniformsWithLandmarks:(NSArray *)landmarks
{
    if (!landmarks.count) {
        [self setInteger:0 forUniform:hasFaceUniform program:filterProgram];
        return;
    }
    [self setInteger:1 forUniform:hasFaceUniform program:filterProgram];
    CGFloat aspect = inputTextureSize.width / inputTextureSize.height;
    [self setFloat:aspect forUniform:aspectRatioUniform program:filterProgram];
    [self setFloat:self.thinFaceDelta forUniform:thinFaceDeltaUniform program:filterProgram];
    [self setFloat:self.bigEyeDelta forUniform:bigEyeDeltaUniform program:filterProgram];
#warning The number of landmarks differs by device model, e.g. 74 on the iPhone 6s vs 87 on the iPhone XS
    GLsizei size = 74 * 2;
    GLfloat *facePoints = malloc(size * sizeof(GLfloat));
    int index = 0;
    for (NSValue *value in landmarks) {
        CGPoint point = [value CGPointValue];
        *(facePoints + index) = point.x;
        *(facePoints + index + 1) = point.y;
        index += 2;
        if (index == size) {
            break;
        }
    }
    [self setFloatArray:facePoints length:size forUniform:facePointsUniform program:filterProgram];
    free(facePoints);
}
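The override mentioned in step 4 of the workflow can then simply refresh these uniforms before each frame is drawn; a sketch (assuming the filter keeps the most recent Vision result in a landmarks property):

- (void)renderToTextureWithVertices:(const GLfloat *)vertices
                 textureCoordinates:(const GLfloat *)textureCoordinates
{
    // Upload the latest landmarks as uniforms, then let GPUImage render as usual.
    [self setUniformsWithLandmarks:self.landmarks];
    [super renderToTextureWithVertices:vertices textureCoordinates:textureCoordinates];
}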
4. Process the landmark data in the fragment shader.
NSString *const kGPUImageThinFaceFragmentShaderString = SHADER_STRING
(
 precision highp float;
 varying highp vec2 textureCoordinate;
 uniform sampler2D inputImageTexture;

 uniform int hasFace;
 uniform float facePoints[74 * 2];

 uniform highp float aspectRatio;
 uniform float thinFaceDelta;
 uniform float bigEyeDelta;

 // Magnification inside a circle (eye enlargement)
 vec2 enlargeEye(vec2 textureCoord, vec2 originPosition, float radius, float delta) {
     float weight = distance(vec2(textureCoord.x, textureCoord.y / aspectRatio), vec2(originPosition.x, originPosition.y / aspectRatio)) / radius;
     weight = 1.0 - (1.0 - weight * weight) * delta;
     weight = clamp(weight, 0.0, 1.0);
     textureCoord = originPosition + (textureCoord - originPosition) * weight;
     return textureCoord;
 }

 // Curve-warp deformation (used for face thinning)
 vec2 curveWarp(vec2 textureCoord, vec2 originPosition, vec2 targetPosition, float delta) {
     vec2 offset = vec2(0.0);
     vec2 result = vec2(0.0);
     vec2 direction = (targetPosition - originPosition) * delta;
     float radius = distance(vec2(targetPosition.x, targetPosition.y / aspectRatio), vec2(originPosition.x, originPosition.y / aspectRatio));
     float ratio = distance(vec2(textureCoord.x, textureCoord.y / aspectRatio), vec2(originPosition.x, originPosition.y / aspectRatio)) / radius;
     ratio = 1.0 - ratio;
     ratio = clamp(ratio, 0.0, 1.0);
     offset = direction * ratio;
     result = textureCoord - offset;
     return result;
 }

 vec2 thinFace(vec2 currentCoordinate) {
     vec2 faceIndexs[8];
     // faceIndexs[0] = vec2(0., 45.);
     // faceIndexs[1] = vec2(10., 45.);
     faceIndexs[0] = vec2(1., 46.);
     faceIndexs[1] = vec2(9., 46.);
     faceIndexs[2] = vec2(2., 50.);
     faceIndexs[3] = vec2(8., 50.);
     faceIndexs[4] = vec2(3., 50.);
     faceIndexs[5] = vec2(7., 50.);
     faceIndexs[6] = vec2(4., 50.);
     faceIndexs[7] = vec2(6., 50.);
     for (int i = 0; i < 8; i++) {
         int originIndex = int(faceIndexs[i].x);
         int targetIndex = int(faceIndexs[i].y);
         vec2 originPoint = vec2(facePoints[originIndex * 2], facePoints[originIndex * 2 + 1]);
         vec2 targetPoint = vec2(facePoints[targetIndex * 2], facePoints[targetIndex * 2 + 1]);
         currentCoordinate = curveWarp(currentCoordinate, originPoint, targetPoint, thinFaceDelta);
     }
     return currentCoordinate;
 }

 vec2 bigEye(vec2 currentCoordinate) {
     vec2 faceIndexs[2];
     faceIndexs[0] = vec2(72., 13.);
     faceIndexs[1] = vec2(73., 21.);
     for (int i = 0; i < 2; i++) {
         int originIndex = int(faceIndexs[i].x);
         int targetIndex = int(faceIndexs[i].y);
         vec2 originPoint = vec2(facePoints[originIndex * 2], facePoints[originIndex * 2 + 1]);
         vec2 targetPoint = vec2(facePoints[targetIndex * 2], facePoints[targetIndex * 2 + 1]);
         float radius = distance(vec2(targetPoint.x, targetPoint.y / aspectRatio), vec2(originPoint.x, originPoint.y / aspectRatio));
         radius = radius * 5.;
         currentCoordinate = enlargeEye(currentCoordinate, originPoint, radius, bigEyeDelta);
     }
     return currentCoordinate;
 }

 void main()
 {
     vec2 positionToUse = textureCoordinate;
     if (hasFace == 1) {
         positionToUse = thinFace(positionToUse);
         positionToUse = bigEye(positionToUse);
     }
     gl_FragColor = texture2D(inputImageTexture, positionToUse);
 }
);
IV. Results
The first image is the original; the second shows the face-thinning and eye-enlarging result. As you can see, the eye enlargement looks a bit unnatural because the coefficient was set rather high.
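A more natural result can be obtained by lowering the coefficients; for example (the values below are illustrative assumptions, not the ones used for the screenshots):

self.thinFaceFilter.thinFaceDelta = 0.01; // subtler jawline warp
self.thinFaceFilter.bigEyeDelta = 0.10;   // milder eye magnification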
V. Circle Magnification Algorithm
1. As shown in the figure, take the coordinates of landmark 72 (the left pupil) and of landmark 13 (the point above it).
2. Use pupil point 72 as the circle's center and five times the distance between points 72 and 13 as the radius to define the magnification region.
3. Under the circle-magnification algorithm, pixels close to the center are displaced outward the most, while pixels farther from the center are displaced less, which is why the vertical stretching of the eye is especially noticeable. It also lets the magnified region blend smoothly into the untouched area.
4. The circle-shrinking and fixed-point-stretching algorithms work along similar lines and are not repeated here.
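As a worked example of the weight formula in enlargeEye (with an assumed delta of 0.3): for a fragment halfway out from the pupil, d / radius = 0.5, so weight = 1.0 - (1.0 - 0.5 × 0.5) × 0.3 = 0.775. That fragment therefore samples the texture at 77.5% of its original distance from the pupil, pulling in content from nearer the center and magnifying it. At the circle boundary d / radius = 1.0, the weight is exactly 1.0 and the pixel is untouched, which is what produces the smooth transition described in step 3.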
GitHub: Demo repository
Feel free to leave a comment or send a message to discuss, and Stars are appreciated. Thanks!