2. Implementing Face Detection with CoreImage
Face detection is one of the most popular image-processing techniques today. CoreImage ships with built-in interfaces for detecting faces and extracting facial features. Below we build a simple demo that recognizes facial features in real time.
First, create a view to act as the camera scanning view:
.h file:
// FaceView.h
#import <UIKit/UIKit.h>
@interface FaceView : UIView
@end
.m file:
//
//  FaceView.m
//  CoreImageDemo
//
//  Created by jaki on 2018/12/22.
//  Copyright © 2018 jaki. All rights reserved.
//
#import "FaceView.h"
#import <AVFoundation/AVFoundation.h>
#import "FaceHandle.h"
// Label for the serial queue that handles sample-buffer callbacks
#define FACE_SCAN_QUEUE "FACE_SCAN_QUEUE"
@interface FaceView()<AVCaptureVideoDataOutputSampleBufferDelegate>
@property (nonatomic, strong) AVCaptureSession *captureSession;
@property (nonatomic, strong) AVCaptureDeviceInput *captureInput;
@property (nonatomic, strong) AVCaptureVideoDataOutput *captureOutput;
@property (nonatomic, strong) AVCaptureVideoPreviewLayer *videoLayer;
@property (nonatomic, strong) dispatch_queue_t queue;
@property (nonatomic, assign) BOOL hasHandle;
@property (nonatomic, strong) UIView *faceView;
@end
@implementation FaceView
#pragma mark - Override
-(instancetype)init{
    self = [super init];
    if (self) {
        [self install];
    }
    return self;
}
-(instancetype)initWithFrame:(CGRect)frame{
    self = [super initWithFrame:frame];
    if (self) {
        [self install];
    }
    return self;
}
-(void)layoutSubviews{
    [super layoutSubviews];
    self.videoLayer.frame = self.bounds;
}
#pragma mark - InnerFunc
-(void)install{
    if (![UIImagePickerController isSourceTypeAvailable:UIImagePickerControllerSourceTypeCamera]) {
        NSLog(@"Camera not supported");
        return;
    }
    self.queue = dispatch_queue_create(FACE_SCAN_QUEUE, NULL);
    // Start the capture session (built lazily in the getter below)
    [self.captureSession startRunning];
    AVAuthorizationStatus status = [AVCaptureDevice authorizationStatusForMediaType:AVMediaTypeVideo];
    if (status != AVAuthorizationStatusAuthorized) {
        NSLog(@"Camera permission required");
        return;
    }
    self.videoLayer = [AVCaptureVideoPreviewLayer layerWithSession:self.captureSession];
    self.videoLayer.frame = CGRectZero;
    self.videoLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
    [self.layer addSublayer:self.videoLayer];
    [self addSubview:self.faceView];
    self.faceView.frame = CGRectMake(0, 0, self.frame.size.width, self.frame.size.height);
}
// Draw markers over the detected facial features
-(void)renderRectWithInfo:(NSDictionary *)info{
    for (UIView *v in self.faceView.subviews) {
        [v removeFromSuperview];
    }
    NSArray *faceArray = info[FACE_HANDLE_INFO_FACE_ARRAY];
    for (int i = 0; i < faceArray.count; i++) {
        NSDictionary *face = faceArray[i];
        // Face bounds
        NSValue *faceValue = face[FACE_HANDLE_INFO_FACE_FRAME];
        if (faceValue) {
            CGRect faceR = [faceValue CGRectValue];
            UIView *faceView = [[UIView alloc] initWithFrame:faceR];
            faceView.backgroundColor = [UIColor clearColor];
            faceView.layer.borderColor = [UIColor redColor].CGColor;
            faceView.layer.borderWidth = 2;
            [self.faceView addSubview:faceView];
        }
        // Left eye
        NSValue *leftEye = face[FACE_HANDLE_INFO_FACE_LEFT_EYE_FRAME];
        if (leftEye) {
            CGRect leftEyeR = [leftEye CGRectValue];
            UIView *eye = [[UIView alloc] initWithFrame:leftEyeR];
            eye.backgroundColor = [UIColor clearColor];
            eye.layer.borderColor = [UIColor greenColor].CGColor;
            eye.layer.borderWidth = 2;
            [self.faceView addSubview:eye];
        }
        // Right eye
        NSValue *rightEye = face[FACE_HANDLE_INFO_FACE_RIGHT_EYE_FRAME];
        if (rightEye) {
            CGRect rightEyeR = [rightEye CGRectValue];
            UIView *eye = [[UIView alloc] initWithFrame:rightEyeR];
            eye.backgroundColor = [UIColor clearColor];
            eye.layer.borderColor = [UIColor greenColor].CGColor;
            eye.layer.borderWidth = 2;
            [self.faceView addSubview:eye];
        }
        // Mouth
        NSValue *mouth = face[FACE_HANDLE_INFO_FACE_MOUTH_FRAME];
        if (mouth) {
            CGRect mouthR = [mouth CGRectValue];
            UIView *mouthView = [[UIView alloc] initWithFrame:mouthR];
            mouthView.backgroundColor = [UIColor clearColor];
            mouthView.layer.borderColor = [UIColor orangeColor].CGColor;
            mouthView.layer.borderWidth = 2;
            [self.faceView addSubview:mouthView];
        }
    }
}
#pragma mark - AVCaptureVideoDataOutputSampleBufferDelegate
// Called for every captured video frame
-(void)captureOutput:(AVCaptureOutput *)output didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection{
    if (self.hasHandle) {
        return;
    }
    self.hasHandle = YES;
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
    uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    CGImageRef newImage = CGBitmapContextCreateImage(newContext);
    CGContextRelease(newContext);
    CGColorSpaceRelease(colorSpace);
    UIImage *image = [UIImage imageWithCGImage:newImage scale:1.0 orientation:UIImageOrientationRight];
    CGImageRelease(newImage);
    // Hand the frame to the core face-detection utility class
    [[FaceHandle sharedInstance] handleImage:image viewSize:self.frame.size completed:^(BOOL success, NSDictionary *info) {
        self.hasHandle = NO;
        [self renderRectWithInfo:info];
    }];
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
}
#pragma mark - setter and getter
-(AVCaptureSession *)captureSession{
    if (!_captureSession) {
        _captureSession = [[AVCaptureSession alloc] init];
        [_captureSession addInput:self.captureInput];
        [_captureSession addOutput:self.captureOutput];
    }
    return _captureSession;
}
-(AVCaptureDeviceInput *)captureInput{
    if (!_captureInput) {
        _captureInput = [AVCaptureDeviceInput deviceInputWithDevice:[AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo] error:nil];
    }
    return _captureInput;
}
-(AVCaptureVideoDataOutput *)captureOutput{
    if (!_captureOutput) {
        _captureOutput = [[AVCaptureVideoDataOutput alloc] init];
        _captureOutput.alwaysDiscardsLateVideoFrames = YES;
        [_captureOutput setSampleBufferDelegate:self queue:self.queue];
        _captureOutput.videoSettings = @{(__bridge NSString *)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_32BGRA)};
    }
    return _captureOutput;
}
-(UIView *)faceView{
    if (!_faceView) {
        _faceView = [[UIView alloc] init];
        _faceView.backgroundColor = [UIColor clearColor];
    }
    return _faceView;
}
@end
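With the view defined, it can be dropped into any controller. A minimal usage sketch follows; the ViewController name is an assumption about the host project, not part of the demo above. Also remember that the app's Info.plist must contain an NSCameraUsageDescription entry, or the capture session will never be authorized:
// ViewController.m (hypothetical host controller)
#import "FaceView.h"
@implementation ViewController
-(void)viewDidLoad{
    [super viewDidLoad];
    // Fill the controller's view; FaceView resizes its preview layer in layoutSubviews
    FaceView *faceView = [[FaceView alloc] initWithFrame:self.view.bounds];
    [self.view addSubview:faceView];
}
@end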
Run the project on a real device: the live camera feed is captured and rendered on screen. Next comes the core face-detection code.
Create a FaceHandle class inheriting from NSObject:
.h file:
#import <UIKit/UIKit.h>
extern const NSString * FACE_HANDLE_INFO_FACE_ARRAY;
extern const NSString * FACE_HANDLE_INFO_FACE_FRAME;
extern const NSString * FACE_HANDLE_INFO_FACE_LEFT_EYE_FRAME;
extern const NSString * FACE_HANDLE_INFO_FACE_RIGHT_EYE_FRAME;
extern const NSString * FACE_HANDLE_INFO_FACE_MOUTH_FRAME;
extern const NSString * FACE_HANDLE_INFO_ERROR;
@interface FaceHandle : NSObject
+(instancetype)sharedInstance;
-(void)handleImage:(UIImage *)image viewSize:(CGSize)viewSize completed:(void(^)(BOOL success,NSDictionary * info))completion;
@end
.m file:
#import "FaceHandle.h"
#import <CoreImage/CoreImage.h>
// Label for the background queue on which detection work runs
#define FACE_HANDLE_DISPATCH_QUEUE "FACE_HANDLE_DISPATCH_QUEUE"
const NSString * FACE_HANDLE_INFO_FACE_FRAME = @"FACE_HANDLE_INFO_FACE_FRAME";
const NSString * FACE_HANDLE_INFO_FACE_LEFT_EYE_FRAME = @"FACE_HANDLE_INFO_FACE_LEFT_EYE_FRAME";
const NSString * FACE_HANDLE_INFO_FACE_RIGHT_EYE_FRAME = @"FACE_HANDLE_INFO_FACE_RIGHT_EYE_FRAME";
const NSString * FACE_HANDLE_INFO_FACE_MOUTH_FRAME = @"FACE_HANDLE_INFO_FACE_MOUTH_FRAME";
const NSString * FACE_HANDLE_INFO_ERROR = @"FACE_HANDLE_INFO_ERROR";
const NSString * FACE_HANDLE_INFO_FACE_ARRAY = @"FACE_HANDLE_INFO_FACE_ARRAY";
@interface FaceHandle()
@property(nonatomic,strong)dispatch_queue_t workingQueue;
@end
@implementation FaceHandle
+(instancetype)sharedInstance{
    static dispatch_once_t onceToken;
    static FaceHandle *sharedInstance = nil;
    // dispatch_once already guarantees single execution; no extra nil check needed
    dispatch_once(&onceToken, ^{
        sharedInstance = [[FaceHandle alloc] init];
    });
    return sharedInstance;
}
#pragma mark - Override
-(instancetype)init{
    self = [super init];
    if (self) {
        self.workingQueue = dispatch_queue_create(FACE_HANDLE_DISPATCH_QUEUE, NULL);
    }
    return self;
}
#pragma mark - InnerFunc
-(void)handleImage:(UIImage *)image viewSize:(CGSize)viewSize completed:(void (^)(BOOL, NSDictionary *))completion{
    if (!image) {
        if (completion) {
            completion(NO, @{FACE_HANDLE_INFO_ERROR: @"Failed to capture the image"});
        }
        return;
    }
    dispatch_async(self.workingQueue, ^{
        // Scale the captured frame up to the size of the preview view
        UIImage *newImage = [self stretchImage:image withSize:viewSize];
        if (newImage) {
            NSArray *faceArray = [self analyseFaceImage:newImage];
            if (completion) {
                dispatch_async(dispatch_get_main_queue(), ^{
                    completion(YES, @{FACE_HANDLE_INFO_FACE_ARRAY: faceArray});
                });
            }
        } else {
            if (completion) {
                dispatch_async(dispatch_get_main_queue(), ^{
                    completion(NO, @{FACE_HANDLE_INFO_ERROR: @"Failed to process the image"});
                });
            }
        }
    });
}
// Stretch the image to the target size
-(UIImage *)stretchImage:(UIImage *)img withSize:(CGSize)size{
    UIGraphicsBeginImageContext(size);
    CGRect thumbnailRect = CGRectZero;
    thumbnailRect.origin = CGPointMake(0, 0);
    thumbnailRect.size.width = size.width;
    thumbnailRect.size.height = size.height;
    [img drawInRect:thumbnailRect];
    UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return newImage;
}
-(NSArray *)analyseFaceImage:(UIImage *)image{
    NSMutableArray *dataArray = [NSMutableArray array];
    CIImage *cImage = [CIImage imageWithCGImage:image.CGImage];
    NSDictionary *opts = [NSDictionary dictionaryWithObject:CIDetectorAccuracyHigh forKey:CIDetectorAccuracy];
    // Create the face detector
    CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeFace context:nil options:opts];
    // Extract the feature array
    NSArray *features = [detector featuresInImage:cImage];
    CGSize inputImageSize = [cImage extent].size;
    // CoreImage uses a bottom-left origin; flip vertically to convert to UIKit coordinates
    CGAffineTransform transform = CGAffineTransformIdentity;
    transform = CGAffineTransformScale(transform, 1, -1);
    transform = CGAffineTransformTranslate(transform, 0, -inputImageSize.height);
    for (CIFaceFeature *faceFeature in features) {
        NSMutableDictionary *faceDic = [NSMutableDictionary dictionary];
        CGRect faceViewBounds = CGRectApplyAffineTransform(faceFeature.bounds, transform);
        [faceDic setValue:[NSValue valueWithCGRect:faceViewBounds] forKey:(NSString *)FACE_HANDLE_INFO_FACE_FRAME];
        CGFloat faceWidth = faceFeature.bounds.size.width;
        if (faceFeature.hasLeftEyePosition) {
            // The left-eye point, converted to view coordinates
            CGPoint faceViewLeftPoint = CGPointApplyAffineTransform(faceFeature.leftEyePosition, transform);
            CGRect leftEyeBounds = CGRectMake(faceViewLeftPoint.x - faceWidth * 0.1, faceViewLeftPoint.y - faceWidth * 0.1, faceWidth * 0.2, faceWidth * 0.2);
            [faceDic setValue:[NSValue valueWithCGRect:leftEyeBounds] forKey:(NSString *)FACE_HANDLE_INFO_FACE_LEFT_EYE_FRAME];
        }
        if (faceFeature.hasRightEyePosition) {
            // The right-eye point
            CGPoint faceViewRightPoint = CGPointApplyAffineTransform(faceFeature.rightEyePosition, transform);
            CGRect rightEyeBounds = CGRectMake(faceViewRightPoint.x - faceWidth * 0.1, faceViewRightPoint.y - faceWidth * 0.1, faceWidth * 0.2, faceWidth * 0.2);
            [faceDic setValue:[NSValue valueWithCGRect:rightEyeBounds] forKey:(NSString *)FACE_HANDLE_INFO_FACE_RIGHT_EYE_FRAME];
        }
        if (faceFeature.hasMouthPosition) {
            // The mouth point
            CGPoint faceViewMouthPoint = CGPointApplyAffineTransform(faceFeature.mouthPosition, transform);
            CGRect mouthBounds = CGRectMake(faceViewMouthPoint.x - faceWidth * 0.2, faceViewMouthPoint.y - faceWidth * 0.2, faceWidth * 0.4, faceWidth * 0.4);
            [faceDic setValue:[NSValue valueWithCGRect:mouthBounds] forKey:(NSString *)FACE_HANDLE_INFO_FACE_MOUTH_FRAME];
        }
        [dataArray addObject:faceDic];
    }
    return [dataArray copy];
}
@end
Open Baidu, search for a few face images, and run them through the detector; the recognition rate turns out to be quite high, as shown below:
3. Other Image Recognition Features in CoreImage
Besides face detection, CIDetector also supports detecting QR codes, rectangles, and text regions.
Rectangle detection finds rectangular boundaries in an image. The core code is as follows:
-(NSArray *)analyseRectImage:(UIImage *)image{
    NSMutableArray *dataArray = [NSMutableArray array];
    CIImage *cImage = [CIImage imageWithCGImage:image.CGImage];
    NSDictionary *opts = [NSDictionary dictionaryWithObject:CIDetectorAccuracyHigh forKey:CIDetectorAccuracy];
    CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeRectangle context:nil options:opts];
    NSArray *features = [detector featuresInImage:cImage];
    NSLog(@"%lu", (unsigned long)features.count);
    CGSize inputImageSize = [cImage extent].size;
    // Flip from CoreImage coordinates to UIKit coordinates
    CGAffineTransform transform = CGAffineTransformIdentity;
    transform = CGAffineTransformScale(transform, 1, -1);
    transform = CGAffineTransformTranslate(transform, 0, -inputImageSize.height);
    for (CIRectangleFeature *feature in features) {
        NSMutableDictionary *dic = [NSMutableDictionary dictionary];
        CGRect viewBounds = CGRectApplyAffineTransform(feature.bounds, transform);
        [dic setValue:[NSValue valueWithCGRect:viewBounds] forKey:@"rectBounds"];
        CGPoint topLeft = CGPointApplyAffineTransform(feature.topLeft, transform);
        [dic setValue:[NSValue valueWithCGPoint:topLeft] forKey:@"topLeft"];
        CGPoint topRight = CGPointApplyAffineTransform(feature.topRight, transform);
        [dic setValue:[NSValue valueWithCGPoint:topRight] forKey:@"topRight"];
        CGPoint bottomLeft = CGPointApplyAffineTransform(feature.bottomLeft, transform);
        [dic setValue:[NSValue valueWithCGPoint:bottomLeft] forKey:@"bottomLeft"];
        CGPoint bottomRight = CGPointApplyAffineTransform(feature.bottomRight, transform);
        [dic setValue:[NSValue valueWithCGPoint:bottomRight] forKey:@"bottomRight"];
        [dataArray addObject:dic];
    }
    return [dataArray copy];
}
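The four corner points are useful beyond drawing overlays. As a sketch of a common next step, the untransformed corners (still in CoreImage's bottom-left coordinate system, i.e. taken before applying transform) can drive the built-in CIPerspectiveCorrection filter to deskew the detected rectangle; feature below is assumed to be one CIRectangleFeature from the loop above:
// Deskew the detected rectangle; the corner points must be in CoreImage coordinates
CIImage *corrected = [cImage imageByApplyingFilter:@"CIPerspectiveCorrection"
                               withInputParameters:@{@"inputTopLeft":     [CIVector vectorWithCGPoint:feature.topLeft],
                                                     @"inputTopRight":    [CIVector vectorWithCGPoint:feature.topRight],
                                                     @"inputBottomLeft":  [CIVector vectorWithCGPoint:feature.bottomLeft],
                                                     @"inputBottomRight": [CIVector vectorWithCGPoint:feature.bottomRight]}];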
The result looks like this:
QR-code detection not only locates QR codes in the image but also decodes their payload. The core code is as follows:
-(NSArray *)analyseQRImage:(UIImage *)image{
    NSMutableArray *dataArray = [NSMutableArray array];
    CIImage *cImage = [CIImage imageWithCGImage:image.CGImage];
    NSDictionary *opts = [NSDictionary dictionaryWithObject:CIDetectorAccuracyHigh forKey:CIDetectorAccuracy];
    CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeQRCode context:nil options:opts];
    NSArray *features = [detector featuresInImage:cImage];
    CGSize inputImageSize = [cImage extent].size;
    // Flip from CoreImage coordinates to UIKit coordinates
    CGAffineTransform transform = CGAffineTransformIdentity;
    transform = CGAffineTransformScale(transform, 1, -1);
    transform = CGAffineTransformTranslate(transform, 0, -inputImageSize.height);
    for (CIQRCodeFeature *feature in features) {
        NSMutableDictionary *dic = [NSMutableDictionary dictionary];
        CGRect viewBounds = CGRectApplyAffineTransform(feature.bounds, transform);
        [dic setValue:[NSValue valueWithCGRect:viewBounds] forKey:@"rectBounds"];
        CGPoint topLeft = CGPointApplyAffineTransform(feature.topLeft, transform);
        [dic setValue:[NSValue valueWithCGPoint:topLeft] forKey:@"topLeft"];
        CGPoint topRight = CGPointApplyAffineTransform(feature.topRight, transform);
        [dic setValue:[NSValue valueWithCGPoint:topRight] forKey:@"topRight"];
        CGPoint bottomLeft = CGPointApplyAffineTransform(feature.bottomLeft, transform);
        [dic setValue:[NSValue valueWithCGPoint:bottomLeft] forKey:@"bottomLeft"];
        CGPoint bottomRight = CGPointApplyAffineTransform(feature.bottomRight, transform);
        [dic setValue:[NSValue valueWithCGPoint:bottomRight] forKey:@"bottomRight"];
        // The decoded payload of the QR code
        [dic setValue:feature.messageString forKey:@"content"];
        [dataArray addObject:dic];
    }
    return [dataArray copy];
}
CoreImage also supports detecting text regions. The core code is as follows:
-(NSArray *)analyseTextImage:(UIImage *)image{
    NSMutableArray *dataArray = [NSMutableArray array];
    CIImage *cImage = [CIImage imageWithCGImage:image.CGImage];
    NSDictionary *opts = [NSDictionary dictionaryWithObject:CIDetectorAccuracyHigh forKey:CIDetectorAccuracy];
    CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeText context:nil options:opts];
    // CIDetectorReturnSubFeatures asks the detector to also report per-character sub-features
    NSArray *features = [detector featuresInImage:cImage options:@{CIDetectorReturnSubFeatures: @YES}];
    CGSize inputImageSize = [cImage extent].size;
    // Flip from CoreImage coordinates to UIKit coordinates
    CGAffineTransform transform = CGAffineTransformIdentity;
    transform = CGAffineTransformScale(transform, 1, -1);
    transform = CGAffineTransformTranslate(transform, 0, -inputImageSize.height);
    for (CITextFeature *feature in features) {
        NSLog(@"%@", feature.subFeatures);
        NSMutableDictionary *dic = [NSMutableDictionary dictionary];
        CGRect viewBounds = CGRectApplyAffineTransform(feature.bounds, transform);
        [dic setValue:[NSValue valueWithCGRect:viewBounds] forKey:@"rectBounds"];
        CGPoint topLeft = CGPointApplyAffineTransform(feature.topLeft, transform);
        [dic setValue:[NSValue valueWithCGPoint:topLeft] forKey:@"topLeft"];
        CGPoint topRight = CGPointApplyAffineTransform(feature.topRight, transform);
        [dic setValue:[NSValue valueWithCGPoint:topRight] forKey:@"topRight"];
        CGPoint bottomLeft = CGPointApplyAffineTransform(feature.bottomLeft, transform);
        [dic setValue:[NSValue valueWithCGPoint:bottomLeft] forKey:@"bottomLeft"];
        CGPoint bottomRight = CGPointApplyAffineTransform(feature.bottomRight, transform);
        [dic setValue:[NSValue valueWithCGPoint:bottomRight] forKey:@"bottomRight"];
        [dataArray addObject:dic];
    }
    return [dataArray copy];
}
The result looks like this:
4. The Core Classes of CoreImage
1. The CIColor Class
CIColor is the class CoreImage uses to describe color.
//Create a CIColor from a CGColor
+ (instancetype)colorWithCGColor:(CGColorRef)c;
//Convenience constructors
+ (instancetype)colorWithRed:(CGFloat)r green:(CGFloat)g blue:(CGFloat)b alpha:(CGFloat)a;
+ (instancetype)colorWithRed:(CGFloat)r green:(CGFloat)g blue:(CGFloat)b;
+ (nullable instancetype)colorWithRed:(CGFloat)r green:(CGFloat)g blue:(CGFloat)b alpha:(CGFloat)a colorSpace:(CGColorSpaceRef)colorSpace;
+ (nullable instancetype)colorWithRed:(CGFloat)r green:(CGFloat)g blue:(CGFloat)b colorSpace:(CGColorSpaceRef)colorSpace;
- (instancetype)initWithCGColor:(CGColorRef)c;
//Create a CIColor from a string representation
+ (instancetype)colorWithString:(NSString *)representation;
- (instancetype)initWithRed:(CGFloat)r green:(CGFloat)g blue:(CGFloat)b alpha:(CGFloat)a;
- (instancetype)initWithRed:(CGFloat)r green:(CGFloat)g blue:(CGFloat)b;
- (nullable instancetype)initWithRed:(CGFloat)r green:(CGFloat)g blue:(CGFloat)b alpha:(CGFloat)a colorSpace:(CGColorSpaceRef)colorSpace;
- (nullable instancetype)initWithRed:(CGFloat)r green:(CGFloat)g blue:(CGFloat)b colorSpace:(CGColorSpaceRef)colorSpace;
//The number of color components
@property (readonly) size_t numberOfComponents;
//The color components
@property (readonly) const CGFloat *components;
//The alpha value
@property (readonly) CGFloat alpha;
//The color space
@property (readonly) CGColorSpaceRef colorSpace;
//The red, green and blue components
@property (readonly) CGFloat red;
@property (readonly) CGFloat green;
@property (readonly) CGFloat blue;
//Convenience predefined colors
@property (class, strong, readonly) CIColor *blackColor;
@property (class, strong, readonly) CIColor *whiteColor;
@property (class, strong, readonly) CIColor *grayColor;
@property (class, strong, readonly) CIColor *redColor;
@property (class, strong, readonly) CIColor *greenColor;
@property (class, strong, readonly) CIColor *blueColor;
@property (class, strong, readonly) CIColor *cyanColor;
@property (class, strong, readonly) CIColor *magentaColor;
@property (class, strong, readonly) CIColor *yellowColor;
@property (class, strong, readonly) CIColor *clearColor;
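A quick sketch of how the constructors and accessors above fit together:
// Create colors and read their components back
CIColor *ciRed = [CIColor colorWithRed:1.0 green:0.0 blue:0.0 alpha:1.0];
CIColor *fromCG = [CIColor colorWithCGColor:[UIColor orangeColor].CGColor];
NSLog(@"components: %zu, alpha: %f", ciRed.numberOfComponents, ciRed.alpha);
NSLog(@"r=%f g=%f b=%f", fromCG.red, fromCG.green, fromCG.blue);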
2. The CIImage Class
CIImage is the most central class in CoreImage; it describes an image object.
//Create a new CIImage instance
+ (CIImage *)imageWithCGImage:(CGImageRef)image;
//Create a new CIImage instance with an options dictionary
/*
Keys available in the dictionary:
kCIImageColorSpace sets the color space, a CGColorSpaceRef
kCIImageNearestSampling BOOL, whether to use nearest-neighbor sampling
kCIImageProperties sets the image properties dictionary
kCIImageApplyOrientationProperty BOOL, whether to transform according to the orientation
kCIImageTextureTarget NSNumber, the OpenGL texture target constant
kCIImageTextureFormat NSNumber, the OpenGL format
kCIImageAuxiliaryDepth BOOL, whether to return the auxiliary depth image
kCIImageAuxiliaryDisparity BOOL, whether to return the auxiliary disparity image
kCIImageAuxiliaryPortraitEffectsMatte BOOL, whether to return the portrait effects matte
*/
+ (CIImage *)imageWithCGImage:(CGImageRef)image
options:(nullable NSDictionary *)options;
//Create a CIImage from a CGLayer (deprecated)
+ (CIImage *)imageWithCGLayer:(CGLayerRef)layer NS_DEPRECATED_MAC(10_4,10_11);
+ (CIImage *)imageWithCGLayer:(CGLayerRef)layer
options:(nullable NSDictionary *)options;
//Create a CIImage from bitmap data
+ (CIImage *)imageWithBitmapData:(NSData *)data
bytesPerRow:(size_t)bytesPerRow
size:(CGSize)size
format:(CIFormat)format
colorSpace:(nullable CGColorSpaceRef)colorSpace;
//Create a CIImage from an OpenGL texture
+ (CIImage *)imageWithTexture:(unsigned int)name
size:(CGSize)size
flipped:(BOOL)flipped
colorSpace:(nullable CGColorSpaceRef)colorSpace;
+ (CIImage *)imageWithTexture:(unsigned int)name
size:(CGSize)size
flipped:(BOOL)flipped
options:(nullable NSDictionary *)options;
+ (nullable CIImage *)imageWithMTLTexture:(id<MTLTexture>)texture
options:(nullable NSDictionary *)options;
//Create a CIImage from a URL
+ (nullable CIImage *)imageWithContentsOfURL:(NSURL *)url;
+ (nullable CIImage *)imageWithContentsOfURL:(NSURL *)url
options:(nullable NSDictionary *)options;
//Create a CIImage from NSData
+ (nullable CIImage *)imageWithData:(NSData *)data;
+ (nullable CIImage *)imageWithData:(NSData *)data
options:(nullable NSDictionary *)options;
//Create a CIImage from a CVImageBufferRef
+ (CIImage *)imageWithCVImageBuffer:(CVImageBufferRef)imageBuffer;
+ (CIImage *)imageWithCVImageBuffer:(CVImageBufferRef)imageBuffer
options:(nullable NSDictionary *)options;
//Create a CIImage from a CVPixelBufferRef
+ (CIImage *)imageWithCVPixelBuffer:(CVPixelBufferRef)pixelBuffer;
+ (CIImage *)imageWithCVPixelBuffer:(CVPixelBufferRef)pixelBuffer
options:(nullable NSDictionary *)options;
//Create a CIImage filled with a solid color
+ (CIImage *)imageWithColor:(CIColor *)color;
//Create an empty CIImage
+ (CIImage *)emptyImage;
//Initializers
- (instancetype)initWithCGImage:(CGImageRef)image;
- (instancetype)initWithCGImage:(CGImageRef)image
options:(nullable NSDictionary *)options;
- (instancetype)initWithCGLayer:(CGLayerRef)layer NS_DEPRECATED_MAC(10_4,10_11);
- (instancetype)initWithCGLayer:(CGLayerRef)layer
options:(nullable NSDictionary *)options;
- (instancetype)initWithBitmapData:(NSData *)data
bytesPerRow:(size_t)bytesPerRow
size:(CGSize)size
format:(CIFormat)format
colorSpace:(nullable CGColorSpaceRef)colorSpace;
- (instancetype)initWithTexture:(unsigned int)name
size:(CGSize)size
flipped:(BOOL)flipped
colorSpace:(nullable CGColorSpaceRef)colorSpace;
- (instancetype)initWithTexture:(unsigned int)name
size:(CGSize)size
flipped:(BOOL)flipped
options:(nullable NSDictionary *)options;
- (nullable instancetype)initWithMTLTexture:(id<MTLTexture>)texture
options:(nullable NSDictionary *)options;
- (nullable instancetype)initWithContentsOfURL:(NSURL *)url;
- (nullable instancetype)initWithContentsOfURL:(NSURL *)url
options:(nullable NSDictionary *)options;
- (instancetype)initWithCVImageBuffer:(CVImageBufferRef)imageBuffer;
- (instancetype)initWithCVImageBuffer:(CVImageBufferRef)imageBuffer
options:(nullable NSDictionary *)options;
- (instancetype)initWithCVPixelBuffer:(CVPixelBufferRef)pixelBuffer;
- (instancetype)initWithCVPixelBuffer:(CVPixelBufferRef)pixelBuffer
options:(nullable NSDictionary *)options;
- (instancetype)initWithColor:(CIColor *)color;
//Apply a transform, returning the resulting CIImage
- (CIImage *)imageByApplyingTransform:(CGAffineTransform)matrix;
- (CIImage *)imageByApplyingOrientation:(int)orientation;
- (CIImage *)imageByApplyingCGOrientation:(CGImagePropertyOrientation)orientation;
//Get the transform for a given orientation
- (CGAffineTransform)imageTransformForOrientation:(int)orientation;
- (CGAffineTransform)imageTransformForCGOrientation:(CGImagePropertyOrientation)orientation;
//Composite over another image
- (CIImage *)imageByCompositingOverImage:(CIImage *)dest;
//Crop to a rect
- (CIImage *)imageByCroppingToRect:(CGRect)rect;
//Extend the edge pixels out to an infinite extent
- (CIImage *)imageByClampingToExtent;
//Clamp to the given rect, returning a new image
- (CIImage *)imageByClampingToRect:(CGRect)rect;
//Apply a filter
- (CIImage *)imageByApplyingFilter:(NSString *)filterName
withInputParameters:(nullable NSDictionary *)params;
- (CIImage *)imageByApplyingFilter:(NSString *)filterName;
//The image extent
@property (NS_NONATOMIC_IOSONLY, readonly) CGRect extent;
//The properties dictionary
@property (atomic, readonly) NSDictionary *properties;
//The URL, if the image was created from a URL
@property (atomic, readonly, nullable) NSURL *url;
//The color space
@property (atomic, readonly, nullable) CGColorSpaceRef colorSpace;
//The CGImage, if the image was created from one
@property (nonatomic, readonly, nullable) CGImageRef CGImage;
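To make the instance methods concrete, here is a small sketch chaining a few of them (sourceImage is assumed to be an existing UIImage):
CIImage *input = [CIImage imageWithCGImage:sourceImage.CGImage];
// Blur, then crop the blur-expanded result back to the original extent
CIImage *blurred = [input imageByApplyingFilter:@"CIGaussianBlur"
                            withInputParameters:@{kCIInputRadiusKey: @8.0}];
CIImage *result = [blurred imageByCroppingToRect:input.extent];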
3. The CIContext Class
CIContext is CoreImage's context object. It performs the actual rendering of images, as well as conversion into the image objects of other frameworks.
//Create a CIContext from a CGContextRef
/*
Keys that can be configured in the options dictionary:
kCIContextOutputColorSpace sets the output color space
kCIContextWorkingColorSpace sets the working color space
kCIContextWorkingFormat sets the buffer pixel format
kCIContextHighQualityDownsample BOOL
kCIContextOutputPremultiplied whether the output carries premultiplied alpha
kCIContextCacheIntermediates BOOL
kCIContextUseSoftwareRenderer whether to use the software renderer
kCIContextPriorityRequestLow whether to render at low priority
*/
+ (CIContext *)contextWithCGContext:(CGContextRef)cgctx
options:(nullable NSDictionary *)options;
//Create a context object
+ (CIContext *)contextWithOptions:(nullable NSDictionary *)options;
+ (CIContext *)context;
- (instancetype)initWithOptions:(nullable NSDictionary *)options;
//Create a CIContext backed by a specific Metal device
+ (CIContext *)contextWithMTLDevice:(id<MTLDevice>)device;
+ (CIContext *)contextWithMTLDevice:(id<MTLDevice>)device
options:(nullable NSDictionary *)options;
//The working color space
@property (nullable, nonatomic, readonly) CGColorSpaceRef workingColorSpace;
//The working buffer format
@property (nonatomic, readonly) CIFormat workingFormat;
//Draw a CIImage into the context
- (void)drawImage:(CIImage *)image
atPoint:(CGPoint)atPoint
fromRect:(CGRect)fromRect;
- (void)drawImage:(CIImage *)image
inRect:(CGRect)inRect
fromRect:(CGRect)fromRect;
//Create a CGImageRef from a CIImage
- (nullable CGImageRef)createCGImage:(CIImage *)image
fromRect:(CGRect)fromRect;
- (nullable CGImageRef)createCGImage:(CIImage *)image
fromRect:(CGRect)fromRect
format:(CIFormat)format
colorSpace:(nullable CGColorSpaceRef)colorSpace;
//Create a CGLayer
- (nullable CGLayerRef)createCGLayerWithSize:(CGSize)size
info:(nullable CFDictionaryRef)info;
//Render the image into bitmap data
- (void)render:(CIImage *)image
toBitmap:(void *)data
rowBytes:(ptrdiff_t)rowBytes
bounds:(CGRect)bounds
format:(CIFormat)format;
//Render the image into a pixel buffer
- (void)render:(CIImage *)image
toCVPixelBuffer:(CVPixelBufferRef)buffer
colorSpace:(nullable CGColorSpaceRef)colorSpace;
- (void)render:(CIImage *)image
toCVPixelBuffer:(CVPixelBufferRef)buffer
bounds:(CGRect)bounds
colorSpace:(nullable CGColorSpaceRef)colorSpace;
//Render the image into a Metal texture
- (void)render:(CIImage *)image
toMTLTexture:(id<MTLTexture>)texture
commandBuffer:(nullable id<MTLCommandBuffer>)commandBuffer
bounds:(CGRect)bounds
colorSpace:(CGColorSpaceRef)colorSpace;
//Clear internal caches
- (void)clearCaches;
//The maximum input image size
- (CGSize)inputImageMaximumSize;
//The maximum output image size
- (CGSize)outputImageMaximumSize;
//Encode a CIImage as TIFF data
- (nullable NSData*) TIFFRepresentationOfImage:(CIImage*)image
format:(CIFormat)format
colorSpace:(CGColorSpaceRef)colorSpace
options:(NSDictionary*)options;
//Encode a CIImage as JPEG data
- (nullable NSData*) JPEGRepresentationOfImage:(CIImage*)image
colorSpace:(CGColorSpaceRef)colorSpace
options:(NSDictionary*)options;
//Encode a CIImage as HEIF data
- (nullable NSData*) HEIFRepresentationOfImage:(CIImage*)image
format:(CIFormat)format
colorSpace:(CGColorSpaceRef)colorSpace
options:(NSDictionary*)options;
//Encode a CIImage as PNG data
- (nullable NSData*) PNGRepresentationOfImage:(CIImage*)image
format:(CIFormat)format
colorSpace:(CGColorSpaceRef)colorSpace
options:(NSDictionary*)options;
//Write a CIImage to a TIFF file
- (BOOL) writeTIFFRepresentationOfImage:(CIImage*)image
toURL:(NSURL*)url
format:(CIFormat)format
colorSpace:(CGColorSpaceRef)colorSpace
options:(NSDictionary*)options
error:(NSError **)errorPtr;
//Write a CIImage to a PNG file
- (BOOL) writePNGRepresentationOfImage:(CIImage*)image
toURL:(NSURL*)url
format:(CIFormat)format
colorSpace:(CGColorSpaceRef)colorSpace
options:(NSDictionary*)options
error:(NSError **)errorPtr;
//Write a CIImage to a JPEG file
- (BOOL) writeJPEGRepresentationOfImage:(CIImage*)image
toURL:(NSURL*)url
colorSpace:(CGColorSpaceRef)colorSpace
options:(NSDictionary*)options
error:(NSError **)errorPtr;
//Write a CIImage to a HEIF file
- (BOOL) writeHEIFRepresentationOfImage:(CIImage*)image
toURL:(NSURL*)url
format:(CIFormat)format
colorSpace:(CGColorSpaceRef)colorSpace
options:(NSDictionary*)options
error:(NSError **)errorPtr;
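A CIImage is only a recipe for an image; a CIContext performs the actual rendering. A minimal sketch that renders the result image from the earlier CIImage snippet back into a UIImage:
CIContext *context = [CIContext contextWithOptions:nil];
// createCGImage:fromRect: returns a +1 CGImageRef that the caller must release
CGImageRef cgImage = [context createCGImage:result fromRect:result.extent];
UIImage *output = [UIImage imageWithCGImage:cgImage];
CGImageRelease(cgImage);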
4. The CIDetector Class
CIDetector was demonstrated earlier. It is an extremely powerful class in CoreImage that performs sophisticated image recognition. A breakdown:
//Create a CIDetector instance
/*
type specifies what to detect:
CIDetectorTypeFace face detection
CIDetectorTypeRectangle rectangle detection
CIDetectorTypeText text-region detection
CIDetectorTypeQRCode QR-code detection
options is a configuration dictionary; the available keys are:
CIDetectorAccuracy the detection accuracy, CIDetectorAccuracyLow or CIDetectorAccuracyHigh
CIDetectorTracking whether to track features
CIDetectorMinFeatureSize the minimum feature size, 0-1, relative to the image
CIDetectorMaxFeatureCount the maximum number of features
CIDetectorImageOrientation the image orientation
CIDetectorEyeBlink BOOL, whether to extract the eye-blink expression
CIDetectorSmile BOOL, whether to extract the smile expression
CIDetectorFocalLength the focal length
CIDetectorAspectRatio the expected aspect ratio of a detected rectangle
CIDetectorReturnSubFeatures whether to extract sub-features
*/
+ (nullable CIDetector *)detectorOfType:(NSString*)type
context:(nullable CIContext *)context
options:(nullable NSDictionary *)options;
//Analyse an image and extract its feature array
- (NSArray *)featuresInImage:(CIImage *)image;
- (NSArray *)featuresInImage:(CIImage *)image
options:(nullable NSDictionary *)options;
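Note that the expression-related keys (CIDetectorSmile, CIDetectorEyeBlink) belong in the options of featuresInImage:options:, not in the detector constructor. A sketch (ciImage is assumed to be an existing CIImage):
CIDetector *faceDetector = [CIDetector detectorOfType:CIDetectorTypeFace
                                              context:nil
                                              options:@{CIDetectorAccuracy: CIDetectorAccuracyHigh}];
// Ask for smile and eye-blink information during this detection pass
NSArray *faces = [faceDetector featuresInImage:ciImage
                                       options:@{CIDetectorSmile: @YES,
                                                 CIDetectorEyeBlink: @YES}];
for (CIFaceFeature *face in faces) {
    NSLog(@"smiling: %d, left eye closed: %d", face.hasSmile, face.leftEyeClosed);
}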
5. CIFeature and Its Subclasses
CIFeature and its subclasses define the feature data models.
@interface CIFeature : NSObject {}
//The feature type
/*
CIFeatureTypeFace
CIFeatureTypeRectangle
CIFeatureTypeQRCode
CIFeatureTypeText
*/
@property (readonly, retain) NSString *type;
//The feature's bounds within the image
@property (readonly, assign) CGRect bounds;
@end
//Face feature object
@interface CIFaceFeature : CIFeature
//Position and size
@property (readonly, assign) CGRect bounds;
//Whether a left-eye position was detected
@property (readonly, assign) BOOL hasLeftEyePosition;
//The left-eye position
@property (readonly, assign) CGPoint leftEyePosition;
//Whether a right-eye position was detected
@property (readonly, assign) BOOL hasRightEyePosition;
//The right-eye position
@property (readonly, assign) CGPoint rightEyePosition;
//Whether a mouth position was detected
@property (readonly, assign) BOOL hasMouthPosition;
//The mouth position
@property (readonly, assign) CGPoint mouthPosition;
//Whether a tracking ID is available
@property (readonly, assign) BOOL hasTrackingID;
//The tracking ID
@property (readonly, assign) int trackingID;
//Whether a tracking frame count is available
@property (readonly, assign) BOOL hasTrackingFrameCount;
//The tracking frame count
@property (readonly, assign) int trackingFrameCount;
//Whether a face angle is available
@property (readonly, assign) BOOL hasFaceAngle;
//The face rotation angle
@property (readonly, assign) float faceAngle;
//Whether the face is smiling
@property (readonly, assign) BOOL hasSmile;
//Whether the left eye is closed
@property (readonly, assign) BOOL leftEyeClosed;
//Whether the right eye is closed
@property (readonly, assign) BOOL rightEyeClosed;
@end
//Rectangle feature object
@interface CIRectangleFeature : CIFeature
//Position and size
@property (readonly) CGRect bounds;
@property (readonly) CGPoint topLeft;
@property (readonly) CGPoint topRight;
@property (readonly) CGPoint bottomLeft;
@property (readonly) CGPoint bottomRight;
@end
//QR-code feature object
@interface CIQRCodeFeature : CIFeature
//Position and size information
@property (readonly) CGRect bounds;
@property (readonly) CGPoint topLeft;
@property (readonly) CGPoint topRight;
@property (readonly) CGPoint bottomLeft;
@property (readonly) CGPoint bottomRight;
//The decoded QR-code content
@property (nullable, readonly) NSString* messageString;
//The QR-code descriptor data
@property (nullable, readonly) CIQRCodeDescriptor *symbolDescriptor NS_AVAILABLE(10_13, 11_0);
@end
//Text feature object
@interface CITextFeature : CIFeature
//Position information
@property (readonly) CGRect bounds;
@property (readonly) CGPoint topLeft;
@property (readonly) CGPoint topRight;
@property (readonly) CGPoint bottomLeft;
@property (readonly) CGPoint bottomRight;
//Sub-features
@property (nullable, readonly) NSArray *subFeatures;
@end
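Because featuresInImage: returns a plain NSArray, the type property can drive dispatch when one code path handles several detector types; a short sketch (features is assumed to come from any of the detectors above):
for (CIFeature *feature in features) {
    if ([feature.type isEqualToString:CIFeatureTypeQRCode]) {
        NSLog(@"QR code payload: %@", ((CIQRCodeFeature *)feature).messageString);
    } else if ([feature.type isEqualToString:CIFeatureTypeFace]) {
        NSLog(@"face at %@", NSStringFromCGRect(feature.bounds));
    } else if ([feature.type isEqualToString:CIFeatureTypeText]) {
        NSLog(@"text region at %@", NSStringFromCGRect(feature.bounds));
    }
}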