ios 从采集到gpuimage,再到人脸定位

//按照BGRA采集,转换为RGB24,送入gpuimage

// Capture arrives as BGRA; convert in place to RGB24 before handing the
// buffer to GPUImage. The size argument is the full BGRA byte count.
BGRA_TO_RGB24(baseAddress, tempCaptureCapability.width * tempCaptureCapability.height * 4);

// GPUImage filter pass on the raw frame; output written to temp_argb as ARGB.
addfilter_on_rawdata(baseAddress, tempCaptureCapability.width, tempCaptureCapability.height, temp_argb);

// Re-order ARGB -> RGBA so the buffer matches the UIImage helper below.
ARGB_TO_RGBA(temp_argb, tempCaptureCapability.width * tempCaptureCapability.height * 4, temp_rgba);

// Wrap the RGBA buffer in a UIImage (helper covered in a companion post).
UIImage *imgJPeg = [self convertBitmapRGBA8ToUIImage:temp_rgba
                                           withWidth:tempCaptureCapability.width
                                          withHeight:tempCaptureCapability.height];

// Core Image face detection starts here. Renamed from `cgImage`: this is a
// CIImage, not a CGImage.
CIImage *ciImage = [CIImage imageWithCGImage:imgJPeg.CGImage];

// PERF(review): CIDetector creation is expensive; if this runs per captured
// frame, hoist the detector into a lazily-created ivar and reuse it.
NSDictionary *opts = @{CIDetectorAccuracy : CIDetectorAccuracyHigh};

CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeFace
                                          context:nil
                                          options:opts];

// Run detection; each element is a CIFaceFeature.
NSArray *features = [detector featuresInImage:ciImage];

CIFaceFeature *feature = nil;
// FIX: initialize to a known value — previously `rect` was only assigned when
// a mouth was detected, so a frame with no face left it uninitialized.
CGRect rect = CGRectZero;

for (CIFaceFeature *f in features) {
    CGRect aRect = f.bounds;
    NSLog(@"%f, %f, %f, %f", aRect.origin.x, aRect.origin.y, aRect.size.width, aRect.size.height);

    // Eye and mouth positions are only valid when the matching hasX flag is set.
    if (f.hasLeftEyePosition) NSLog(@"Left eye %g %g\n", f.leftEyePosition.x, f.leftEyePosition.y);

    if (f.hasRightEyePosition) NSLog(@"Right eye %g %g\n", f.rightEyePosition.x, f.rightEyePosition.y);

    if (f.hasMouthPosition) {
        NSLog(@"Mouth %g %g %g %g\n", f.mouthPosition.x, f.mouthPosition.y, f.bounds.size.height, f.bounds.size.width);

        feature = f;

        // Region of interest around the mouth. The 100/60 offsets and the
        // 200x250 size are empirically chosen — TODO confirm they suit the
        // capture resolution in use.
        rect = CGRectMake(f.mouthPosition.x - 100, f.mouthPosition.y - 60, 200, 250);
    }
}

你可能感兴趣的:(ios 从采集到gpuimage,再到人脸定位)