IOS自定义相机总结

自定义相机分以下几个步骤

1,判断当前相机设备是否可用与是否授权

2,自定义相机的相关参数

3,相机切换与闪光灯

4,拍照处理

授权及设备判断

1,摄像头是否可用

/// Whether any camera source is available on this device
/// (always false in the simulator).
func isCameraAvailable() -> Bool {
    return UIImagePickerController.isSourceTypeAvailable(.camera)
}
/// Whether the front-facing camera is available.
func isFrontCameraAvailable() -> Bool {
    return UIImagePickerController.isCameraDeviceAvailable(.front)
}
/// Whether the rear (back) camera is available.
func isBackCameraAvailable() -> Bool {
    return UIImagePickerController.isCameraDeviceAvailable(.rear)
}

2,用户是否授权

   //判断相机是否授权
    /// Whether the user has granted camera (video) permission.
    /// - Returns: `true` only when the authorization status is `.authorized`;
    ///   `.notDetermined`, `.denied` and `.restricted` all yield `false`.
    func isCanUseCamera() -> Bool {
        return AVCaptureDevice.authorizationStatus(for: .video) == .authorized
    }

相机参数配置

1,基础配置

    // Capture device (default video camera). NOTE(review): `default(for:)`
    // returns an Optional and `try!` can crash if the camera is unavailable
    // or unauthorized — production code should guard both.
    device = AVCaptureDevice.default(for: AVMediaType.video)
    // Input that feeds frames from the device into the session.
    input = try! AVCaptureDeviceInput.init(device: device)
    // Still-image output (deprecated since iOS 10 — prefer AVCapturePhotoOutput).
    output = AVCaptureStillImageOutput.init()
    // The session wires the input and output together.
    session = AVCaptureSession.init()

    if session.canAddInput(input) {
        session.addInput(input)
    }
    if session.canAddOutput(output) {
        session.addOutput(output)
    }
    // Preview layer. NOTE(review): `layer` is never attached to a view here;
    // add it to a view's layer hierarchy or no preview will be visible.
    let layer = AVCaptureVideoPreviewLayer.init(session: session)

    session.startRunning()

2,可选配置

    if session.canSetSessionPreset(.photo) {
        // Preset controlling the quality/resolution of captured images.
        session.sessionPreset = .photo
    }

    // Lock the device before touching hardware configuration.
    // NOTE(review): `try!` mirrors the tutorial; locking can fail when the
    // device is in use elsewhere — handle the error in production code.
    try! device.lockForConfiguration()

    // Flash mode. (Deprecated since iOS 10 — set `flashMode` on
    // AVCapturePhotoSettings with AVCapturePhotoOutput instead.)
    if device.isFlashModeSupported(.auto) {
        device.flashMode = .auto
    }

    // Automatic white balance, when the hardware supports it.
    if device.isWhiteBalanceModeSupported(.autoWhiteBalance) {
        device.whiteBalanceMode = .autoWhiteBalance
    }
    // Always pair lockForConfiguration() with unlockForConfiguration().
    device.unlockForConfiguration()

拍摄

/// Captures one still image from the session's video connection.
func takePhoto() {
    // Without an active video connection there is nothing to capture.
    guard let connection = output.connection(with: AVMediaType.video) else {
        print("拍摄失败")
        return
    }
    output.captureStillImageAsynchronously(from: connection) { (buffer, error) in
        // Bail out on capture errors instead of force-unwrapping a nil buffer.
        guard error == nil,
            let buffer = buffer,
            let data = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(buffer) else {
            print("拍摄失败")
            return
        }
        // TODO: consume `data` (e.g. UIImage(data:) or save to the photo
        // library) — the original snippet computed it and dropped it.
        _ = data
    }
}

实时滤镜相机

要实现实时滤镜效果,则需要获得相机捕获的每一帧,并进行加滤镜的操作

1,改变输出源头

    // Swap the still-image output for a per-frame video data output.
    output = AVCaptureVideoDataOutput.init()
    // Register the sample-buffer delegate and the dispatch queue its
    // callbacks are delivered on.
    output.setSampleBufferDelegate(self, queue: queue)
    // Request 32-bit BGRA pixel buffers (width/height etc. could also be set here).
    output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String:NSNumber.init(value: kCVPixelFormatType_32BGRA)]

2,回调代理方法

/// Sample-buffer delegate callback: receives every captured video frame so a
/// filter can be applied in real time (GPUImage2 is used for the filtering).
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // Convert the raw frame into a UIImage that GPUImage2 can consume.
    let im = self.imageFromSampleBuffer(sampleBuffer: sampleBuffer)
    // NOTE(review): rebuilding the whole filter chain for every frame is
    // wasteful — create these objects once and reuse them per frame.
    let brightnessAdjustment = BrightnessAdjustment()
    brightnessAdjustment.brightness = 0.2
    // Picture input / output endpoints of the GPUImage2 pipeline.
    let pictureInput = PictureInput(image: im)
    let pictureOutput = PictureOutput()
    // Invoked with the filtered UIImage once processing finishes.
    // Capture self weakly: the closure is stored by pictureOutput and a strong
    // capture would extend the owner's lifetime unnecessarily.
    pictureOutput.imageAvailableCallback = { [weak self] image in
        // UI updates must happen on the main queue.
        OperationQueue.main.addOperation {
            // Rotate 90° so the processed frame matches the screen orientation.
            self?.imv.image = image.imageRotatedByDegrees(degrees: 90, flip: false)
        }
    }
    // Wire the processing chain: input -> brightness filter -> output.
    pictureInput --> brightnessAdjustment --> pictureOutput
    // synchronously: true blocks until done; the callback above then fires.
    pictureInput.processImage(synchronously: true)
}

补充buffer转换为UIImage 和 UIImage进行旋转(因为得到处理的图片需要旋转才正确)的方法 (代码为Swift4.0版本)

extension UIImage {
    /// Returns a copy of this image rotated by `degrees` (positive is
    /// clockwise when facing the image); `flip` additionally mirrors the
    /// result horizontally.
    public func imageRotatedByDegrees(degrees: CGFloat, flip: Bool) -> UIImage {
        // CGFloat.pi replaces the deprecated M_PI; the original also defined
        // an unused radians-to-degrees closure, removed here.
        let degreesToRadians: (CGFloat) -> CGFloat = {
            return $0 / 180.0 * CGFloat.pi
        }

        // A throwaway rotated view yields the bounding box of the rotated image.
        let rotatedViewBox = UIView(frame: CGRect(origin: CGPoint.zero, size: size))
        rotatedViewBox.transform = CGAffineTransform(rotationAngle: degreesToRadians(degrees))
        let rotatedSize = rotatedViewBox.frame.size

        // Create the bitmap context.
        // NOTE(review): UIGraphicsBeginImageContext renders at scale 1.0;
        // use UIGraphicsBeginImageContextWithOptions(rotatedSize, false, scale)
        // to preserve Retina resolution.
        UIGraphicsBeginImageContext(rotatedSize)
        let bitmap = UIGraphicsGetCurrentContext()

        // Move the origin to the center so rotation happens around the middle.
        bitmap?.translateBy(x: rotatedSize.width / 2.0, y: rotatedSize.height / 2.0)
        bitmap?.rotate(by: degreesToRadians(degrees))

        // CoreGraphics draws with a flipped y-axis relative to UIKit, hence
        // the constant -1.0 on y; x is mirrored only when `flip` is requested.
        let yFlip: CGFloat = flip ? -1.0 : 1.0
        bitmap?.scaleBy(x: yFlip, y: -1.0)
        bitmap?.draw(self.cgImage!, in: CGRect.init(x: -size.width / 2, y: -size.height / 2, width: size.width, height: size.height))

        let newImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()

        return newImage!
    }
}
/// Converts a captured CMSampleBuffer into a UIImage.
/// NOTE(review): assumes frames were delivered as kCVPixelFormatType_32BGRA
/// (as configured on the video data output); other formats will render wrong.
func imageFromSampleBuffer(sampleBuffer : CMSampleBuffer) -> UIImage
{
    // Core Video pixel buffer holding the raw frame. A single guard replaces
    // the original's repeated force-unwraps of the same optional.
    guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        // Same crash-on-nil behavior as the original, but with a message.
        fatalError("Sample buffer contains no image buffer")
    }
    // Lock the base address while CPU code reads the pixel data.
    CVPixelBufferLockBaseAddress(imageBuffer, CVPixelBufferLockFlags.readOnly)

    // Pointer to the first pixel (the original comment mislabeled this as
    // "bytes per row").
    let baseAddress = CVPixelBufferGetBaseAddress(imageBuffer)
    // Row stride and dimensions of the pixel buffer.
    let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
    let width = CVPixelBufferGetWidth(imageBuffer)
    let height = CVPixelBufferGetHeight(imageBuffer)

    // Device-dependent RGB color space for the bitmap context.
    let colorSpace = CGColorSpaceCreateDeviceRGB()

    // Little-endian 32-bit with premultiplied alpha first => BGRA, matching
    // the pixel format requested from the capture output.
    var bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Little.rawValue
    bitmapInfo |= CGImageAlphaInfo.premultipliedFirst.rawValue & CGBitmapInfo.alphaInfoMask.rawValue

    // Wrap the pixel data in a bitmap context and snapshot it as a CGImage.
    let context = CGContext.init(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo)
    let quartzImage = context?.makeImage()

    // Unlock before handing the image back.
    CVPixelBufferUnlockBaseAddress(imageBuffer, CVPixelBufferLockFlags.readOnly)

    // Force-unwrap mirrors the original: a nil CGImage here is a logic bug.
    return UIImage.init(cgImage: quartzImage!)
}

你可能感兴趣的:(IOS自定义相机总结)