Implementing Face Stickers, Material Swapping, and Other Animated Effects with ARCore and ARKit

In the last couple of years, many camera apps featuring beauty filters, stickers, and similar effects have appeared on the market. For a project requirement I investigated ARCore, an open-source Google framework that provides 3D face pose information, and combined it with ARKit on an iOS device to implement a TikTok-like effect.
System requirements

  1. iOS 11.0 or later
  2. iPhone 6s or newer

First, here is what the result looks like:


[Screenshot: IMG_3543.PNG]

1. Add the required libraries

platform :ios, '10.0'

target 'ARCoreFaceDemo' do
  pod 'ARCore/AugmentedFaces', '~> 1.13.0'
  pod 'SnapKit', '~> 4.2.0'
end
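After editing the Podfile, run pod install and open the generated .xcworkspace. The snippets that follow all live in a single view controller; the article never shows its property declarations, so here is a minimal sketch of the state they assume. The class name CameraController and most property names are taken from the delegate extensions below and from Google's FacesExample sample; the constant values are assumptions.

import ARCore
import AVFoundation
import CoreMotion
import SceneKit
import UIKit

@available(iOS 11.0, *)
class CameraController: UIViewController {
    // Camera capture pipeline.
    private let session = AVCaptureSession()
    private var captureSession: AVCaptureSession?
    private var captureDevice: AVCaptureDevice?
    private var videoInput: AVCaptureDeviceInput?
    private let cameraImageLayer = CALayer()

    // ARCore face tracking.
    private var faceSession: GARAugmentedFaceSession?
    private var currentFaceFrame: GARAugmentedFaceFrame?
    private var nextFaceFrame: GARAugmentedFaceFrame?
    // FaceMeshGeometryConverter ships with Google's FacesExample sample; it turns a
    // GARAugmentedFace mesh into SCNGeometry.
    private let faceMeshConverter = FaceMeshGeometryConverter()

    // SceneKit scene graph.
    private let sceneView = SCNView()
    private let sceneCamera = SCNCamera()
    private let faceNode = SCNNode()
    private let faceTextureNode = SCNNode()
    private let faceOccluderNode = SCNNode()
    private let faceTextureMaterial = SCNMaterial()
    private let faceOccluderMaterial = SCNMaterial()

    // Sticker nodes and tracked face regions.
    private let capNode = SCNNode()
    private let bigCapNode = SCNNode()
    private let headAnimationNode = SCNNode()
    private var textNode = SCNNode()
    private var noseTipNode: SCNNode?
    private var foreheadLeftNode: SCNNode?
    private var foreheadRightNode: SCNNode?

    // Assumed constants (values are guesses, not from the article).
    private let kCentimetersToMeters: Float = 0.01
    private let kMotionUpdateInterval: TimeInterval = 0.1
}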

2. Capture camera frames with AVFoundation

/// Set up a capture session on the front camera to receive frames for face tracking.
private func setupCamera() {
    // Face stickers track the user, so use the front (selfie) camera.
    guard let device =
        AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front),
        let input = try? AVCaptureDeviceInput(device: device)
        else {
            NSLog("Failed to create capture device from front camera.")
            return
    }

    // Deliver BGRA frames to our delegate on a high-priority background queue.
    let output = AVCaptureVideoDataOutput()
    output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
    output.setSampleBufferDelegate(self, queue: DispatchQueue.global(qos: .userInteractive))

    session.sessionPreset = .high
    videoInput = input
    session.addInput(input)
    session.addOutput(output)
    captureSession = session
    captureDevice = device

    // The camera image is drawn on a plain CALayer behind the SceneKit view.
    cameraImageLayer.contentsGravity = .center
    cameraImageLayer.frame = view.bounds
    view.layer.insertSublayer(cameraImageLayer, at: 0)

    startCameraCapture()
}
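The face session itself is never created in the article's listings. A minimal sketch, following the ARCore Augmented Faces API: GARAugmentedFaceSession takes the capture device's field of view so it can project the face mesh correctly. Call this once after setupCamera() so captureDevice is populated.

private func setupFaceSession() {
    do {
        // fieldOfView lets ARCore match its projection to the camera's optics;
        // 60 degrees is an assumed fallback.
        faceSession = try GARAugmentedFaceSession(
            fieldOfView: CGFloat(captureDevice?.activeFormat.videoFieldOfView ?? 60))
    } catch {
        NSLog("Failed to create GARAugmentedFaceSession: \(error)")
    }
}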

3. Set up the scene

private func setupScene() {
    // fox_face.scn ships with Google's Augmented Faces sample assets. The sample's
    // default face texture is Face.scnassets/face_texture.png; multiply01.png is a custom one.
    let scene = SCNScene(named: "Face.scnassets/fox_face.scn")
    guard let faceImage = UIImage(named: "multiply01.png"),
        let modelRoot = scene?.rootNode.childNodes.first
        else {
            NSLog("Failed to load face scene!")
            return
    }

    // SceneKit uses meters for units, while the canonical face mesh asset uses centimeters.
    modelRoot.simdScale = simd_float3(1, 1, 1) * kCentimetersToMeters

    // Named region nodes that ARCore tracks; stickers are positioned relative to them.
    foreheadLeftNode = modelRoot.childNode(withName: "FOREHEAD_LEFT", recursively: true)
    foreheadRightNode = modelRoot.childNode(withName: "FOREHEAD_RIGHT", recursively: true)
    noseTipNode = modelRoot.childNode(withName: "NOSE_TIP", recursively: true)

    faceNode.addChildNode(faceTextureNode)
    faceNode.addChildNode(faceOccluderNode)

    // (Two earlier sticker experiments, a small cap and a full-head plane, attached SCNBox
    // geometry to capNode and bigCapNode here in the same way as headAnimationNode below.)

    // Build an animated sticker from a PNG frame sequence and map it onto a plane above the head.
    var gifImages: [UIImage] = []
    for i in 0..<23 {
        let name = String(format: "xiaohuanghua_%d.png", i)
        if let image = UIImage(named: name) {
            gifImages.append(image)
        }
    }
    let layer = GifLayer()
    layer.frame = CGRect(x: 0, y: 0, width: 300, height: 400)
    layer.loadData(gifImages)

    // A CALayer can be used directly as a material's diffuse contents.
    let gifPlane = SCNBox(width: 30, height: 15, length: 0, chamferRadius: 0)
    gifPlane.firstMaterial?.diffuse.contents = layer
    headAnimationNode.geometry = gifPlane
    modelRoot.addChildNode(headAnimationNode)

    // 3D text pinned just in front of the nose tip.
    textNode = SCNNode()
    if var position = noseTipNode?.position {
        position.z += 1
        textNode.position = position
    }
    let text = SCNText(string: "Vaffle", extrusionDepth: 0.3)
    text.firstMaterial?.diffuse.contents = UIColor.red
    text.font = UIFont.systemFont(ofSize: 1)
    textNode.geometry = text
    noseTipNode?.addChildNode(textNode)

    scene?.rootNode.addChildNode(faceNode)

    let cameraNode = SCNNode()
    cameraNode.camera = sceneCamera
    scene?.rootNode.addChildNode(cameraNode)

    sceneView.scene = scene
    sceneView.frame = view.bounds
    sceneView.delegate = self
    sceneView.rendersContinuously = true
    sceneView.backgroundColor = .clear
    view.addSubview(sceneView)

    faceTextureMaterial.diffuse.contents = faceImage
    // SCNMaterial does not premultiply alpha even with blendMode set to alpha, so do it manually.
    faceTextureMaterial.shaderModifiers =
        [SCNShaderModifierEntryPoint.fragment: "_output.color.rgb *= _output.color.a;"]
    // The occluder writes only depth, so the real face hides geometry behind the head.
    faceOccluderMaterial.colorBufferWriteMask = []
}
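GifLayer above is a custom class the article does not show. Here is a minimal sketch of one way to implement it, assuming it simply cycles the PNG frames on the layer's contents with a keyframe animation; the frame rate is an assumption.

class GifLayer: CALayer {
    /// Plays the given frames in a loop by animating the layer's `contents` property.
    func loadData(_ images: [UIImage]) {
        let animation = CAKeyframeAnimation(keyPath: "contents")
        animation.values = images.compactMap { $0.cgImage }
        animation.duration = Double(images.count) / 15.0  // assumed ~15 fps playback
        animation.calculationMode = .discrete  // jump between frames, no cross-fade
        animation.repeatCount = .infinity
        add(animation, forKey: "gifFrames")
    }
}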

4. Initialize device motion updates

private lazy var motionManager = CMMotionManager()

private func setupMotion() {
    guard motionManager.isDeviceMotionAvailable else {
        NSLog("Device does not have motion sensors.")
        return
    }
    // The gravity vector from device motion is used later to work out which way is up for the face.
    motionManager.deviceMotionUpdateInterval = kMotionUpdateInterval
    motionManager.startDeviceMotionUpdates()
}

5. Start the camera and capture data

private func startCameraCapture() {
    getVideoPermission(permissionHandler: { granted in
        guard granted else {
            NSLog("Permission not granted to use camera.")
            return
        }
        self.captureSession?.startRunning()
    })
}
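getVideoPermission is another helper the article leaves out; a straightforward sketch using AVFoundation's authorization API:

/// Assumed helper (not shown in the article): requests camera access and reports the result.
private func getVideoPermission(permissionHandler: @escaping (Bool) -> Void) {
    switch AVCaptureDevice.authorizationStatus(for: .video) {
    case .authorized:
        permissionHandler(true)
    case .notDetermined:
        AVCaptureDevice.requestAccess(for: .video, completionHandler: permissionHandler)
    default:
        permissionHandler(false)
    }
}

Note that startRunning() blocks until the session starts, so Apple recommends calling it off the main thread.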
@available(iOS 11.0, *)
extension CameraController: AVCaptureVideoDataOutputSampleBufferDelegate {

    public func captureOutput(
        _ output: AVCaptureOutput,
        didOutput sampleBuffer: CMSampleBuffer,
        from connection: AVCaptureConnection
    ) {
        guard let imgBuffer = CMSampleBufferGetImageBuffer(sampleBuffer),
            let deviceMotion = motionManager.deviceMotion
            else { return }

        let frameTime = CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer))

        // Use the device's gravity vector to determine which direction is up for a face. This is the
        // positive counter-clockwise rotation of the device relative to landscape left orientation.
        let rotation = 2 * .pi - atan2(deviceMotion.gravity.x, deviceMotion.gravity.y) + .pi / 2
        let rotationDegrees = UInt(rotation * 180 / .pi) % 360

        // Feed each camera frame to ARCore's face session for detection and tracking.
        faceSession?.update(
            with: imgBuffer,
            timestamp: frameTime,
            recognitionRotation: rotationDegrees)
    }
}
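The listings never show where nextFaceFrame comes from. One assumed way to feed it is to poll the session's currentFrame property after each update; treat the helper name and the polling approach as assumptions rather than the article's actual mechanism.

// Hypothetical helper: call after faceSession?.update(...) or at the start of the render
// callback to hand the most recently computed ARCore frame to the renderer.
private func refreshNextFaceFrame() {
    nextFaceFrame = faceSession?.currentFrame
}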

6. Scene renderer callbacks

@available(iOS 11.0, *)
extension CameraController: SCNSceneRendererDelegate {

    public func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
        guard nextFaceFrame != nil && nextFaceFrame != currentFaceFrame else { return }

        currentFaceFrame = nextFaceFrame

        if let face = currentFaceFrame?.face {
            // Rebuild the face mesh for this frame; the occluder shares the same geometry.
            faceTextureNode.geometry = faceMeshConverter.geometryFromFace(face)
            faceTextureNode.geometry?.firstMaterial = faceTextureMaterial
            faceOccluderNode.geometry = faceTextureNode.geometry?.copy() as? SCNGeometry
            faceOccluderNode.geometry?.firstMaterial = faceOccluderMaterial

            faceNode.simdWorldTransform = face.centerTransform
            updateTransform(face.transform(for: .nose), for: noseTipNode)
            updateTransform(face.transform(for: .foreheadLeft), for: foreheadLeftNode)
            updateTransform(face.transform(for: .foreheadRight), for: foreheadRightNode)

            if let simdScale = noseTipNode?.simdScale {
                textNode.simdScale = simdScale
            }

            // Each sticker takes the nose transform for orientation, then is repositioned
            // at an offset from the midpoint of the two forehead nodes.
            capNode.simdScale = faceTextureNode.simdScale
            updateTransform(face.transform(for: .nose), for: capNode)
            if let position = foreheadMidpoint(dx: -1, dy: 0, dz: 1.5) {
                capNode.position = position
            }

            updateTransform(face.transform(for: .nose), for: bigCapNode)
            if let position = foreheadMidpoint(dx: 0, dy: 0, dz: -3) {
                bigCapNode.position = position
            }

            updateTransform(face.transform(for: .nose), for: headAnimationNode)
            if let position = foreheadMidpoint(dx: -12, dy: 0, dz: -15) {
                headAnimationNode.position = position
            }
        }

        // Only show AR content when a face is detected.
        sceneView.scene?.rootNode.isHidden = currentFaceFrame?.face == nil
    }

    /// Midpoint of the two forehead region nodes, shifted by the given offsets.
    private func foreheadMidpoint(dx: Float, dy: Float, dz: Float) -> SCNVector3? {
        guard let left = foreheadLeftNode?.position,
            let right = foreheadRightNode?.position else { return nil }
        return SCNVector3(
            (left.x + right.x) / 2 + dx,
            (left.y + right.y) / 2 + dy,
            (left.z + right.z) / 2 + dz)
    }

    public func renderer(
        _ renderer: SCNSceneRenderer,
        didRenderScene scene: SCNScene,
        atTime time: TimeInterval
    ) {
        guard let frame = currentFaceFrame else { return }

        // Draw the captured camera image behind the SceneKit content, suppressing
        // implicit layer animations so the video does not lag or cross-fade.
        CATransaction.begin()
        CATransaction.setAnimationDuration(0)
        cameraImageLayer.contents = frame.capturedImage as CVPixelBuffer
        cameraImageLayer.setAffineTransform(
            frame.displayTransform(
                forViewportSize: cameraImageLayer.bounds.size,
                presentationOrientation: .portrait,
                mirrored: videoInput?.device.position == .front)
        )
        CATransaction.commit()
    }
}
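updateTransform(_:for:) is also assumed rather than shown. Google's FacesExample sample defines it roughly like this: the tracked world transform from ARCore replaces the node's transform, but the node's local scale (the centimeters-to-meters conversion) is preserved, and the asset's forward axis is flipped to match SceneKit's.

private func updateTransform(_ transform: simd_float4x4, for regionNode: SCNNode?) {
    guard let node = regionNode else { return }

    // Adopt the tracked world transform while keeping the node's own scale.
    let localScale = node.simdScale
    node.simdWorldTransform = transform
    node.simdScale = localScale

    // The canonical face mesh asset faces Z+, while SceneKit's camera looks down Z-,
    // so rotate 180 degrees around Y to orient the sticker correctly.
    node.simdLocalRotate(by: simd_quatf(angle: .pi, axis: simd_float3(0, 1, 0)))
}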
