【iOS】Converting CMSampleBuffer to UIImage, and UIImage to CVPixelBuffer

CMSampleBuffer to UIImage

    // CMSampleBuffer -> UIImage
    func sampleBufferToImage(sampleBuffer: CMSampleBuffer) -> UIImage {
        // Get the CVPixelBuffer (image buffer) holding the sample buffer's media data
        let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)! as CVPixelBuffer

        // Lock the base address of the pixel buffer
        CVPixelBufferLockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))

        // Get the base address of the pixel buffer
        let baseAddress = CVPixelBufferGetBaseAddress(imageBuffer)
        
        // Get the number of bytes per row of the pixel buffer
        let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
        // Get the width and height of the pixel buffer
        let width = CVPixelBufferGetWidth(imageBuffer)
        let height = CVPixelBufferGetHeight(imageBuffer)
        
        // Create a device-dependent RGB color space
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        
        // Create a bitmap graphics context backed by the pixel buffer's data.
        // byteOrder32Little + premultipliedFirst matches a kCVPixelFormatType_32BGRA buffer,
        // the format typically requested from a camera's video data output.
        let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8,
                                bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.premultipliedFirst.rawValue)
        // Create a Quartz image from the pixel data in the bitmap graphics context
        let quartzImage: CGImage = context!.makeImage()!
        // Unlock the pixel buffer
        CVPixelBufferUnlockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))
        
        let image = UIImage(cgImage: quartzImage)
        return image
    }
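
In practice this conversion usually runs inside an AVCaptureVideoDataOutputSampleBufferDelegate callback. The sketch below is only illustrative and not from the original post: the FrameGrabber class and queue label are made up, and it assumes sampleBufferToImage(sampleBuffer:) is defined in the same class. It also requests kCVPixelFormatType_32BGRA output so the bitmap info used above matches the buffer's memory layout.

    import AVFoundation
    import UIKit

    // Hypothetical delegate that converts incoming frames with
    // sampleBufferToImage(sampleBuffer:), assumed to be a member of this class.
    final class FrameGrabber: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {

        func configure(_ output: AVCaptureVideoDataOutput, on session: AVCaptureSession) {
            // Ask for BGRA frames so byteOrder32Little/premultipliedFirst matches the buffer layout
            output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
            output.setSampleBufferDelegate(self, queue: DispatchQueue(label: "frame.grabber"))
            if session.canAddOutput(output) {
                session.addOutput(output)
            }
        }

        func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
            let image = sampleBufferToImage(sampleBuffer: sampleBuffer)
            DispatchQueue.main.async {
                // Hand the UIImage to the UI, e.g. assign it to an image view
                _ = image
            }
        }
    }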

UIImage to CVPixelBuffer

    // UIImage -> CVPixelBuffer
    func imageToCVPixelBuffer(image:UIImage) -> CVPixelBuffer? {
        // Require the buffer to be compatible with CGImage / CGBitmapContext rendering
        let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue, kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue] as CFDictionary
        var pixelBuffer: CVPixelBuffer?
        // Create an empty 32-bit ARGB pixel buffer matching the image's point size
        let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(image.size.width), Int(image.size.height), kCVPixelFormatType_32ARGB, attrs, &pixelBuffer)
        guard status == kCVReturnSuccess else {
            return nil
        }
        
        // Lock the base address so we can draw into the buffer's memory
        CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        let pixelData = CVPixelBufferGetBaseAddress(pixelBuffer!)
        
        // Create a bitmap context backed by the pixel buffer's memory.
        // noneSkipFirst (big-endian xRGB) matches the kCVPixelFormatType_32ARGB layout declared above.
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGContext(data: pixelData, width: Int(image.size.width), height: Int(image.size.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer!), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue)
        
        // Flip the coordinate system so UIKit's top-left origin draws upright in Core Graphics
        context?.translateBy(x: 0, y: image.size.height)
        context?.scaleBy(x: 1.0, y: -1.0)
        
        // Draw the UIImage into the bitmap context, then unlock the buffer
        UIGraphicsPushContext(context!)
        image.draw(in: CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height))
        UIGraphicsPopContext()
        CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        return pixelBuffer
    }
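
To sanity-check the conversion, you can run it on any UIImage and inspect the buffer's dimensions and pixel format. A minimal usage sketch follows; the "example" asset name is just a placeholder.

    // Usage sketch: convert a UIImage and inspect the resulting buffer.
    if let image = UIImage(named: "example"),
       let pixelBuffer = imageToCVPixelBuffer(image: image) {
        let width = CVPixelBufferGetWidth(pixelBuffer)
        let height = CVPixelBufferGetHeight(pixelBuffer)
        let format = CVPixelBufferGetPixelFormatType(pixelBuffer)  // expected: kCVPixelFormatType_32ARGB
        print("Created pixel buffer: \(width)x\(height), format: \(format)")
    }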
