Preface
There are two core approaches to reversing a video:
- Use AVAssetImageGenerator to read an image for each time in reverse order, then write out a new video with AVAssetWriterInputPixelBufferAdaptor, AVAssetWriter, and AVAssetWriterInput.
- Use AVAssetReader to read every frame into an array, reverse the array, then write out a new video with AVAssetWriterInputPixelBufferAdaptor, AVAssetWriter, and AVAssetWriterInput.
The first approach is slow but light on memory: even a 4-second clip takes a very long time. The second approach is faster, but its memory footprint is large and it crashes easily. This article uses the second approach and explains how to keep memory usage stable while doing so.
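For reference, here is a minimal sketch of what the first approach could look like. The function name, the frame-rate parameter, and the omission of the AVAssetWriter step are my own assumptions for illustration; the rest of this article does not use this approach.
import AVFoundation

// Hypothetical sketch of approach 1: pull a CGImage for each sample time in reverse order.
// Writing the images back out with AVAssetWriter is omitted here.
func reversedFrames(of asset: AVAsset, fps: Double = 25) -> [CGImage] {
    let generator = AVAssetImageGenerator(asset: asset)
    // Without zero tolerances the generator may snap to nearby keyframes.
    generator.requestedTimeToleranceBefore = .zero
    generator.requestedTimeToleranceAfter = .zero
    let duration = asset.duration.seconds
    // Sample times from the end of the asset back to the start.
    let times = stride(from: duration, through: 0, by: -1 / fps)
        .map { CMTime(seconds: $0, preferredTimescale: 600) }
    return times.compactMap { try? generator.copyCGImage(at: $0, actualTime: nil) }
}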
Getting started
The code below already includes the performance optimizations.
The variables and properties we will need:
fileprivate let FRAMES_PER_SEC: Double = 25
fileprivate let FRAME_SCALE: CMTimeScale = 600
// How much the presentation time advances for each appended frame
fileprivate let INCREMENT_TIME: Double = 1 / FRAMES_PER_SEC
// Maximum number of frames read into memory per batch
fileprivate let MAX_READ_SAMPLE_COUNT = 50
// Directory where the temporary assets are saved
fileprivate let SAVE_PATH = String.qe.documentPath() + "/ReverseVideos"
/// Note: this progress does not grow continuously
public var progress: Float = 0
public var completionClosure: ((_ composition: AVMutableComposition) -> Void)?
private var composition: AVMutableComposition?
private let reverseTimeRange: CMTimeRange
private var assetWriter: AVAssetWriter?
private var assetWriterInput: AVAssetWriterInput?
private var assetWriterPixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor?
private var sampleBuffers: [CMSampleBuffer] = []
private var tempVideoPartPaths: [String] = []
private var incrementTime: Double = 0
Initialization
The goal of this article is to reverse only a portion of a video rather than the whole thing, so the design takes an AVMutableComposition and a CMTimeRange as its inputs. The initializer is shown below, followed by a usage sketch.
init(with composition: AVMutableComposition, at timeRange: CMTimeRange) throws {
    self.composition = composition
    reverseTimeRange = timeRange
    // Create the directory for the temporary video segments if it does not exist yet.
    var isDirectory: ObjCBool = true
    if !FileManager.default.fileExists(atPath: SAVE_PATH, isDirectory: &isDirectory) {
        try FileManager.default.createDirectory(atPath: SAVE_PATH, withIntermediateDirectories: true, attributes: nil)
    }
}
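A quick usage sketch. The helper name makeReverseTool, the class name QEReverseTool, and the way the source composition is assembled are assumptions on my part; only the initializer's signature comes from the code above.
import AVFoundation

// Hypothetical caller: build a single-track composition and hand it to the reverse tool.
func makeReverseTool(for videoURL: URL) throws -> QEReverseTool {
    let asset = AVURLAsset(url: videoURL)
    let composition = AVMutableComposition()
    let videoTrack = composition.addMutableTrack(withMediaType: .video,
                                                 preferredTrackID: kCMPersistentTrackID_Invalid)
    if let sourceTrack = asset.tracks(withMediaType: .video).first {
        try videoTrack?.insertTimeRange(CMTimeRange(start: .zero, duration: asset.duration),
                                        of: sourceTrack, at: .zero)
    }
    // Reverse a 2-second range starting at the 1-second mark.
    let range = CMTimeRange(start: CMTime(seconds: 1, preferredTimescale: 600),
                            duration: CMTime(seconds: 2, preferredTimescale: 600))
    return try QEReverseTool(with: composition, at: range) // QEReverseTool is a hypothetical class name
}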
Writing with AVAssetWriter
private func writeSample() {
    var timeElapsed: Double = 0
    // Walk the buffered frames back to front and append each pixel buffer with an increasing presentation time.
    for i in (0..<sampleBuffers.count).reversed() {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffers[i]) else { continue }
        // Wait until the input can accept more data.
        while assetWriterInput?.isReadyForMoreMediaData == false {
            Thread.sleep(forTimeInterval: 0.01)
        }
        let presentationTime = CMTime(seconds: timeElapsed, preferredTimescale: FRAME_SCALE)
        if assetWriterPixelBufferAdaptor?.append(pixelBuffer, withPresentationTime: presentationTime) == false {
            QELog("append pixel buffer failed")
        }
        timeElapsed += INCREMENT_TIME
    }
}
startWriting and finishWriting on AVAssetWriter must be called as a pair, so we wrap them up as follows:
private func writeAsset() {
do {
try startWriting()
} catch {
QELog("start writing failed, reason: \(error.localizedDescription)")
return
}
writeSample()
endWriting()
    // Release the buffered frames so memory stays flat between batches
    sampleBuffers.removeAll()
}
private func startWriting() throws {
    // Path where each temporary asset segment is stored
let outputPath = SAVE_PATH + "/\(String.qe.timestamp())_\(arc4random()).mov"
tempVideoPartPaths.append(outputPath)
let outputURL = URL(fileURLWithPath: outputPath)
assetWriter = try AVAssetWriter(outputURL: outputURL, fileType: .mov)
guard assetWriter != nil else {
throw ReverseToolError.initWriterFailed
}
let settings: [String: Any] = [AVVideoCodecKey: AVVideoCodecType.h264,
AVVideoWidthKey: self.composition!.naturalSize.width,
AVVideoHeightKey: self.composition!.naturalSize.height]
assetWriterInput = AVAssetWriterInput(mediaType: .video, outputSettings: settings)
guard assetWriterInput != nil else {
throw ReverseToolError.initWriterFailed
}
assetWriterInput!.expectsMediaDataInRealTime = true
assetWriter!.add(assetWriterInput!)
assetWriterPixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: assetWriterInput!, sourcePixelBufferAttributes: nil)
guard assetWriterPixelBufferAdaptor != nil else {
throw ReverseToolError.initAdaptorFailed
}
assetWriter!.startWriting()
assetWriter!.startSession(atSourceTime: CMTime(value: 0, timescale: FRAME_SCALE))
}
private func endWriting() {
    // Use a semaphore to turn the asynchronous finishWriting callback into a synchronous call.
    let semaphore = DispatchSemaphore(value: 0)
    assetWriter?.finishWriting {
        semaphore.signal()
    }
    semaphore.wait()
}
The core reversal logic
Reversing a video is a time-consuming operation, so be sure to run it on a background thread (a caller-side sketch follows the method below).
public func reverse() {
DispatchQueue.global().async {
QELog("开始反转视频")
self.readImages()
}
}
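On the calling side, set the completion closure before kicking things off; it is invoked on the main queue at the end of readImages below. The sourceURL placeholder and the makeReverseTool helper continue the hypothetical naming from the earlier usage sketch.
let sourceURL = URL(fileURLWithPath: NSTemporaryDirectory() + "source.mov") // placeholder path
let reverseTool = try makeReverseTool(for: sourceURL)
reverseTool.completionClosure = { reversedComposition in
    // Delivered on the main queue once the reversed composition is assembled.
    QELog("reversal finished, duration: \(reversedComposition.duration.seconds)s")
}
reverseTool.reverse()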
private func readImages() {
let track = composition!.tracks(withMediaType: .video).first!
let reader: AVAssetReader
do {
reader = try AVAssetReader(asset: composition!)
} catch {
QELog("init reader failed, reason: \(error.localizedDescription)")
return
}
    // Setting timeRange makes the reader decode only this range of the video, which improves efficiency
reader.timeRange = reverseTimeRange
let settings: [String: Any] = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange]
let readerOutput = AVAssetReaderTrackOutput(track: track, outputSettings: settings)
reader.add(readerOutput)
reader.startReading()
    // Read every frame in the range
while let sample = readerOutput.copyNextSampleBuffer() {
sampleBuffers.append(sample)
        // Segmented loading: flush to disk once the batch is full
if sampleBuffers.count >= MAX_READ_SAMPLE_COUNT {
writeAsset()
}
}
if sampleBuffers.count > 0 {
writeAsset()
}
reader.cancelReading()
    // Merge the temporarily stored assets (popLast pops the segments in reverse order, which is the correct playback order for the reversed video)
let outputComposition = AVMutableComposition()
var insertTime: CMTime = .zero
while let path = tempVideoPartPaths.popLast() {
let url = URL(fileURLWithPath: path)
let options: [String: Any] = [AVURLAssetPreferPreciseDurationAndTimingKey: NSNumber(value: true)]
let asset = AVURLAsset(url: url, options: options)
let timeRange = CMTimeRange(start: .zero, duration: asset.duration)
do {
try outputComposition.insertTimeRange(timeRange, of: asset, at: insertTime)
} catch {
QELog("merge assets failed, reason:\(error.localizedDescription)")
}
insertTime = CMTimeAdd(insertTime, asset.duration)
}
    // Remove the temporary video segments
removeTempVideoParts()
progress = 1
DispatchQueue.main.async {
QELog("开始反转视频结束")
self.completionClosure?(outputComposition)
}
}
Performance optimization
Without performance optimization, reversing even a 10-second video will crash your app because the memory usage gets too high.
The core idea of the optimization is segmented loading.
We define a maximum batch size, MAX_READ_SAMPLE_COUNT. Each time the reader has read that many frames into the array, we reverse the array and write it out to a temporary file in the sandbox, saving the file's path. At the end, we simply walk the saved paths in reverse order, read each segment back, and insert it into an AVMutableComposition.
The snippet below illustrates the segmented loading; adjusting MAX_READ_SAMPLE_COUNT decides how many frames are held in memory at a time.
// Read every frame in the range
while let sample = readerOutput.copyNextSampleBuffer() {
    sampleBuffers.append(sample)
    // Segmented loading: once the batch is full, reverse it and flush it to a temporary file
    if sampleBuffers.count >= MAX_READ_SAMPLE_COUNT {
        writeAsset()
    }
}
// Flush whatever frames are left over
if sampleBuffers.count > 0 {
    writeAsset()
}
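One further knob that can help keep peak memory down, which is my own suggestion rather than something the original code does: wrap each batch's flush in an autoreleasepool so that autoreleased Core Media objects from the finished batch are reclaimed before the next batch is read.
// Sketch: the same read loop, with each flush wrapped in an autoreleasepool.
// How much this helps depends on how the sample buffers are bridged, so profile before adopting it.
while let sample = readerOutput.copyNextSampleBuffer() {
    sampleBuffers.append(sample)
    if sampleBuffers.count >= MAX_READ_SAMPLE_COUNT {
        autoreleasepool {
            writeAsset()
        }
    }
}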