Preface
The previous article covered the basic concepts of audio and video development (音视频开发概念). This one introduces four ways to capture audio and video (Demo: 采集实现Demo).
System wrapper:
UIImagePickerController
Two approaches built on the AVFoundation framework:
AVCaptureSession + AVCaptureMovieFileOutput
AVCaptureSession + AVAssetWriter
Third-party framework:
GPUImage
UIImagePickerController
UIImagePickerController is the system-provided interface for capturing and selecting photos and videos. It only exposes a few simple options, so the degree of customization is low: you can replace the overlay controls and adjust things like video quality, but not much more.
- Lazily initialize the UIImagePickerController
fileprivate lazy var imagePickerViewController : UIImagePickerController = {
    let imagePickerViewController = UIImagePickerController()
    imagePickerViewController.delegate = self
    imagePickerViewController.allowsEditing = true
    return imagePickerViewController
}()
- Open the photo library
self.imagePickerViewController.sourceType = .photoLibrary
self.present(self.imagePickerViewController, animated: true, completion: nil)
- Take photos and record video
self.imagePickerViewController.sourceType = .camera
self.imagePickerViewController.videoMaximumDuration = 10.0
self.imagePickerViewController.mediaTypes = [kUTTypeImage as String, kUTTypeMovie as String]
self.present(self.imagePickerViewController, animated: true, completion: nil)
Note: import the MobileCoreServices framework first. mediaTypes is an array; kUTTypeImage enables photo capture and kUTTypeMovie enables video recording, and other values have no effect.
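It is also worth guarding against devices that have no camera (the iOS simulator, for example) before switching the source type; a minimal check, not part of the original Demo:
// Only switch to the camera source if one is actually available;
// fall back to the photo library otherwise.
if UIImagePickerController.isSourceTypeAvailable(.camera) {
    self.imagePickerViewController.sourceType = .camera
} else {
    self.imagePickerViewController.sourceType = .photoLibrary
}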
- Implement the UIImagePickerControllerDelegate methods to receive the selected asset and process it
extension RecordingSystemViewController : UIImagePickerControllerDelegate, UINavigationControllerDelegate {
    func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) {
        if let image = info[UIImagePickerController.InfoKey.originalImage] as? UIImage {
            self.backImageView.image = image
        }
        if let videoUrl = info[UIImagePickerController.InfoKey.mediaURL] as? URL {
            let videoVC = VideoDetailViewController()
            videoVC.url = videoUrl
            self.navigationController?.pushViewController(videoVC, animated: true)
        }
        self.dismiss(animated: true, completion: nil)
    }
}
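One detail to keep in mind: the lazy initializer above sets allowsEditing = true, so the user's cropped result is delivered under the .editedImage key rather than .originalImage. A small adjustment, assuming you want the crop when it exists:
// Prefer the cropped image when editing is enabled, fall back to the original.
if let image = (info[.editedImage] as? UIImage) ?? (info[.originalImage] as? UIImage) {
    self.backImageView.image = image
}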
For an explanation of UIImagePickerController's properties, see this post: UIImagePickerController类
AVCaptureSession+AVCaptureMovieFileOutput
This approach lets you fully customize the recording screen, and it is simpler to implement than AVCaptureSession + AVAssetWriter. However, it gives you no access to the frames before they are written, so you cannot apply filters during recording; the file is the untouched source video, and compression can only happen after recording finishes.
Workflow
1. Create the capture session
2. Configure the video input
3. Configure the audio input
4. Configure the output. Video and audio are merged into a single output here, although the delegate methods can still hand you video or audio data separately. Give AVCaptureMovieFileOutput a file path; once recording starts, data is written to that path.
5. Add the video preview layer
6. Start capturing. No data is written yet at this point; writing begins when the user taps the record button. (A sketch wiring these steps together follows the list.)
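A minimal sketch of how the six steps might be wired together, assuming a deployment target of iOS 10.2 or later and the setUp… methods shown in the snippets below (the wiring itself is not taken from the Demo):
override func viewDidLoad() {
    super.viewDidLoad()
    // Step 1 happens implicitly: the lazy `session` property is created on first access.
    setUpVideo(position: .back)   // 2. video input
    setUpAudio()                  // 3. audio input
    setUpFileOut()                // 4. movie file output
    setUpLayerView()              // 5. preview layer
    setUpUI()                     // custom controls on top of the preview
    // 6. start capturing; startRunning() blocks, so keep it off the main thread
    DispatchQueue.global().async {
        self.session.startRunning()
    }
}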
- Create the capture session
fileprivate lazy var session : AVCaptureSession = {
    let session = AVCaptureSession()
    session.sessionPreset = .high
    return session
}()
- Configure the video input (see the AVCaptureDevice and AVCaptureInput properties)
// Video input
@available(iOS 10.2, *)
func setUpVideo(position: AVCaptureDevice.Position) {
    let videoCaptureDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: position)
    // You must call lockForConfiguration() before changing any AVCaptureDevice properties.
    // (Setting isVideoHDREnabled here crashed; a likely reason is discussed after this snippet.)
    // do {
    //     try videoCaptureDevice?.lockForConfiguration()
    //     videoCaptureDevice?.activeFormat = (videoCaptureDevice?.formats[0])!
    //     videoCaptureDevice?.automaticallyAdjustsVideoHDREnabled = false
    //     videoCaptureDevice?.isVideoHDREnabled = true
    // } catch {
    //
    // }
    // Wrap the device in an input object
    do {
        videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice!)
    } catch {
    }
    if self.session.canAddInput(videoInput) {
        self.session.addInput(videoInput)
    }
}
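A likely cause of the commented-out crash: isVideoHDREnabled may only be set when automaticallyAdjustsVideoHDREnabled is false and the device's active format actually supports video HDR, and formats[0] often does not. A hedged sketch of a guarded version (not verified against the Demo):
// Only enable HDR when the active format reports support, and balance lock/unlock.
if let device = videoCaptureDevice {
    do {
        try device.lockForConfiguration()
        if device.activeFormat.isVideoHDRSupported {
            device.automaticallyAdjustsVideoHDREnabled = false
            device.isVideoHDREnabled = true
        }
        device.unlockForConfiguration()
    } catch {
        print("lockForConfiguration failed: \(error)")
    }
}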
- Configure the audio input
// Audio input
func setUpAudio() {
    let audioCaptureDevice = AVCaptureDevice.default(for: .audio)
    do {
        audioInput = try AVCaptureDeviceInput(device: audioCaptureDevice!)
    } catch {
    }
    if session.canAddInput(audioInput) {
        self.session.addInput(audioInput)
    }
}
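Camera and microphone capture also require the user's permission; the snippets here assume it has already been granted. A minimal sketch of requesting access up front (remember the NSCameraUsageDescription and NSMicrophoneUsageDescription keys in Info.plist):
// Ask for camera and microphone access before configuring the session.
AVCaptureDevice.requestAccess(for: .video) { videoGranted in
    AVCaptureDevice.requestAccess(for: .audio) { audioGranted in
        DispatchQueue.main.async {
            guard videoGranted && audioGranted else { return }
            // safe to run the setUp… methods here
        }
    }
}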
- Configure the output (see the AVCaptureFileOutput and AVCaptureConnection properties)
func setUpFileOut() {
    fileOutput = AVCaptureMovieFileOutput()
    // let captureConnection = fileOutput.connection(with: .video)
    //
    // if ((captureConnection?.isVideoStabilizationSupported)!) {
    //     // Enable video stabilization (this also crashed; see the note after this snippet)
    //     captureConnection?.preferredVideoStabilizationMode = .auto
    // }
    if session.canAddOutput(fileOutput) {
        self.session.addOutput(fileOutput)
    }
}
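The stabilization crash is most likely a nil connection: an AVCaptureMovieFileOutput has no connections until it has been added to a session that already has a matching input, so connection(with: .video) returns nil here and the force unwrap traps. A sketch that queries the connection after addOutput (an assumption, not the Demo's code):
// Configure stabilization only after the output has joined the session,
// when its video connection actually exists.
if session.canAddOutput(fileOutput) {
    session.addOutput(fileOutput)
    if let connection = fileOutput.connection(with: .video),
       connection.isVideoStabilizationSupported {
        connection.preferredVideoStabilizationMode = .auto
    }
}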
- Video preview (see the AVCaptureVideoPreviewLayer properties)
func setUpLayerView() {
    view.addSubview(playerView)
    previewLayer = AVCaptureVideoPreviewLayer(session: self.session)
    previewLayer.frame = CGRect(x: 0, y: 0, width: SCREEN_WIDTH, height: SCREEN_WIDTH)
    self.playerView.layer.addSublayer(previewLayer)
}
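If the preview looks stretched or letterboxed, setting the layer's videoGravity helps; .resizeAspectFill is a common choice (an optional tweak, not in the original Demo):
// Fill the square preview area while preserving the video's aspect ratio.
previewLayer.videoGravity = .resizeAspectFill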
At this point the recording screen is in place, so we can add our own views on top of it, such as a record button and a camera-switch button.
fileprivate func setUpUI() {
    videoCustomView.delegate = self
    // UIView subviews and plain sublayers share one layer tree and can cover each other;
    // zPosition decides which one ends up on top.
    videoCustomView.layer.zPosition = 3
    playerView.addSubview(videoCustomView)
    videoCustomView.snp.makeConstraints { (make) in
        make.edges.equalTo(UIEdgeInsets(top: 0, left: 0, bottom: 0, right: 0))
    }
}
Implement the custom view's delegate methods
extension MovieFileOutputRecordingViewController : VideoCustomViewControlDelegate {
    // Record button tapped
    func clickPlayButton(isPlay: Bool) {
        if isPlay {
            writeDataTofile()
        } else {
            self.session.stopRunning()
            self.fileOutput.stopRecording()
        }
    }
    // Recording finished
    func recordingEnd() {
        self.session.stopRunning()
        self.fileOutput.stopRecording()
    }
    // Switch between front and back cameras
    func switchCamera() {
        // Batch the input swap so the session reconfigures atomically
        session.beginConfiguration()
        session.removeInput(videoInput)
        if videoInput.device.position == .back {
            setUpVideo(position: .front)
        } else {
            setUpVideo(position: .back)
        }
        session.commitConfiguration()
        session.startRunning()
    }
}
Issue: switching to the front camera crashes when the device type is builtInDualCamera. The likely reason is that no device has a front-facing dual camera, so AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .front) returns nil and the force unwrap in setUpVideo traps.
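One way to guard against this is to ask a discovery session for whatever camera exists at the requested position; a sketch (the device types and fallback order are an assumption, not taken from the Demo):
// Pick the best available camera for a position instead of force-unwrapping
// a device type that may not exist on that side.
func camera(at position: AVCaptureDevice.Position) -> AVCaptureDevice? {
    let discovery = AVCaptureDevice.DiscoverySession(
        deviceTypes: [.builtInDualCamera, .builtInWideAngleCamera],
        mediaType: .video,
        position: position)
    return discovery.devices.first
}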
- Write the data
func writeDataTofile() {
    if FileManager.default.fileExists(atPath: self.videoStoragePath()) {
        do {
            try FileManager.default.removeItem(atPath: self.videoStoragePath())
        } catch {
        }
    }
    let path = self.videoStoragePath()
    videoUrl = URL(fileURLWithPath: path)
    self.fileOutput.startRecording(to: videoUrl, recordingDelegate: self)
}

func videoStoragePath() -> String {
    let document = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true).first
    return document! + "/video.mp4"
}
Note: before writing you must delete any file already at the path; startRecording does not overwrite it, so otherwise you keep getting the data from the first recording. The information for a 10-second recording is shown in the figure below.
A one-minute recording comes out at roughly 50 MB, which is too heavy to upload to a server as-is, so it needs to be compressed after recording finishes.
func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
    let exportSession = AVAssetExportSession(asset: AVAsset(url: videoUrl), presetName: AVAssetExportPresetMediumQuality)
    let document = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true).first
    let newVideoUrl = URL(fileURLWithPath: document! + "/newVideo.mp4")
    do {
        try FileManager.default.removeItem(atPath: document! + "/newVideo.mp4")
    } catch {
    }
    // Destination URL
    exportSession?.outputURL = newVideoUrl
    // Container format of the exported file
    exportSession?.outputFileType = AVFileType.mp4
    // Optimize for network use
    exportSession?.shouldOptimizeForNetworkUse = true
    // Export asynchronously; the completion handler runs on a background queue,
    // so hop back to the main queue before touching UI
    exportSession?.exportAsynchronously(completionHandler: {
        guard exportSession?.status == .completed else { return }
        Alamofire.upload(newVideoUrl, to: "http://m.toysplanet.cn/gateway/file/upload", method: .post, headers: nil)
        DispatchQueue.main.async {
            let videoVC = VideoDetailViewController()
            videoVC.url = newVideoUrl
            self.navigationController?.pushViewController(videoVC, animated: true)
        }
    })
}
Now let's look at the compressed video's information.
The video has been compressed down to about 1 MB.
Extra: the recording progress animation is implemented with CADisplayLink + UIBezierPath; see the Demo for the full implementation, and the sketch below for the general idea.
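A minimal sketch of that idea, assuming a 10-second cap as in the example above (the Demo's actual implementation may differ):
// Draws a circular progress ring whose stroke advances on every screen refresh.
class RecordProgressView: UIView {
    private let ringLayer = CAShapeLayer()
    private var displayLink: CADisplayLink?
    private var elapsed: CFTimeInterval = 0
    private let duration: CFTimeInterval = 10   // assumed maximum recording length

    override func layoutSubviews() {
        super.layoutSubviews()
        let radius = min(bounds.width, bounds.height) / 2 - 3
        ringLayer.path = UIBezierPath(arcCenter: CGPoint(x: bounds.midX, y: bounds.midY),
                                      radius: radius,
                                      startAngle: -.pi / 2,
                                      endAngle: 1.5 * .pi,
                                      clockwise: true).cgPath
        ringLayer.fillColor = UIColor.clear.cgColor
        ringLayer.strokeColor = UIColor.white.cgColor
        ringLayer.lineWidth = 6
        ringLayer.strokeEnd = 0
        if ringLayer.superlayer == nil { layer.addSublayer(ringLayer) }
    }

    func start() {
        elapsed = 0
        displayLink = CADisplayLink(target: self, selector: #selector(tick))
        displayLink?.add(to: .main, forMode: .common)
    }

    @objc private func tick(_ link: CADisplayLink) {
        elapsed += link.duration
        ringLayer.strokeEnd = CGFloat(min(elapsed / duration, 1))
        if elapsed >= duration {
            displayLink?.invalidate()
            displayLink = nil
        }
    }
}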
To keep this article to a reasonable length, a quick summary: in both approaches above the system handles storing and displaying the data for you, which makes basic video recording simple, but neither supports capture with real-time filters. The next article will cover real-time filtered capture with AVCaptureSession + AVAssetWriter and with GPUImage.
Special thanks to the friends above for sharing their knowledge.