//
// AudioRecordManager.h
// Demo
//
// Created by tao on 2020/7/8.
// Copyright © 2020 idst. All rights reserved.
//
#import <AVFoundation/AVFoundation.h>
#import <AudioToolbox/AudioToolbox.h>
@protocol AudioRecorderManagerDelegate <NSObject>
/**
 * @discussion Recorder start callback; invoked on the main thread.
 */
- (void)recorderDidStart;
/**
 * @discussion Recorder stop callback; invoked on the main thread.
 */
- (void)recorderDidStop;
/**
 * @discussion Invoked whenever the recorder captures audio data. The work that usually
 * follows (VAD, compression, etc.) can be heavy, so to avoid blocking the main thread
 * this is invoked on the AudioQueue's own thread. Mind thread safety!
 */
- (void)voiceRecorded:(NSData *_Nonnull)frame;
/**
 * @discussion Reports the input volume level while recording.
 */
- (void)voiceVolume:(NSInteger)volume;
@end
NS_ASSUME_NONNULL_BEGIN
@interface AudioRecordManager : NSObject
@property (nonatomic, weak) id<AudioRecorderManagerDelegate> delegate;
@property (nonatomic, assign) BOOL isRecording;
+ (instancetype)sharedManager;
- (void)start;
- (void)stop;
@end
NS_ASSUME_NONNULL_END
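Before the implementation, a quick usage sketch may help. The snippet below is illustrative only (RecorderViewController is a hypothetical caller, not part of the demo): it adopts AudioRecorderManagerDelegate, drives the shared recorder, and, because voiceRecorded: arrives on the AudioQueue's own thread, hops to another queue before doing anything non-trivial. Note that recording also requires an NSMicrophoneUsageDescription entry in Info.plist.

// Hypothetical caller, an illustrative sketch rather than part of the original demo.
#import <UIKit/UIKit.h>
#import "AudioRecordManager.h"

@interface RecorderViewController : UIViewController <AudioRecorderManagerDelegate>
@end

@implementation RecorderViewController

- (void)viewDidLoad {
    [super viewDidLoad];
    [AudioRecordManager sharedManager].delegate = self;
}

- (IBAction)toggleRecording:(id)sender {
    AudioRecordManager *manager = [AudioRecordManager sharedManager];
    manager.isRecording ? [manager stop] : [manager start];
}

#pragma mark - AudioRecorderManagerDelegate

- (void)recorderDidStart {
    NSLog(@"recorder started"); // main thread
}

- (void)recorderDidStop {
    NSLog(@"recorder stopped"); // main thread
}

- (void)voiceRecorded:(NSData *)frame {
    // Called on the AudioQueue thread; do not touch UIKit here.
    dispatch_async(dispatch_get_global_queue(QOS_CLASS_UTILITY, 0), ^{
        // hand the 20 ms PCM frame to a recognizer / encoder
    });
}

- (void)voiceVolume:(NSInteger)volume {
    dispatch_async(dispatch_get_main_queue(), ^{
        // update a level meter with `volume`
    });
}

@end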
//
// AudioRecordManager.m
// Demo
//
// Created by tao on 2020/7/8.
// Copyright © 2020 idst. All rights reserved.
//
#import "AudioRecordManager.h"
#define BufferSeconds 0.02 // 20ms
@interface AudioRecordManager () {
    AudioQueueRef _audioQRef;                        // audio queue object
    AudioStreamBasicDescription _audioRecordFormat;  // audio stream format
    AudioQueueBufferRef _audioQBufferRefs[3];        // audio queue buffers
}
@property (nonatomic, assign) AudioFileID recordFileID;            // audio file handle
@property (nonatomic, assign) SInt64 recordPacket;                 // current packet index in the recording file
@property (nonatomic, copy) NSString *originAudioSessionCategory;  // audio session category to restore on stop
@property (nonatomic, strong) NSMutableData *bufferedAudioData;    // partial-frame buffer
@end
@implementation AudioRecordManager
/*!
 @discussion
 AudioQueue recording callback.
 @param inAQ
 The audio queue that invoked the callback.
 @param inBuffer
 An audio queue buffer, freshly filled by the audio queue, containing the new data the
 callback needs to write to the file.
 @param inStartTime
 The reference time of the first sample in the buffer.
 @param inNumberPacketDescriptions
 The number of packet descriptions. When recording a VBR (variable bitrate) format, the
 audio queue supplies this value, which can be passed straight to AudioFileWritePackets.
 CBR (constant bitrate) formats do not use packet descriptions; for CBR recording the
 queue still sets this parameter but sets inPacketDescs to NULL.
 */
static void inputAudioQueueBufferHandler(void * __nullable inUserData,
                                         AudioQueueRef inAQ,
                                         AudioQueueBufferRef inBuffer,
                                         const AudioTimeStamp * inStartTime,
                                         UInt32 inNumberPacketDescriptions,
                                         const AudioStreamPacketDescription * __nullable inPacketDescs) {
    // AudioRecordManager *manager = (__bridge AudioRecordManager*) inUserData;
    AudioRecordManager *manager = [AudioRecordManager sharedManager];
    if (manager.isRecording) {
        // Write the captured packets to the file
        if (inNumberPacketDescriptions > 0) {
            AudioFileWritePackets(manager.recordFileID, FALSE, inBuffer->mAudioDataByteSize, inPacketDescs, manager.recordPacket, &inNumberPacketDescriptions, inBuffer->mAudioData);
            manager.recordPacket += inNumberPacketDescriptions;
        }
        NSData *data = [manager bufferPCMData:inBuffer];
        if (data) {
            [manager handleAudioData:data];
        }
        // Re-enqueue the buffer so the queue can reuse it
        AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
    } else {
        // Discard trailing data once recording has stopped
    }
}
+ (instancetype)sharedManager {
    static AudioRecordManager *manager = nil;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        manager = [[AudioRecordManager alloc] init];
    });
    return manager;
}

- (instancetype)init {
    if (self = [super init]) {
        // register for app resign/active notifications for recorder state
        [self registerForBackgroundNotifications];
        [self initAudioFormat];
    }
    return self;
}
- (void)initAudioFormat {
    _audioRecordFormat.mFormatID = kAudioFormatLinearPCM;   // encoding format
    _audioRecordFormat.mSampleRate = 16000;                 // sample rate
    _audioRecordFormat.mChannelsPerFrame = 1;               // channel count
    _audioRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
    _audioRecordFormat.mBitsPerChannel = 16;                // bits per sample
    _audioRecordFormat.mBytesPerPacket = (_audioRecordFormat.mBitsPerChannel >> 3) * _audioRecordFormat.mChannelsPerFrame; // bytes per packet
    _audioRecordFormat.mBytesPerFrame = _audioRecordFormat.mBytesPerPacket; // bytes per frame
    _audioRecordFormat.mFramesPerPacket = 1;                // frames per packet
}
- (void)initAudioQueue {
    OSStatus status = AudioQueueNewInput(&_audioRecordFormat, inputAudioQueueBufferHandler, (__bridge void * _Nullable)(self), NULL, NULL, 0, &_audioQRef);
    if (status != noErr) {
        NSLog(@"⚠️=== failed to create the audio queue");
        return;
    }
    // Compute the buffer size, then allocate and enqueue the buffers.
    // Note: computeRecordBufferSize already returns a byte count, so it must not be
    // multiplied by mBytesPerFrame again.
    int bufferByteSize = [self computeRecordBufferSize:&_audioRecordFormat seconds:BufferSeconds];
    for (int i = 0; i < 3; i++) {
        AudioQueueAllocateBuffer(_audioQRef, bufferByteSize, &_audioQBufferRefs[i]);
        AudioQueueEnqueueBuffer(_audioQRef, _audioQBufferRefs[i], 0, NULL);
    }
    AudioQueueSetParameter(_audioQRef, kAudioQueueParam_Volume, 1.0f);
}
- (void)initFilePath {
    NSString *path = [NSTemporaryDirectory() stringByAppendingPathComponent:@"test.caf"]; // CAF container, matching kAudioFileCAFType
    // A file path needs CFURLCreateWithFileSystemPath; CFURLCreateWithString expects a URL string
    CFURLRef url = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (__bridge CFStringRef)path, kCFURLPOSIXPathStyle, false);
    AudioFileCreateWithURL(url, kAudioFileCAFType, &_audioRecordFormat, kAudioFileFlags_EraseFile, &_recordFileID);
    CFRelease(url);
}
- (void)start {
    // Ask for microphone permission before starting
    // (requires an NSMicrophoneUsageDescription entry in Info.plist)
    [[AVAudioSession sharedInstance] requestRecordPermission:^(BOOL granted) {
        if (granted) {
            [self startRecord];
        } else {
            NSLog(@"====> microphone permission not granted!");
        }
    }];
}
- (void)stop {
    dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
        [self stopRecord];
        [[AVAudioSession sharedInstance] setCategory:self.originAudioSessionCategory error:nil];
        [[AVAudioSession sharedInstance] setActive:NO error:nil];
        self.bufferedAudioData = nil;
        if (self.delegate && [self.delegate respondsToSelector:@selector(recorderDidStop)]) {
            dispatch_async(dispatch_get_main_queue(), ^{
                [self.delegate recorderDidStop];
            });
        }
    });
}
- (void)startRecord {
    _recordPacket = 0;
    [self initFilePath];
    [self initAudioQueue];
    // Reconfigure the audio session in case another audio activity (e.g. music playback) changed it
    self.originAudioSessionCategory = [[AVAudioSession sharedInstance] category];
    [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord error:nil];
    [[AVAudioSession sharedInstance] setActive:YES error:nil];
    OSStatus status = AudioQueueStart(_audioQRef, NULL);
    if (status != noErr) {
        NSLog(@"⚠️=== failed to start recording");
        return;
    }
    self.isRecording = YES;
    self.bufferedAudioData = [NSMutableData data];
    if (self.delegate && [self.delegate respondsToSelector:@selector(recorderDidStart)]) {
        // Deliver on the main thread, as the delegate contract promises
        dispatch_async(dispatch_get_main_queue(), ^{
            [self.delegate recorderDidStart];
        });
    }
    // Level metering must be enabled explicitly before volume levels can be read
    UInt32 val = 1;
    AudioQueueSetProperty(_audioQRef, kAudioQueueProperty_EnableLevelMetering, &val, sizeof(UInt32));
}
- (void)stopRecord {
    if (self.isRecording) {
        self.isRecording = NO;
        AudioQueueStop(_audioQRef, true);
        AudioQueueDispose(_audioQRef, TRUE);
        AudioFileClose(self.recordFileID);
    }
}
// Current input volume level
- (float)getCurrentAudioPower {
    float channelAvg = 0;
    UInt32 dataSize = sizeof(AudioQueueLevelMeterState) * _audioRecordFormat.mChannelsPerFrame;
    AudioQueueLevelMeterState *levelMeter = (AudioQueueLevelMeterState *)malloc(dataSize);
    // Getter counterpart of kAudioQueueProperty_EnableLevelMetering
    OSStatus status = AudioQueueGetProperty(_audioQRef, kAudioQueueProperty_CurrentLevelMeter, levelMeter, &dataSize);
    if (status == noErr) {
        for (int i = 0; i < _audioRecordFormat.mChannelsPerFrame; i++) {
            channelAvg += levelMeter[i].mPeakPower;
        }
        channelAvg /= _audioRecordFormat.mChannelsPerFrame; // average across channels (mono here)
    }
    free(levelMeter);
    return channelAvg;
}
/*
 * AudioQueue delivers buffers of unpredictable length, so accumulate here and only
 * return a frame once it is full.
 * At 16 kHz / 16-bit mono: 16000 * 0.02 s = 320 frames, 320 * 2 bytes = 640 bytes = 20 ms
 * (see the worked example after the implementation).
 */
- (NSData *)bufferPCMData:(AudioQueueBufferRef)audioQBufferRef {
    // computeRecordBufferSize already returns a byte count
    int bfSize = [self computeRecordBufferSize:&_audioRecordFormat seconds:BufferSeconds];
    NSInteger nBufferSpaceLeft = bfSize - self.bufferedAudioData.length;
    NSInteger nBytesReceived = audioQBufferRef->mAudioDataByteSize;
    NSInteger nBytesToCopy = nBufferSpaceLeft >= nBytesReceived ? nBytesReceived : nBufferSpaceLeft;
    NSInteger nBytesLeft = nBytesReceived - nBytesToCopy;
    [self.bufferedAudioData appendBytes:audioQBufferRef->mAudioData length:nBytesToCopy];
    if (self.bufferedAudioData.length == bfSize) {
        // buffer is full
        NSData *frame = [NSData dataWithData:self.bufferedAudioData];
        // reset the buffer
        self.bufferedAudioData.length = 0;
        // save the remaining partial data
        if (nBytesLeft > 0) {
            [self.bufferedAudioData appendBytes:((char *)audioQBufferRef->mAudioData + nBytesToCopy) length:nBytesLeft];
        }
        return frame;
    }
    // still buffering; wait for more data
    return nil;
}
/*
 * FIXME - this runs in the recording callback and may involve time-consuming work such
 * as compression; should it be executed on the main thread instead?
 */
- (void)handleAudioData:(NSData *)audioData {
    float audioPower = [self getCurrentAudioPower];
    if (self.delegate && [self.delegate respondsToSelector:@selector(voiceVolume:)]) {
        [self.delegate voiceVolume:audioPower * 1000];
    }
    if (self.delegate && [self.delegate respondsToSelector:@selector(voiceRecorded:)]) {
        [self.delegate voiceRecorded:audioData];
    }
}
- (int)computeRecordBufferSize:(const AudioStreamBasicDescription*)format seconds:(float)seconds {
    int packets, frames, bytes = 0;
    frames = (int)ceil(seconds * format->mSampleRate);
    if (format->mBytesPerFrame > 0) {
        bytes = frames * format->mBytesPerFrame;
    } else {
        UInt32 maxPacketSize = 0;
        if (format->mBytesPerPacket > 0) {
            maxPacketSize = format->mBytesPerPacket; // constant packet size
        }
        if (format->mFramesPerPacket > 0) {
            packets = frames / format->mFramesPerPacket;
        } else {
            packets = frames; // worst-case scenario: 1 frame in a packet
        }
        if (packets == 0) { // sanity check
            packets = 1;
        }
        bytes = packets * maxPacketSize;
    }
    return bytes;
}
- (void)dealloc {
    [self unregisterForBackgroundNotifications];
    AudioQueueStop(_audioQRef, true);
    AudioQueueDispose(_audioQRef, TRUE);
    AudioFileClose(self.recordFileID);
}
#pragma mark - Background Notifications
// Note: the NSExtensionHost* notifications only fire inside app extensions; a regular
// app would observe UIApplicationWillResignActiveNotification and
// UIApplicationWillEnterForegroundNotification instead.
- (void)registerForBackgroundNotifications {
    [[NSNotificationCenter defaultCenter] addObserver:self
                                             selector:@selector(appResignActive)
                                                 name:NSExtensionHostWillResignActiveNotification
                                               object:nil];
    [[NSNotificationCenter defaultCenter] addObserver:self
                                             selector:@selector(appEnterForeground)
                                                 name:NSExtensionHostWillEnterForegroundNotification
                                               object:nil];
}

- (void)unregisterForBackgroundNotifications {
    [[NSNotificationCenter defaultCenter] removeObserver:self];
}

- (void)appResignActive {
}

- (void)appEnterForeground {
}
@end
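As a sanity check on the 20 ms frame math referenced in bufferPCMData, here is a small standalone program (not from the original post) that mirrors what computeRecordBufferSize does for this CBR PCM format, using the values set in initAudioFormat:

// Worked example of the frame-size arithmetic (illustrative only).
#include <math.h>
#include <stdio.h>

int main(void) {
    double sampleRate = 16000;          // _audioRecordFormat.mSampleRate
    int bytesPerFrame = (16 >> 3) * 1;  // 16-bit mono: 2 bytes per frame
    double seconds = 0.02;              // BufferSeconds (20 ms)

    int frames = (int)ceil(seconds * sampleRate); // 320 frames
    int bytes  = frames * bytesPerFrame;          // 640 bytes

    printf("%d frames -> %d bytes per 20 ms frame\n", frames, bytes);
    return 0; // prints: 320 frames -> 640 bytes per 20 ms frame
}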