On iOS, AVCaptureSession, Audio Unit, and Audio Queue can all record audio in real time.
AVCaptureSession delivers PCM at a default sample rate of 44100 Hz, so if you want a custom sample rate without resampling, use Audio Unit or Audio Queue instead.
For example, capturing audio to encode as 8 kHz, 16-bit G.711 requires recording at a sample rate of 8000 Hz.
The code is as follows:
// AQCapture.h
#import <Foundation/Foundation.h>

@protocol AQCaptureDelegate <NSObject>
- (void)returnData:(NSMutableData *)data;
@end

@interface AQCapture : NSObject
// weak avoids a retain cycle between the capture object and its delegate
@property (nonatomic, weak) id<AQCaptureDelegate> delegate;
- (void)startRecord;
- (void)stopRecord;
@end
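
Before the implementation, here is a minimal sketch of how a caller might use this class. It assumes a plain view controller drives the recording and simply logs each callback; microphone-permission handling (NSMicrophoneUsageDescription plus a runtime permission request) and error handling are omitted:

#import <UIKit/UIKit.h>
#import "AQCapture.h"

@interface RecorderViewController : UIViewController <AQCaptureDelegate>
@property (nonatomic, strong) AQCapture *capture;
@end

@implementation RecorderViewController
- (void)viewDidLoad {
    [super viewDidLoad];
    self.capture = [[AQCapture alloc] init];
    self.capture.delegate = self;
    [self.capture startRecord];
}

// Delivered once per filled buffer: 960 bytes of 8 kHz, 16-bit, mono PCM
- (void)returnData:(NSMutableData *)data {
    NSLog(@"captured %lu bytes", (unsigned long)data.length);
}
@end

Note that returnData: arrives on the audio queue's internal thread, so dispatch to the main queue before touching any UI.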
#import "AQCapture.h"
#import
#import
#define QUEUE_BUFFER_SIZE 3                // number of buffers for the recording queue
#define kDefaultBufferDurationSeconds 0.06 // tuned so each buffer holds 960 bytes (0.06 s * 8000 Hz * 2 bytes/sample); a callback may deliver fewer than 960 bytes, which must be handled
#define kDefaultSampleRate 8000            // capture sample rate: 8000 Hz
static BOOL isRecording = NO;

@interface AQCapture () {
    AudioQueueRef _audioQueue;                             // recording audio queue
    AudioStreamBasicDescription _recordFormat;             // capture format description
    AudioQueueBufferRef _audioBuffers[QUEUE_BUFFER_SIZE];  // capture buffers
}
@end
@implementation AQCapture

- (instancetype)init
{
    self = [super init];
    if (self) {
        // PlayAndRecord is required if media playback must keep working while recording;
        // the session must also be activated before the queue starts
        [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord error:nil];
        [[AVAudioSession sharedInstance] setActive:YES error:nil];

        // Describe the capture format: 8 kHz, mono, packed 16-bit signed-integer linear PCM
        memset(&_recordFormat, 0, sizeof(_recordFormat));
        _recordFormat.mSampleRate = kDefaultSampleRate;
        _recordFormat.mChannelsPerFrame = 1;
        _recordFormat.mFormatID = kAudioFormatLinearPCM;
        _recordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        _recordFormat.mBitsPerChannel = 16;
        _recordFormat.mBytesPerPacket = _recordFormat.mBytesPerFrame = (_recordFormat.mBitsPerChannel / 8) * _recordFormat.mChannelsPerFrame;
        _recordFormat.mFramesPerPacket = 1;

        // Create the audio input queue; passing NULL for the run loop means the
        // callback fires on one of the queue's internal threads
        AudioQueueNewInput(&_recordFormat, inputBufferHandler, (__bridge void *)(self), NULL, NULL, 0, &_audioQueue);

        // Estimate the buffer size: 0.06 s * 8000 Hz = 480 frames, 480 * 2 bytes = 960 bytes
        int frames = (int)ceil(kDefaultBufferDurationSeconds * _recordFormat.mSampleRate);
        int bufferByteSize = frames * _recordFormat.mBytesPerFrame;
        // NSLog(@"buffer size: %d", bufferByteSize);

        // Allocate the buffers and enqueue them so the queue can start filling them
        for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
            AudioQueueAllocateBuffer(_audioQueue, bufferByteSize, &_audioBuffers[i]);
            AudioQueueEnqueueBuffer(_audioQueue, _audioBuffers[i], 0, NULL);
        }
    }
    return self;
}
// AudioQueue input callback, invoked each time a buffer has been filled
void inputBufferHandler(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer,
                        const AudioTimeStamp *inStartTime, UInt32 inNumPackets,
                        const AudioStreamPacketDescription *inPacketDesc)
{
    if (inNumPackets > 0) {
        AQCapture *recorder = (__bridge AQCapture *)inUserData;
        [recorder processAudioBuffer:inBuffer withQueue:inAQ];
    }
    // Hand the buffer back to the queue so recording can continue
    if (isRecording) {
        AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
    }
}
- (void)processAudioBuffer:(AudioQueueBufferRef)audioQueueBufferRef withQueue:(AudioQueueRef)audioQueueRef
{
    NSMutableData *data = [NSMutableData dataWithBytes:audioQueueBufferRef->mAudioData
                                                length:audioQueueBufferRef->mAudioDataByteSize];
    // The queue may deliver fewer than 960 bytes; pad with 0x00 up to the fixed frame size
    if (data.length < 960) {
        [data increaseLengthBy:(960 - data.length)]; // increaseLengthBy: zero-fills the added bytes
    }
    // NSLog(@"%@", data);
    [self.delegate returnData:data];
}
- (void)startRecord
{
    // Start recording
    AudioQueueStart(_audioQueue, NULL);
    isRecording = YES;
}

- (void)stopRecord
{
    if (isRecording) {
        isRecording = NO;
        // Stop the recording queue (true stops immediately instead of letting the
        // queued buffers drain), release its buffers, and deactivate the session;
        // success or failure is deliberately ignored here
        AudioQueueStop(_audioQueue, true);
        AudioQueueDispose(_audioQueue, true);
        _audioQueue = NULL;
        [[AVAudioSession sharedInstance] setActive:NO error:nil];
    }
    // NSLog(@"recording stopped");
}
- (void)dealloc
{
    // AudioQueueRef is a plain C type; assigning nil does not release it.
    // Dispose of the queue here in case stopRecord was never called.
    if (_audioQueue) {
        AudioQueueDispose(_audioQueue, true);
        _audioQueue = NULL;
    }
}
@end
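
The buffers handed to the delegate are still linear PCM; G.711 companding then maps each 16-bit sample to 8 bits, taking 8 kHz 16-bit PCM's 128 kbit/s down to G.711's 64 kbit/s. The code above stops at capture, so for reference here is a sketch of the classic µ-law conversion (the standard public-domain algorithm; A-law uses a different segment mapping, and encode_frame is just an illustrative helper, not part of the class above):

#include <stdint.h>
#include <stddef.h>

// Compand one 16-bit linear PCM sample into an 8-bit G.711 mu-law byte.
static uint8_t linear_to_ulaw(int16_t pcm) {
    int sample = pcm;                     // widen to int so negating -32768 is safe
    int sign = (sample >> 8) & 0x80;      // remember the sign bit
    if (sign) sample = -sample;           // work on the magnitude
    if (sample > 32635) sample = 32635;   // clip, leaving room for the bias
    sample += 0x84;                       // standard mu-law bias of 132

    // Find the segment: exponent 7 means bit 14 of the biased magnitude is set,
    // dropping by one for each lower leading-bit position
    int exponent = 7;
    for (int mask = 0x4000; (sample & mask) == 0 && exponent > 0; mask >>= 1) {
        exponent--;
    }
    int mantissa = (sample >> (exponent + 3)) & 0x0F;
    return (uint8_t)~(sign | (exponent << 4) | mantissa);
}

// Convert one 960-byte PCM frame (480 samples) into 480 G.711 bytes.
static void encode_frame(const int16_t *pcm, uint8_t *g711, size_t samples) {
    for (size_t i = 0; i < samples; i++) {
        g711[i] = linear_to_ulaw(pcm[i]);
    }
}

Calling encode_frame from returnData: with samples = 480 turns every 60 ms delegate callback into one G.711 frame ready for packetization.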