iOS Audio Recording

Three common ways to implement audio recording on iOS.

Before recording, configure the AVAudioSession:

AVAudioSession *audioSession = [AVAudioSession sharedInstance];
[audioSession setCategory:AVAudioSessionCategoryRecord error:nil]; // record category; if needed, the sample rate and IO buffer duration can also be tuned via setPreferredSampleRate: and setPreferredIOBufferDuration:
[audioSession setActive:YES error:nil];
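
Recording also requires microphone permission (an NSMicrophoneUsageDescription entry in Info.plist); a minimal sketch of requesting it up front:

[[AVAudioSession sharedInstance] requestRecordPermission:^(BOOL granted) {
    if (!granted) {
        NSLog(@"Microphone permission denied"); // recording will produce silence or fail without it
    }
}];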

1. AVAudioRecorder:

Using AVAudioRecorder is relatively straightforward:
NSError *error;
NSDictionary *settings = @{AVFormatIDKey : @(kAudioFormatLinearPCM),
                           AVSampleRateKey : @8000.0,
                           AVNumberOfChannelsKey : @2,
                           AVLinearPCMBitDepthKey : @16,
                           AVLinearPCMIsNonInterleaved : @NO,
                           AVLinearPCMIsFloatKey : @NO,
                           AVLinearPCMIsBigEndianKey : @NO};
AVAudioRecorder *recorder = [[AVAudioRecorder alloc] initWithURL:[NSURL fileURLWithPath:filePath] settings:settings error:&error];
if (error) {
    NSLog(@"%@", error.localizedDescription); // inspect the specific failure reason
}
recorder.delegate = self;
if ([recorder prepareToRecord]) {
    [recorder record];
}

That is all it takes for simple audio recording. AVAudioRecorder also offers pause and resume, and its delegate delivers callbacks for recording completion, encoding errors, and so on, as sketched below.
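
A minimal sketch of those controls and the two most useful AVAudioRecorderDelegate callbacks:

[recorder pause]; // pause; a later record call resumes from this point
[recorder stop];  // finish the file and trigger the delegate callback

#pragma mark - AVAudioRecorderDelegate
- (void)audioRecorderDidFinishRecording:(AVAudioRecorder *)recorder successfully:(BOOL)flag {
    // flag is NO when recording ended because of an encoding error
}
- (void)audioRecorderEncodeErrorDidOccur:(AVAudioRecorder *)recorder error:(NSError *)error {
    NSLog(@"Encode error: %@", error.localizedDescription);
}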

2. AVCaptureSession:

_captureSession = [[AVCaptureSession alloc] init];
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
NSError *error;
_deviceInput = [[AVCaptureDeviceInput alloc] initWithDevice:device error:&error];
if ([_captureSession canAddInput:_deviceInput]) { // add the microphone input
    [_captureSession addInput:_deviceInput];
}

Using AVCaptureAudioFileOutput to write the audio to a file automatically (note: AVCaptureAudioFileOutput is only available on macOS; on iOS, use the AVCaptureAudioDataOutput + AVAssetWriter approach shown next):

_audioFileOutput = [[AVCaptureAudioFileOutput alloc] init];
if ([_captureSession canAddOutput:_audioFileOutput]) {
    [_captureSession addOutput:_audioFileOutput];
}
[_captureSession startRunning]; // the session must be running for recording to produce data
[_audioFileOutput setAudioSettings:settings]; // the same settings dictionary as above
[_audioFileOutput startRecordingToOutputFileURL:[NSURL fileURLWithPath:filePath] outputFileType:AVFileTypeCoreAudioFormat recordingDelegate:self];

#pragma mark - Delegate (must be implemented)
- (void)captureOutput:(AVCaptureFileOutput *)output didFinishRecordingToOutputFileAtURL:(NSURL *)outputFileURL fromConnections:(NSArray *)connections error:(nullable NSError *)error {
    // Recording-finished callback; move, transcode, or otherwise process the file here.
}
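
To end this kind of recording, stop the output (which triggers the callback above) and then the session:

[_audioFileOutput stopRecording];
[_captureSession stopRunning];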

Using AVAssetWriter to write the recorded audio stream to a file yourself

_audioDataOutput = [[AVCaptureAudioDataOutput alloc] init]; // delivers raw sample buffers for us to write ourselves
_audioOutQueue = dispatch_queue_create("audio.out.queue", DISPATCH_QUEUE_SERIAL); // serial queue for the sample-buffer callbacks
[_audioDataOutput setSampleBufferDelegate:self queue:_audioOutQueue];
if ([_captureSession canAddOutput:_audioDataOutput]) {
    [_captureSession addOutput:_audioDataOutput];
}
NSError *error;
_assetWriter = [[AVAssetWriter alloc] initWithURL:[NSURL fileURLWithPath:filePath] fileType:AVFileTypeCoreAudioFormat error:&error];
_assetWriteInput = [[AVAssetWriterInput alloc] initWithMediaType:AVMediaTypeAudio outputSettings:settings]; // same settings dictionary as above
_assetWriteInput.expectsMediaDataInRealTime = YES;
if ([_assetWriter canAddInput:_assetWriteInput]) {
    [_assetWriter addInput:_assetWriteInput];
}

#pragma mark - Data callback
- (void)captureOutput:(AVCaptureOutput *)output didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    if (_assetWriter.status == AVAssetWriterStatusUnknown) {
        // Start the writer at the timestamp of the first buffer
        CMTime startTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
        if (![_assetWriter startWriting]) {
            NSLog(@"%@", _assetWriter.error.localizedDescription);
        }
        [_assetWriter startSessionAtSourceTime:startTime];
    }
    if (_assetWriter.status == AVAssetWriterStatusWriting && _assetWriteInput.isReadyForMoreMediaData) {
        [_assetWriteInput appendSampleBuffer:sampleBuffer];
    }
}
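
When recording ends, the writer must be finalized explicitly, otherwise the file header is never written and the output is unplayable. A minimal sketch:

[_captureSession stopRunning];
[_assetWriteInput markAsFinished];
[_assetWriter finishWritingWithCompletionHandler:^{
    // the file at filePath is now complete
}];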

3. AudioUnit:

static void CheckStatus(OSStatus status, NSString *errMsg, BOOL fatal) { // log a readable message when an operation fails
    if (status != noErr) {
        char fourcc[16];
        *(UInt32 *)fourcc = CFSwapInt32HostToBig(status); // many OSStatus values are four-character codes
        fourcc[4] = '\0';
        if (isprint(fourcc[0]) && isprint(fourcc[1]) && isprint(fourcc[2]) && isprint(fourcc[3])) {
            NSLog(@"%@ : %s", errMsg, fourcc);
        } else {
            NSLog(@"%@ : %d", errMsg, (int)status);
        }
        if (fatal) {
            exit(-1);
        }
    }
}

AudioUnit initialization

AudioComponentDescription description;
description.componentType = kAudioUnitType_Output; // recording generally uses an Output unit; other types such as Effect and FormatConverter decode, mix, or otherwise transform audio
description.componentSubType = kAudioUnitSubType_RemoteIO; // subtype for the chosen type
description.componentFlags = 0;
description.componentFlagsMask = 0;
description.componentManufacturer = kAudioUnitManufacturer_Apple; // manufacturer

_ioUnitRef = AudioComponentFindNext(NULL, &description);
CheckStatus(AudioComponentInstanceNew(_ioUnitRef, &_audioUnit), @"Failed to create the audio unit", NO);

AudioUnit configuration

static const AudioUnitElement ElementOne = 1; // bus 1 is RemoteIO's input (microphone) element

UInt32 flag = 1;
CheckStatus(AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, ElementOne, &flag, sizeof(flag)), @"Failed to enable microphone input", NO);

/**
 AudioStreamBasicDescription: largely mirrors the settings dictionaries used above
 mSampleRate;       sample rate, e.g. 44100
 mFormatID;         format, e.g. kAudioFormatLinearPCM
 mFormatFlags;      format flags, e.g. kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked
 mBytesPerPacket;   bytes per packet
 mFramesPerPacket;  frames per packet
 mBytesPerFrame;    bytes per frame
 mChannelsPerFrame; 1 = mono, 2 = stereo
 mBitsPerChannel;   bits per sample (8/16/24/32)
 mReserved;         reserved
 */
_streamDescription.mFormatID = kAudioFormatLinearPCM;
_streamDescription.mSampleRate = 44100.00;
_streamDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; // packed 16-bit integer samples
_streamDescription.mChannelsPerFrame = 1;
_streamDescription.mFramesPerPacket = 1;
_streamDescription.mBitsPerChannel = 16;
_streamDescription.mBytesPerFrame = 2;
_streamDescription.mBytesPerPacket = 2;

// Note the scope: mic data leaves the input element on its *output* scope
CheckStatus(AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, ElementOne, &_streamDescription, sizeof(_streamDescription)), @"Failed to set stream format", NO);

AURenderCallbackStruct renderCallbackStruct;
renderCallbackStruct.inputProc = recordingCallback;
renderCallbackStruct.inputProcRefCon = (__bridge void *)self;
CheckStatus(AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, ElementOne, &renderCallbackStruct, sizeof(renderCallbackStruct)), @"Failed to set input callback", NO);

NSString *destinationFilePath = [[NSHomeDirectory() stringByAppendingPathComponent:@"Documents"] stringByAppendingPathComponent:@"test.caf"];
CFURLRef urlRef = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (CFStringRef)destinationFilePath, kCFURLPOSIXPathStyle, false);
CheckStatus(ExtAudioFileCreateWithURL(urlRef, kAudioFileCAFType, &_streamDescription, NULL, kAudioFileFlags_EraseFile, &_audioFileRef), @"Failed to create output file", NO);
CFRelease(urlRef);

The input callback pulls the rendered samples out of the audio unit and appends them to the file:

static OSStatus recordingCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData) {
    ViewController *controller = (__bridge ViewController *)inRefCon;

    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mNumberChannels = 1;
    bufferList.mBuffers[0].mDataByteSize = 2 * inNumberFrames; // buffer size: 16-bit mono = 2 bytes per frame
    bufferList.mBuffers[0].mData = malloc(inNumberFrames * 2);

    CheckStatus(AudioUnitRender(controller->_audioUnit, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &bufferList), @"AudioUnitRender failed", NO);
    CheckStatus(ExtAudioFileWrite(controller->_audioFileRef, inNumberFrames, &bufferList), @"ExtAudioFileWrite failed", NO);
    free(bufferList.mBuffers[0].mData); // avoid leaking one buffer per callback
    return noErr;
}
Finally, initialize and start the unit:

CheckStatus(AudioUnitInitialize(_audioUnit), @"Failed to initialize audio unit", NO);
AudioOutputUnitStart(_audioUnit);
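
Stopping mirrors the setup; ExtAudioFileDispose is what flushes and finalizes the CAF file. A minimal teardown sketch:

AudioOutputUnitStop(_audioUnit);
AudioUnitUninitialize(_audioUnit);
AudioComponentInstanceDispose(_audioUnit);
ExtAudioFileDispose(_audioFileRef); // flush pending writes and close the file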
