iOS录音实时抛出buffer,同时转码成WAV

最近在做一个录音插件,要求实时获取 PCM buffer 并进行分析,同时把数据转换成 WAV 文件。

设置录音相关信息

// Configure the shared audio session for recording.
// The original code passed error:nil, silently swallowing failures;
// check the BOOL return value (not the error pointer) per Cocoa convention.
NSError *sessionError = nil;
if (![[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryRecord error:&sessionError]) {
    NSLog(@"setCategory failed: %@", sessionError);
}
if (![[AVAudioSession sharedInstance] setActive:YES error:&sessionError]) {
    NSLog(@"setActive failed: %@", sessionError);
}

// Zero-initialize the struct so unset fields (e.g. mReserved) are not
// stack garbage — the original left them uninitialized.
AudioStreamBasicDescription streamDes = {0};
// Sample rate in Hz.
streamDes.mSampleRate = sampleRate;
// Uncompressed linear PCM.
streamDes.mFormatID = kAudioFormatLinearPCM;
// Signed 16-bit integer samples, packed with no padding bits.
streamDes.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
// 1 = mono, 2 = stereo.
streamDes.mChannelsPerFrame = nChannel;
// Bits per sample for one channel.
streamDes.mBitsPerChannel = 16;
// Bytes per frame = (bits per sample / 8) * channel count.
streamDes.mBytesPerFrame = (streamDes.mBitsPerChannel / 8) * streamDes.mChannelsPerFrame;
// For uncompressed PCM each packet holds exactly one frame.
streamDes.mFramesPerPacket = 1;
// Bytes per packet = bytes per frame * frames per packet.
streamDes.mBytesPerPacket = streamDes.mBytesPerFrame * streamDes.mFramesPerPacket;

创建录音队列,分配并入队缓冲区

// Create the input (recording) queue. Passing NULL for the run-loop and
// run-loop-mode arguments makes the callback fire on an internal Audio
// Queue thread. The original ignored the OSStatus result; check it so a
// bad format/session does not fail silently.
OSStatus status = AudioQueueNewInput(&streamDes, InputCallback, (__bridge void * _Nullable)(self), NULL, NULL, 0, &recordQueue);
if (status != noErr) {
    NSLog(@"AudioQueueNewInput failed: %d", (int)status);
}
// Allocate and enqueue the reusable capture buffers.
for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
    AudioQueueAllocateBuffer(recordQueue, buffersize * 2, rBuffer + i);
    AudioQueueEnqueueBuffer(recordQueue, rBuffer[i], 0, NULL);
}

启动录音

  AudioQueueStart(recordQueue, 0);

拿到 Buffer 的回调

/// Audio Queue input callback: invoked on an internal Audio Queue thread
/// each time a capture buffer is filled. Counts buffers, optionally
/// appends the PCM to the WAV file and/or hands the raw samples to the
/// pitch detector, then re-enqueues the buffer for reuse.
void InputCallback(
                   void * __nullable               inUserData,
                   AudioQueueRef                   inAQ,
                   AudioQueueBufferRef             inBuffer,
                   const AudioTimeStamp *          inStartTime,
                   UInt32                          inNumberPacketDescriptions,
                   const AudioStreamPacketDescription * __nullable inPacketDescs)
{
    Recorder * recorder = (__bridge Recorder*)(inUserData);
    // Count processed buffers (callbacks arrive serially, so no lock needed).
    recorder->bufferIndex ++;

    // Create the worker queues exactly once. The original code created a
    // brand-new serial queue on every callback, which defeats the purpose
    // of a serial queue (no ordering across callbacks) and allocates on
    // the time-sensitive audio thread.
    static dispatch_queue_t wavQueue;
    static dispatch_queue_t pitchQueue;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        wavQueue = dispatch_queue_create("WAVQueue", DISPATCH_QUEUE_SERIAL);
        pitchQueue = dispatch_queue_create("PictchQueue", DISPATCH_QUEUE_SERIAL);
    });

    // Append this buffer's PCM to the WAV file. dispatch_sync (not async)
    // is required: the buffer is re-enqueued below, so its memory must not
    // be read after this function returns.
    if (recorder->isToWAV) {
        dispatch_sync(wavQueue, ^{
            [recorder PCMtoWAV:inBuffer IONumPackets:inNumberPacketDescriptions tag:recorder->bufferIndex];
            NSLog(@"Towav%@ ",[NSThread currentThread]);
        });
    }

    // Hand the raw samples to the pitch detector (synchronous for the
    // same buffer-lifetime reason as above).
    if (recorder->isPictchDetector){
        dispatch_sync(pitchQueue, ^{
            [recorder pictchDetector:inBuffer->mAudioData tag:recorder->bufferIndex];
        });
    }
    NSLog(@"InputCallback  %d",recorder-> bufferIndex);
    // Return the buffer to the queue so it can be filled again.
    AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}

//pcm 转 Wav
WAV 就是在 PCM 数据的基础上加上对应的文件头。本篇的重点在于:把动态到来的 buffer 实时拼接到文件尾部,同时更新 WAV 的文件头。
fseek 用来移动文件的光标,fwrite 用来在当前光标位置写入数据。

WavFile = fopen([fileName cStringUsingEncoding:1], "wb");
/// Appends one captured buffer's PCM bytes to the WAV file and rewrites
/// the 44-byte RIFF header so the file is valid after every buffer.
/// @param buffer        The filled Audio Queue buffer.
/// @param ionumpackets  Packet count reported by the callback (unused for
///                      sizing; mAudioDataByteSize is authoritative).
/// @param tag           Sequence number of this buffer (for logging/debug).
- (void)PCMtoWAV:(AudioQueueBufferRef)buffer IONumPackets:(UInt32)ionumpackets tag:(NSInteger)tag{
    // Accumulate the actual byte count instead of hard-coding
    // 2 bytes/packet (the original assumed 16-bit mono).
    wavDatalen += buffer->mAudioDataByteSize;
    // WAV header fields: the "data" chunk size is the raw PCM length, and
    // the RIFF chunk size is PCM length + 36 (44-byte header minus the
    // 8 bytes of "RIFF" + size). The original passed wavDatalen+80 /
    // wavDatalen, producing wrong sizes in both fields.
    NSData * data = [self GetWavheadData:wavDatalen totalDataLen:wavDatalen + 36 longSampleRate:sampleRate  channels:nChannel byteRate:byteRate];

    // Rewrite the header at the start of the file.
    fseek(WavFile, 0, SEEK_SET);
    fwrite([data bytes], sizeof(Byte), [data length], WavFile);
    // Append this buffer's PCM at the end of the file.
    fseek(WavFile, 0, SEEK_END);
    fwrite(buffer->mAudioData, sizeof(Byte), buffer->mAudioDataByteSize, WavFile);
    // Keep the on-disk file consistent in case recording is interrupted.
    fflush(WavFile);
}

拼接 WAV 文件头:重点在于根据自己录音的配置参数与对应的 data 长度,生成 44 字节的文件头。

/// Builds the standard 44-byte RIFF/WAVE header for 16-bit linear PCM.
/// @param totalAudioLen  Raw PCM payload length — written to the "data"
///                       chunk size field (bytes 40-43).
/// @param totalDataLen   RIFF chunk size (file size - 8) — written to
///                       bytes 4-7.
/// @param longSampleRate Sample rate in Hz.
/// @param channels       Channel count (1 = mono, 2 = stereo).
/// @param byteRate       Bytes per second = sampleRate * channels * 16 / 8.
/// @return A 44-byte NSData containing the header.
- (NSData*)GetWavheadData:(NSInteger)totalAudioLen totalDataLen:(NSInteger)totalDataLen longSampleRate:(NSInteger)longSampleRate channels:(NSInteger)channels byteRate:(NSInteger)byteRate{

    Byte header[44];
    Byte *p = header;

    // Write a 32-bit value at the given offset in little-endian order
    // (WAV numeric fields are always little-endian).
    void (^putLE32)(int, NSInteger) = ^(int offset, NSInteger value) {
        p[offset]     = (Byte) (value & 0xff);
        p[offset + 1] = (Byte) ((value >> 8) & 0xff);
        p[offset + 2] = (Byte) ((value >> 16) & 0xff);
        p[offset + 3] = (Byte) ((value >> 24) & 0xff);
    };

    memcpy(p, "RIFF", 4);            // RIFF chunk ID
    putLE32(4, totalDataLen);        // RIFF chunk size (file size - 8)
    memcpy(p + 8, "WAVE", 4);        // format tag
    memcpy(p + 12, "fmt ", 4);       // 'fmt ' sub-chunk ID (trailing space)
    putLE32(16, 16);                 // 'fmt ' chunk length: always 16 for PCM
    p[20] = 1;                       // audio format: 1 = linear PCM
    p[21] = 0;
    p[22] = (Byte) channels;         // channel count (low byte only, as before)
    p[23] = 0;
    putLE32(24, longSampleRate);     // sample rate
    putLE32(28, byteRate);           // byte rate = sampleRate * channels * 16 / 8
    p[32] = (Byte) (channels * 16 / 8); // block align = channels * bytesPerSample
    p[33] = 0;
    p[34] = 16;                      // bits per sample
    p[35] = 0;
    memcpy(p + 36, "data", 4);       // data sub-chunk ID
    putLE32(40, totalAudioLen);      // PCM payload byte count

    return [[NSData alloc] initWithBytes:header length:44];
}

你可能感兴趣的:(iOS录音实时抛出buffer,同时转码成WAV)