Learning FFmpeg by Building a Mac Player (8): Playing AV_SAMPLE_FMT_FLTP Data with AudioUnit

PCM audio decoded by FFmpeg is stored in a specific sample format, recorded in codec_ctx->sample_fmt. AudioUnit can directly play PCM data in FFmpeg's AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_FLT, and AV_SAMPLE_FMT_FLTP formats.
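
Before configuring the AudioUnit it helps to confirm what the decoder actually produces. A minimal sketch using libavutil's standard helpers (declared in libavutil/samplefmt.h):

//Inspect the decoder's output sample format
const char * fmt_name = av_get_sample_fmt_name(codec_ctx->sample_fmt); //e.g. "fltp"
int is_planar = av_sample_fmt_is_planar(codec_ctx->sample_fmt);        //1 for S16P/FLTP, 0 for S16/FLT
int bytes_per_sample = av_get_bytes_per_sample(codec_ctx->sample_fmt); //4 for FLT/FLTP, 2 for S16/S16P
NSLog(@"sample_fmt=%s planar=%d bytes/sample=%d", fmt_name, is_planar, bytes_per_sample);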

//Create the AudioUnit through an AUGraph
- (OSStatus)setupAudioUnitWithStreamDescription:(AudioStreamBasicDescription)streamDescription {
    //On iOS you must configure AVAudioSession first; on macOS this is not required
    OSStatus status = NewAUGraph(&_graph);
    if (status != noErr) {
        NSLog(@"Can not create new graph");
        return status;
    }

    AudioComponentDescription description;
    bzero(&description, sizeof(description));
    description.componentType = kAudioUnitType_Output;
    //kAudioUnitSubType_HALOutput is the macOS subtype; on iOS use kAudioUnitSubType_RemoteIO instead
    description.componentSubType = kAudioUnitSubType_HALOutput;
    description.componentManufacturer = kAudioUnitManufacturer_Apple;

    status = AUGraphAddNode(_graph, &description, &_node);
    if (status != noErr) {
        NSLog(@"Can not add node");
        return status;
    }

    status = AUGraphOpen(_graph);
    if (status != noErr) {
        NSLog(@"Can not open graph");
        return status;
    }

    status = AUGraphNodeInfo(_graph, _node, NULL, &_unit);
    if (status != noErr) {
        NSLog(@"Can not get node info");
        return status;
    }
    //Describe the format of the input data with an AudioStreamBasicDescription
    status = AudioUnitSetProperty(_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamDescription, sizeof(streamDescription));
    if (status != noErr) {
        NSLog(@"Can not set stream format on unit input scope");
        return status;
    }
    //Install the callback that supplies audio data
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = &InputRenderCallback;
    callbackStruct.inputProcRefCon = (__bridge void *)self;
    status = AudioUnitSetProperty(_unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &callbackStruct, sizeof(callbackStruct));
    if (status != noErr) {
        NSLog(@"Fail to set render callback");
        return status;
    }

    status = AUGraphInitialize(_graph);
    if (status != noErr) {
        NSLog(@"Can not initialize graph");
        return status;
    }

    return status;
}
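
After -setupAudioUnitWithStreamDescription: returns noErr, playback is driven by starting the graph. A minimal start/teardown sketch (error handling omitted):

//Start rendering; the render callback begins firing on the audio I/O thread
AUGraphStart(_graph);

//Stop and tear down once playback is finished
AUGraphStop(_graph);
AUGraphUninitialize(_graph);
AUGraphClose(_graph);
DisposeAUGraph(_graph);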
Next, set up the AudioStreamBasicDescription that AV_SAMPLE_FMT_FLTP data requires:
    AudioStreamBasicDescription streamDescription;
    bzero(&streamDescription, sizeof(streamDescription));
    streamDescription.mFormatID = kAudioFormatLinearPCM;
    /*
    AV_SAMPLE_FMT_S16   kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked
    AV_SAMPLE_FMT_S16P  kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsNonInterleaved
    AV_SAMPLE_FMT_FLT   kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked
    AV_SAMPLE_FMT_FLTP  kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved
    */
    streamDescription.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved;
    //The sample rate should match the decoder's output (codec_ctx->sample_rate); 44100 Hz is assumed here
    streamDescription.mSampleRate = 44100.0;
    streamDescription.mChannelsPerFrame = codec_ctx->channels;
    streamDescription.mFramesPerPacket = 1;
    //Float samples are 4 bytes (32 bits); SignedInteger samples are 2 bytes (16 bits)
    streamDescription.mBitsPerChannel = 32;
    //For planar (non-interleaved) data this is the size of one sample; for interleaved data, multiply by the channel count
    streamDescription.mBytesPerFrame = 4;
    streamDescription.mBytesPerPacket = 4;
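
For comparison, a packed (interleaved) format such as AV_SAMPLE_FMT_FLT would differ only in the flags and the per-frame byte counts, per the table above; a sketch:

    streamDescription.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked;
    //Interleaved: one frame carries a sample for every channel
    streamDescription.mBytesPerFrame = 4 * codec_ctx->channels;
    streamDescription.mBytesPerPacket = 4 * codec_ctx->channels;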
//Decode the audio and write the PCM to files
- (void)decodeAudioData {
    AVPacket packet;
    av_init_packet(&packet);
    while ((av_read_frame(ifmt_ctx, &packet)) >= 0) {
        if (packet.stream_index == audio_stream_index) {
            int ret = avcodec_send_packet(codec_ctx, &packet);
            while (ret >= 0) {
                AVFrame * frame = av_frame_alloc();
                ret = avcodec_receive_frame(codec_ctx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    av_frame_free(&frame); //free the frame before leaving the loop to avoid a leak
                    break;
                } else if (ret < 0) {
                    NSLog(@"Error during decoding");
                    av_frame_free(&frame);
                    break;
                }
                //With nb_channels = 1 this yields the size of a single plane
                int data_size = av_samples_get_buffer_size(frame->linesize, 1, frame->nb_samples, AV_SAMPLE_FMT_FLTP, 0);
                //FLTP is planar: data[0] holds the left channel, data[1] the right
                fwrite(frame->data[0], 1, data_size, file1);
                fwrite(frame->data[1], 1, data_size, file2);
                av_frame_free(&frame);
            }
        }
        av_packet_unref(&packet); //release the packet buffer on every iteration
    }
    //Rewind both files so the render callback reads from the start
    if (file1) {
        fseek(file1, 0, SEEK_SET);
    }
    if (file2) {
        fseek(file2, 0, SEEK_SET);
    }
}
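
The demo stages each channel in its own file, so file1 and file2 are assumed to be FILE * instance variables opened before decoding in a mode that allows reading back, e.g. (a hypothetical setup; the paths are placeholders):

//Hypothetical setup: "wb+" permits writing during decode and reading back in the render callback
file1 = fopen("/tmp/channel_left.pcm", "wb+");
file2 = fopen("/tmp/channel_right.pcm", "wb+");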
//Callback that fills the audio buffers
- (OSStatus)renderData:(AudioBufferList *)ioData atTimeStamp:(const AudioTimeStamp *)timeStamp forElement:(UInt32)element numberFrames:(UInt32)numFrames flags:(AudioUnitRenderActionFlags *)flags {
    for (int iBuffer = 0; iBuffer < ioData->mNumberBuffers; iBuffer++) {
        memset(ioData->mBuffers[iBuffer].mData, 0, ioData->mBuffers[iBuffer].mDataByteSize);
    }
    FILE * files[] = {file1, file2};
    for (int iBuffer = 0; iBuffer < ioData->mNumberBuffers; iBuffer++) {
        //As a shortcut, the demo writes the left and right channels to two separate files during decoding and reads mDataByteSize bytes from each here. The amount of data FFmpeg decodes may not match mDataByteSize exactly, which a real player would need to handle
        fread(ioData->mBuffers[iBuffer].mData, ioData->mBuffers[iBuffer].mDataByteSize, 1, files[iBuffer]);
    }
    return noErr;
}
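
Because every buffer is zeroed before the fread, a short read at end of file simply plays out as silence. To loop the clip instead, one could rewind whenever a read comes up short; a hypothetical variation:

size_t bytesRead = fread(ioData->mBuffers[iBuffer].mData, 1, ioData->mBuffers[iBuffer].mDataByteSize, files[iBuffer]);
if (bytesRead < ioData->mBuffers[iBuffer].mDataByteSize) {
    fseek(files[iBuffer], 0, SEEK_SET); //restart from the beginning of the channel file
}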

//C trampoline that forwards the render callback to the Objective-C method above
static OSStatus InputRenderCallback(void * inRefCon, AudioUnitRenderActionFlags * ioActionFlags, const AudioTimeStamp * inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList * ioData) {
    ViewController * viewController = (__bridge ViewController *)inRefCon;
    return [viewController renderData:ioData atTimeStamp:inTimeStamp forElement:inBusNumber numberFrames:inNumberFrames flags:ioActionFlags];
}

In the demo I used AVFilter to convert the PCM data to AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_FLT, and AV_SAMPLE_FMT_FLTP respectively, and played each with AudioUnit:
github
