ffmpeg实现音频播放

1. AudioTrack 方式

public native void playAudio(String path);

extern "C"
JNIEXPORT void JNICALL
Java_com_example_ffmpeg_MainActivity_playAudio(JNIEnv *env, jobject instance, jstring audioPath) {
    // Decode the audio stream of `audioPath` with FFmpeg, resample every frame
    // to 16-bit 44100 Hz mono PCM, and push each chunk up to Java where an
    // AudioTrack (createTrack/playTrack) plays it.
    LOGE(LOG_TAG, "playAudio");
    const char *path = env->GetStringUTFChars(audioPath, 0);

    // All locals are declared (and initialized) up front so the error paths
    // can share one cleanup label without jumping over C++ initializations.
    // The original leaked pFormatContext and the JNI string on every early
    // return and never freed the packet or codec context at all.
    AVFormatContext *pFormatContext = avformat_alloc_context();
    AVCodecContext *pCodecContext = NULL;
    const AVCodec *pCodec = NULL;              // avcodec_find_decoder() returns const
    AVPacket *avp = NULL;
    AVFrame *avf = NULL;
    SwrContext *swr_cxt = NULL;
    uint8_t *out_buf = NULL;
    AVCodecParameters *pCodecParams = NULL;
    AVCodecID pCodecId = AV_CODEC_ID_NONE;
    enum AVSampleFormat in_sample_fmt;
    enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;  // 16-bit PCM for AudioTrack
    int in_sample_rate;
    int out_sample_rate = 44100;
    uint64_t in_ch_layout;
    uint64_t out_ch_layout = AV_CH_LAYOUT_MONO;
    int out_channel_nb;
    int max_out_samples;
    int audio_index = -1;
    int frame_count = 0;
    int ret;
    jclass clazz;
    jmethodID methodId;
    jmethodID methodID1;

    if (avformat_open_input(&pFormatContext, path, NULL, NULL) != 0) {
        LOGE("%s", "不能打开文件");
        goto end;
    }
    if (avformat_find_stream_info(pFormatContext, NULL) < 0) {
        LOGE("%s", "没有找到流文件");
        goto end;
    }
    av_dump_format(pFormatContext, 0, path, 0);

    // Locate the first audio stream in the container.
    for (int i = 0; i < pFormatContext->nb_streams; i++) {
        if (pFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audio_index = i;
            break;
        }
    }
    if (audio_index == -1) {
        LOGE("%s", "没有找到音频流");
        goto end;
    }

    pCodecParams = pFormatContext->streams[audio_index]->codecpar;
    pCodecId = pCodecParams->codec_id;
    // AVCodecID is an enum, not a pointer — compare against AV_CODEC_ID_NONE
    // (the original `== NULL` comparison only compiled by accident).
    if (pCodecId == AV_CODEC_ID_NONE) {
        LOGE("%s", "CodecID == null");
        goto end;
    }
    // The original `const_cast(avcodec_find_decoder(...))` was missing its
    // template argument and did not compile; keep the pointer const instead.
    pCodec = avcodec_find_decoder(pCodecId);
    if (pCodec == NULL) {
        LOGE("%s", "没有找到解码器");
        goto end;
    }
    pCodecContext = avcodec_alloc_context3(pCodec);
    if (pCodecContext == NULL) {
        LOGE("%s", "不能为CodecContext分配内存");
        goto end;
    }
    if (avcodec_parameters_to_context(pCodecContext, pCodecParams) < 0) {
        LOGE("%s", "创建codecContext失败");
        goto end;
    }
    if (avcodec_open2(pCodecContext, pCodec, NULL) < 0) {
        LOGE("%s", "打开解码器失败");
        goto end;
    }

    avp = av_packet_alloc();
    avf = av_frame_alloc();

    // Resampler: whatever the codec emits -> S16 / 44100 Hz / mono.
    swr_cxt = swr_alloc();
    in_sample_fmt = pCodecContext->sample_fmt;
    in_sample_rate = pCodecContext->sample_rate;
    in_ch_layout = pCodecContext->channel_layout;
    swr_alloc_set_opts(swr_cxt, out_ch_layout, out_sample_fmt, out_sample_rate,
                       in_ch_layout, in_sample_fmt, in_sample_rate, 0, NULL);
    swr_init(swr_cxt);

    out_channel_nb = av_get_channel_layout_nb_channels(out_ch_layout);

    // Hand the output format to the Java side so it can build the AudioTrack.
    clazz = env->GetObjectClass(instance);
    methodId = env->GetMethodID(clazz, "createTrack", "(II)V");
    methodID1 = env->GetMethodID(clazz, "playTrack", "([BI)V");
    env->CallVoidMethod(instance, methodId, 44100, out_channel_nb);

    // PCM scratch buffer of 2*44100 bytes. swr_convert()'s out_count is in
    // samples PER CHANNEL, not bytes (S16 mono = 2 bytes/sample) — the
    // original passed the byte count, overstating the space by 2x.
    out_buf = (uint8_t *) av_malloc(2 * 44100);
    max_out_samples = (2 * 44100) / (out_channel_nb * 2);

    // Demux packet by packet; decode, resample, and push each frame to Java.
    while (av_read_frame(pFormatContext, avp) >= 0) {
        if (avp->stream_index == audio_index) {
            ret = avcodec_send_packet(pCodecContext, avp);
            if (ret < 0) {
                av_log(NULL, AV_LOG_INFO, "解码完成  \n");
            } else {
                // One packet may yield zero or more frames; drain them all.
                // The original ignored avcodec_receive_frame()'s return value.
                while (avcodec_receive_frame(pCodecContext, avf) == 0) {
                    LOGE(LOG_TAG, "正在解码第%d帧  \n", ++frame_count);
                    int out_samples = swr_convert(swr_cxt, &out_buf, max_out_samples,
                                                  (const uint8_t **) avf->data,
                                                  avf->nb_samples);
                    if (out_samples < 0) {
                        continue;
                    }
                    // Size the Java array by what the resampler actually
                    // produced; in/out sample rates differ, so the input
                    // frame's nb_samples (used originally) is the wrong count.
                    int out_buf_size = av_samples_get_buffer_size(NULL, out_channel_nb,
                                                                  out_samples,
                                                                  out_sample_fmt, 1);
                    jbyteArray audioArray = env->NewByteArray(out_buf_size);
                    env->SetByteArrayRegion(audioArray, 0, out_buf_size,
                                            (const jbyte *) out_buf);
                    // AudioTrack.write() on the Java side blocks, which paces
                    // playback — the original usleep(500) hack is unnecessary.
                    env->CallVoidMethod(instance, methodID1, audioArray, out_buf_size);
                    env->DeleteLocalRef(audioArray);
                }
            }
        }
        av_packet_unref(avp);
    }

end:
    // Single exit point releasing everything acquired above, in reverse order.
    if (out_buf) av_free(out_buf);
    av_frame_free(&avf);                 // NULL-safe
    av_packet_free(&avp);                // NULL-safe
    if (swr_cxt) swr_free(&swr_cxt);
    if (pCodecContext) avcodec_free_context(&pCodecContext);
    if (pFormatContext) avformat_close_input(&pFormatContext);
    env->ReleaseStringUTFChars(audioPath, path);
}

2. OpenSL ES 方式

public native void playAudioByOpenGL(String path);
extern "C"
JNIEXPORT void JNICALL
Java_com_example_ffmpeg_MainActivity_playAudioByOpenGL(JNIEnv *env, jobject thiz, jstring path) {
    // Entry point for the OpenSL ES playback path: hands the file path to
    // createPlayer(), which builds the whole decode + playback chain.
    LOGD(LOG_TAG, "Java_com_example_ffmpeg_MainActivity_playAudioByOpenGL");
    const char *audioPath = env->GetStringUTFChars(path, 0);
    if (audioPath == NULL) {
        return;  // JVM out of memory; a pending exception is already set
    }
    createPlayer(audioPath);
    // createPlayer() only reads the path synchronously (createFFmpeg opens the
    // file before it returns), so the chars can be released here — the
    // original leaked them on every call.
    env->ReleaseStringUTFChars(path, audioPath);
}
#include <unistd.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <libswresample/swresample.h>
#include "log.h"
#include "audio.h"


// Compile-time switch for an optional bitstream-filter stage in getPcm();
// disabled here, so the filter globals below are compiled out.
#define ABSFILTER_ENABLE  0
#if ABSFILTER_ENABLE
const AVBitStreamFilter * absFilter = NULL;
AVBSFContext *absCtx = NULL;
AVCodecParameters *codecpar = NULL;
#endif

SLObjectItf engineObject=NULL;//engine object handle (SLObjectItf)
SLEngineItf engineEngine = NULL;//engine interface obtained from engineObject


SLObjectItf outputMixObject = NULL;//output-mix object handle
SLEnvironmentalReverbItf outputMixEnvironmentalReverb = NULL;//reverb interface on the output mix
SLEnvironmentalReverbSettings settings = SL_I3DL2_ENVIRONMENT_PRESET_DEFAULT;//default reverb preset


SLObjectItf audioplayer=NULL;//audio-player object handle
SLPlayItf  slPlayItf=NULL;//play interface (start/stop)
SLAndroidSimpleBufferQueueItf  slBufferQueueItf=NULL;//buffer-queue interface fed with PCM


// Hand-off slot between getPcm() (producer) and the queue callback (consumer):
// getPcm() points `buffer` at the decoder's PCM scratch area and sets its size.
size_t buffersize =0;
void *buffer;
//将pcm数据添加到缓冲区中
// Buffer-queue callback: decode the next chunk of PCM and enqueue it.
// OpenSL ES fires this each time the previous buffer finishes playing, so
// re-arming the queue here keeps the decode/play loop running until
// getPcm() stops producing data.
void getQueueCallBack(SLAndroidSimpleBufferQueueItf  slBufferQueueItf, void* context){
    LOGE(LOG_TAG,"getQueueCallBack");
    buffersize = 0;
    getPcm(&buffer, &buffersize);
    if (buffer == NULL || buffersize == 0) {
        return;  // decoder drained: let the queue go idle
    }
    (*slBufferQueueItf)->Enqueue(slBufferQueueItf, buffer, buffersize);
}

//创建引擎
void createEngine(){
    slCreateEngine(&engineObject,0,0,0,0,0);//创建引擎
    (*engineObject)->Realize(engineObject,SL_BOOLEAN_FALSE);//实现engineObject接口对象
    (*engineObject)->GetInterface(engineObject,SL_IID_ENGINE,&engineEngine);//通过引擎调用接口初始化SLEngineItf
}

//创建混音器
// Create and realize the output mix, then try to obtain its environmental
// reverb interface. The reverb was requested as optional (SL_BOOLEAN_FALSE),
// so settings are applied only when the interface is actually available.
void createMixVolume(){
    const SLInterfaceID ids[1] = {SL_IID_ENVIRONMENTALREVERB};
    const SLboolean req[1] = {SL_BOOLEAN_FALSE};

    (*engineEngine)->CreateOutputMix(engineEngine, &outputMixObject, 1, ids, req);
    (*outputMixObject)->Realize(outputMixObject, SL_BOOLEAN_FALSE);

    SLresult res = (*outputMixObject)->GetInterface(outputMixObject,
                                                    SL_IID_ENVIRONMENTALREVERB,
                                                    &outputMixEnvironmentalReverb);
    if (res == SL_RESULT_SUCCESS) {
        (*outputMixEnvironmentalReverb)->SetEnvironmentalReverbProperties(
                outputMixEnvironmentalReverb, &settings);
    }
}

//创建播放器
void createPlayer(const char* path){
    //初始化ffmpeg
    int rate ;
    int channels ;
    createFFmpeg(&rate,&channels,path);
    LOGE(LOG_TAG,"RATE %d",rate);
    LOGE(LOG_TAG,"channels %d",channels);
    /*
     * typedef struct SLDataLocator_AndroidBufferQueue_ {
    SLuint32    locatorType;//缓冲区队列类型
    SLuint32    numBuffers;//buffer位数
} */
    createEngine();
    createMixVolume();


    //3 设置输入输出数据源
//setSLData();
//3.1 设置输入 SLDataSource
    SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,2};

    SLDataFormat_PCM formatPcm = {
            SL_DATAFORMAT_PCM,//播放pcm格式的数据
            channels,//2个声道(立体声)
            rate*2000,//44100hz的频率
            SL_PCMSAMPLEFORMAT_FIXED_16,//位数 16位
            SL_PCMSAMPLEFORMAT_FIXED_16,//和位数一致就行
            SL_SPEAKER_FRONT_LEFT,//立体声(前左前右)
            SL_BYTEORDER_LITTLEENDIAN//结束标志
    };

    SLDataSource slDataSource = {&loc_bufq, &formatPcm};

    //3.2 设置输出 SLDataSink
    SLDataLocator_OutputMix loc_outmix = {SL_DATALOCATOR_OUTPUTMIX, outputMixObject};
    SLDataSink audioSnk = {&loc_outmix, NULL};

    //4.创建音频播放器

    //4.1 创建音频播放器对象

    const SLInterfaceID ids[3]={SL_IID_BUFFERQUEUE,SL_IID_EFFECTSEND,SL_IID_VOLUME};
    const SLboolean req[3]={SL_BOOLEAN_FALSE,SL_BOOLEAN_FALSE,SL_BOOLEAN_FALSE};

    int result = (*engineEngine)->CreateAudioPlayer(engineEngine, &audioplayer, &slDataSource, &audioSnk,
                                                3, ids, req);
    if (audioplayer == NULL){
        LOGE(LOG_TAG," audioplayer == NULL 11");
    }
    if (SL_RESULT_SUCCESS != result) {
        LOGE(LOG_TAG," CreateAudioPlayer error");
    }
    if (engineEngine == NULL){
        LOGE(LOG_TAG,"engineEngine == NULL");
    }

    (*audioplayer)->Realize(audioplayer,SL_BOOLEAN_FALSE);
    if (SL_RESULT_SUCCESS != result) {
        LOGE(LOG_TAG," pcmPlayerObject Realize error");
        // return -1;
    }
    (*audioplayer)->GetInterface(audioplayer,SL_IID_PLAY,&slPlayItf);//初始化播放器

    //注册缓冲区,通过缓冲区里面 的数据进行播放
    (*audioplayer)->GetInterface(audioplayer,SL_IID_BUFFERQUEUE,&slBufferQueueItf);
    //设置回调接口
    (*slBufferQueueItf)->RegisterCallback(slBufferQueueItf,getQueueCallBack,NULL);
    //播放
    (*slPlayItf)->SetPlayState(slPlayItf,SL_PLAYSTATE_PLAYING);
    //开始播放
    getQueueCallBack(slBufferQueueItf,NULL);
}
//释放资源
// Tear everything down in reverse creation order: player, then output mix,
// then engine, and finally the FFmpeg decode state. Destroying an OpenSL
// object invalidates every interface obtained from it, so the matching
// interface pointers are cleared alongside each object handle.
void releaseResource(){
    if (audioplayer != NULL) {
        (*audioplayer)->Destroy(audioplayer);
        slPlayItf = NULL;
        slBufferQueueItf = NULL;
        audioplayer = NULL;
    }
    if (outputMixObject != NULL) {
        (*outputMixObject)->Destroy(outputMixObject);
        outputMixEnvironmentalReverb = NULL;
        outputMixObject = NULL;
    }
    if (engineObject != NULL) {
        (*engineObject)->Destroy(engineObject);
        engineEngine = NULL;
        engineObject = NULL;
    }
    releaseFFmpeg();
}

// ---- FFmpeg decode state shared by createFFmpeg()/getPcm()/releaseFFmpeg() ----
AVFormatContext *pFormatCtx;   // demuxer context for the opened file
AVCodecContext *pCodecCtx;     // audio decoder context
AVCodec *pCodex;               // decoder selected for the audio stream
AVPacket *packet;              // reusable compressed-packet buffer
AVFrame *frame;                // reusable decoded-frame buffer
SwrContext *swrContext;        // resampler: decoder output -> interleaved S16
AVCodecParserContext *parserContext = NULL;  // appears unused in this file
uint8_t *out_buffer;           // 44100*2-byte PCM scratch buffer handed to the player
int out_channer_nb;            // channel count of the resampled PCM
int audio_stream_idx=-1;       // index of the audio stream; -1 until found
//opensl es调用 int * rate,int *channel
int createFFmpeg(int *rate,int *channel,const char* path){
    const char *input = path;
    pFormatCtx = avformat_alloc_context();
    LOGE(LOG_TAG,"Lujng %s",input);
    LOGE(LOG_TAG,"xxx %p",pFormatCtx);
    int error;
    char buf[] = "";
    error = avformat_open_input(&pFormatCtx, input, NULL, NULL);
    //打开视频地址并获取里面的内容(解封装)
    if (error < 0) {
        av_strerror(error, buf, 1024);
        // LOGE("%s" ,inputPath)
        LOGE("Couldn't open file %s: %d(%s)", input, error, buf);
        // LOGE("%d",error)
        LOGE(LOG_TAG,"打开视频失败");
    }
    //3.获取视频信息
    if(avformat_find_stream_info(pFormatCtx,NULL) < 0){
        LOGE("%s","获取视频信息失败");
        return -1;
    }
    av_dump_format(pFormatCtx, 0, input, 0);
    int codecID;
    for (int i = 0; i < pFormatCtx->nb_streams; ++i) {
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            LOGE(LOG_TAG,"  找到音频id %d", pFormatCtx->streams[i]->codecpar->codec_id);
            audio_stream_idx=i;
            break;
        }
    }

    //pCodex = avcodec_find_encoder(AV_CODEC_ID_MP2);
    pCodex = avcodec_find_decoder(pFormatCtx->streams[audio_stream_idx]->codecpar->codec_id);
    if (!pCodex) {
        LOGE(LOG_TAG, "Codec not found\n");
        exit(1);
    }
    //av_parser_init(pCodex->id);
    pCodecCtx = avcodec_alloc_context3(pCodex);

    if (!pCodecCtx) {
        LOGE(LOG_TAG, "Could not allocate audio codec context\n");
        exit(1);
    }

    LOGE(LOG_TAG,"获取视频编码 %d",pCodex->id);
    if (pCodecCtx == NULL || pCodex == NULL){
        LOGE(LOG_TAG," NULL");
    }

    avcodec_parameters_to_context(pCodecCtx, pFormatCtx->streams[audio_stream_idx]->codecpar);
    pCodecCtx->thread_count = 1;
    //AVCodec *pCodec264 =  avcodec_find_encoder(pCodecCtx->codec_id);

    //pCodecCtx264 = avcodec_alloc_context3(pCodec264);
    //avcodec_alloc_context3(pCodecCtx->codec_id);
    if (avcodec_open2(pCodecCtx, pCodex, 0)<0) {
        LOGE(LOG_TAG," avcodec_open2 failed");
    }
    packet = (AVPacket *)av_malloc(sizeof(AVPacket));
    //av_init_packet(packet);
//    音频数据

    frame = av_frame_alloc();

//    mp3  里面所包含的编码格式   转换成  pcm   SwcContext
    swrContext = swr_alloc();
    int length=0;
    int got_frame;
//    44100*2
    out_buffer = (uint8_t *) av_malloc(44100 * 2);
    uint64_t  out_ch_layout=AV_CH_LAYOUT_STEREO;
//    输出采样位数  16位
    enum AVSampleFormat out_formart=AV_SAMPLE_FMT_S16;
//输出的采样率必须与输入相同
   // int out_sample_rate = pCodecCtx->sample_rate;
    int out_sample_rate = pCodecCtx->sample_rate;
    LOGE(LOG_TAG,"out_sample_rate %d",out_sample_rate);

    swr_alloc_set_opts(swrContext, out_ch_layout, out_formart, out_sample_rate,
                       pCodecCtx->channel_layout, pCodecCtx->sample_fmt, pCodecCtx->sample_rate, 0,
                       NULL);
    swr_init(swrContext);
//    获取通道数  2
    out_channer_nb = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO);
    *rate = pCodecCtx->sample_rate;
    *channel = pCodecCtx->channels;
    LOGE(LOG_TAG,"%d   %d",*rate,*channel);
    return 0;
}

// Read and decode forward until one frame of PCM has been produced.
// On success, points *pcm at the module's reusable out_buffer, stores the
// byte count in *pcm_size and returns 0. When the stream is exhausted (or a
// read error occurs) *pcm/*pcm_size are left untouched and 0 is returned —
// the callback detects end-of-stream by zeroing its size before each call.
int getPcm(void **pcm,size_t *pcm_size){
    char errbuf[256];
    // out_buffer holds 44100*2 bytes. swr_convert()'s out_count is in samples
    // PER CHANNEL (S16 = 2 bytes each), so convert the byte capacity — the
    // original passed 44100*2 "samples", overstating the space 4x for stereo
    // and risking a heap overflow on large frames.
    const int max_out_samples = (44100 * 2) / (out_channer_nb * 2);

    while (1) {
        int ret = av_read_frame(pFormatCtx, packet);
        if (ret != 0) {
            av_strerror(ret, errbuf, sizeof(errbuf));
            // Original LOGE("--%s--\n","%s", buf) had mismatched format args.
            LOGE(LOG_TAG, "av_read_frame: %s", errbuf);
            av_packet_unref(packet);
            break;
        }
        if (packet->stream_index != audio_stream_idx) {
            av_packet_unref(packet);
            continue;
        }
#if ABSFILTER_ENABLE
        if (av_bsf_send_packet(absCtx, packet) < 0) {
            LOGE(LOG_TAG, "av_bsf_send_packet faile \n");
            av_packet_unref(packet);
            continue;
        }
        if (av_bsf_receive_packet(absCtx, packet) < 0) {
            LOGE(LOG_TAG, "av_bsf_receive_packet faile \n");
            av_packet_unref(packet);
            continue;
        }
#endif
        // Submit the compressed packet; the packet can be unref'd right away.
        int result = avcodec_send_packet(pCodecCtx, packet);
        av_packet_unref(packet);
        if (result < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
            continue;
        }

        // Drain decoded frames produced by this packet.
        while (result >= 0) {
            result = avcodec_receive_frame(pCodecCtx, frame);
            if (result == AVERROR_EOF)
                break;
            else if (result == AVERROR(EAGAIN)) {
                result = 0;  // need more input: read the next packet
                break;
            }
            else if (result < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding frame\n");
                av_frame_unref(frame);
                break;
            }

            int out_samples = swr_convert(swrContext, &out_buffer, max_out_samples,
                                          (const uint8_t **) frame->data,
                                          frame->nb_samples);
            av_frame_unref(frame);
            if (out_samples < 0)
                continue;
            // Size what the resampler actually produced, not the input
            // frame's sample count.
            int size = av_samples_get_buffer_size(NULL, out_channer_nb, out_samples,
                                                  AV_SAMPLE_FMT_S16, 1);
            *pcm = out_buffer;
            *pcm_size = (size_t) size;
            return 0;
        }
    }
    return 0;
}


void releaseFFmpeg(){
    //free(packet);
    av_free(packet);
    //av_free_packet(packet);
    av_free(out_buffer);
    av_frame_free(&frame);
    swr_free(&swrContext);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);
}


#include 
#ifndef FFMPEG_AUDIO_H
#define FFMPEG_AUDIO_H
void releaseFFmpeg();
int createFFmpeg(int *rate,int *channel,const char* path);
void releaseResource();
void createPlayer(const char* path);
void createEngine();
void getQueueCallBack(SLAndroidSimpleBufferQueueItf  slBufferQueueItf, void* context);
void createMixVolume();
int getPcm(void **pcm,size_t *pcm_size);
#endif //FFMPEG_AUDIO_H

你可能感兴趣的:(ffmpeg,音视频播放,ffmpeg,音视频)