FFmpeg - Multi-threaded Decoding and Playback

Audio Data Resampling

In the previous article, FFmpeg - A First Look at FFmpeg and Decoding Data, we already managed to obtain the audio data and fixed the memory-growth problem, but the sound was wrong. The main cause lies here:

jobject initCreateAudioTrack(JNIEnv *env) {
    /*AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)*/
    jclass jAudioTrackClass = env->FindClass("android/media/AudioTrack");
    jmethodID jAudioTrackCMid = env->GetMethodID(jAudioTrackClass, "<init>", "(IIIIII)V");

    int streamType = 3;              // AudioManager.STREAM_MUSIC
    int sampleRateInHz = AUDIO_SAMPLE_RATE;
    int channelConfig = (0x4 | 0x8); // CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT = CHANNEL_OUT_STEREO
    int audioFormat = 2;             // AudioFormat.ENCODING_PCM_16BIT
    int mode = 1;                    // AudioTrack.MODE_STREAM

    // int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat)
    jmethodID getMinBufferSizeMid = env->GetStaticMethodID(jAudioTrackClass, "getMinBufferSize",
                                                           "(III)I");
    int bufferSizeInBytes = env->CallStaticIntMethod(jAudioTrackClass, getMinBufferSizeMid,
                                                     sampleRateInHz, channelConfig, audioFormat);
    LOGE("bufferSizeInBytes = %d",bufferSizeInBytes);

    jobject jAudioTrackObj = env->NewObject(jAudioTrackClass, jAudioTackCMid, streamType,
                                            sampleRateInHz, channelConfig, audioFormat, bufferSizeInBytes, mode);

    // play
    jmethodID playMid = env->GetMethodID(jAudioTrackClass, "play", "()V");
    env->CallVoidMethod(jAudioTrackObj, playMid);

    return jAudioTrackObj;
}
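
One caveat: the jobject returned here is a local reference, which becomes invalid once the current JNI call returns. If you cache it in a C++ member and touch it later (for example from the decode thread, as we do below), promote it to a global reference first. A minimal sketch, where createAudioTrackGlobalRef is a hypothetical helper:

jobject createAudioTrackGlobalRef(JNIEnv *env) {
    jobject localTrack = initCreateAudioTrack(env);
    // a global reference stays valid across JNI calls and threads
    jobject globalTrack = env->NewGlobalRef(localTrack);
    env->DeleteLocalRef(localTrack); // the local ref is no longer needed
    return globalTrack;
}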

The streamType, sampleRateInHz and other parameters we set here are fixed, but the audio we pull from the network may not arrive with those parameters, so we need audio resampling.
The header to include is:

#include "libswresample/swresample.h"
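
Note that libswresample must also be linked in the build script (CMakeLists.txt or Android.mk) alongside avformat/avcodec/avutil, otherwise the swr_* symbols will fail to resolve at link time.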

The main functions:

  • 1. swr_alloc_set_opts: configure the resampling parameters
  • 2. swr_init: initialize the resample context
  • 3. swr_convert: perform the resampling, as the snippet below shows
 //------------resampling start--------------
    struct SwrContext *swrContext;
    int64_t out_ch_layout = AV_CH_LAYOUT_STEREO;
    enum AVSampleFormat out_sample_fmt = AVSampleFormat::AV_SAMPLE_FMT_S16;
    int out_sample_rate = AUDIO_SAMPLE_RATE;
    int64_t in_ch_layout = pCodecContext->channel_layout;
    enum AVSampleFormat in_sample_fmt = pCodecContext->sample_fmt;
    int in_sample_rate = pCodecContext->sample_rate;
    // configure the resampling parameters
    swrContext = swr_alloc_set_opts(NULL, out_ch_layout, out_sample_fmt,
                                    out_sample_rate, in_ch_layout, in_sample_fmt, in_sample_rate, 0,
                                    NULL);
    if (swrContext == NULL) {
        return;
    }
    int swrInitRes = swr_init(swrContext);
    if (swrInitRes < 0) {
        return;
    }
    // Playback: write() pushes the PCM into AudioTrack's buffer.
    // One frame is not one second; pFrame->nb_samples samples per channel determine the final output size.
    int outChannels = av_get_channel_layout_nb_channels(out_ch_layout);
    int datasize = av_samples_get_buffer_size(NULL, outChannels,
                                              pCodecParameters->frame_size,
                                              out_sample_fmt, 0);
    uint8_t *resampleOutBuffer = (uint8_t *) (malloc(datasize));
    //------------resampling end--------------


    // copy pFrame->data into a Java byte array
    jbyteArray jPcmByteArray = env->NewByteArray(datasize);
    while (av_read_frame(pContext, pPacket) >= 0) {
        if (pPacket->stream_index == audioStreamIndex) {
            // decode the compressed AVPacket into PCM
            int codecSendPacketRes = avcodec_send_packet(pCodecContext, pPacket);
            if (codecSendPacketRes == 0) {
                int codecReceiveFrameRes = avcodec_receive_frame(pCodecContext, pFrame);
                if (codecReceiveFrameRes == 0) {
                    index++;
                    LOGE("解码第:%d桢", index);
                    //调用重采样的方法
                    //struct SwrContext *s, uint8_t **out, int out_count,
                    //const uint8_t **in , int in_count

                    swr_convert(swrContext, &resampleOutBuffer, pFrame->nb_samples,
                                (const uint8_t **) pFrame->data, pFrame->nb_samples);

                    // sync the C buffer back to the Java array
                    jPcmData = env->GetByteArrayElements(jPcmByteArray, NULL);
                    memcpy(jPcmData, resampleOutBuffer, datasize);
                    // JNI_COMMIT: copy the C array contents back into the jbyteArray without freeing the C buffer
                    env->ReleaseByteArrayElements(jPcmByteArray, jPcmData, JNI_COMMIT);
                    env->CallIntMethod(jAudioTrackObj, jWriteMid, jPcmByteArray, 0, datasize);

                }
            }
        }
        // packet/frame unref and the rest of the cleanup appear in the full prepared() below
    }

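One caveat in the loop above: datasize is derived from pCodecParameters->frame_size, while swr_convert returns the number of samples per channel it actually produced, which is the safer basis for the write size. A sketch using the same variables as above:

// use swr_convert's return value (samples per channel actually written)
// to size the AudioTrack write, instead of assuming frame_size
int outSamples = swr_convert(swrContext, &resampleOutBuffer, pFrame->nb_samples,
                             (const uint8_t **) pFrame->data, pFrame->nb_samples);
if (outSamples > 0) {
    int outBytes = av_samples_get_buffer_size(NULL, outChannels, outSamples,
                                              out_sample_fmt, 1);
    // write outBytes bytes (not the fixed datasize) to the AudioTrack
}
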
Adding an Error Callback to Java

  • Define the Java interface
public interface MediaErrorListener {
    void onError(int code, String message);
}
  • Update the encapsulated MusicPlayer audio playback utility class
public class MusicPlayer {
    static {
        System.loadLibrary("music_play");
    }

    private String url;

    public void setDataSource(String url) {
        this.url = url;
    }

    public void play() {
        if (TextUtils.isEmpty(url)) {
            throw new NullPointerException("url is null, please call setDataSource first");
        }

        npay(url);
    }
    private MediaErrorListener mErrorListener;

    public void setErrorListener(MediaErrorListener errorListener) {
        this.mErrorListener = errorListener;
    }
    private void onError(int code, String message) {
        if (mErrorListener != null) {
            mErrorListener.onError(code, message);
        }
    }
    private native void npay(String url);
}
  • Usage
   musicPlayer.setErrorListener(new MediaErrorListener() {
            @Override
            public void onError(int code, String message) {
                Log.e("MusicPlayer","error code is:"+code);
                Log.e("MusicPlayer","error message is:"+message);
            }
        });
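For completeness, the native entry point behind npay looks roughly like the sketch below. The package name com.peakmain.player is an assumption; adjust it to your project:

// Hypothetical JNI bridge for MusicPlayer.npay (package name assumed).
extern "C"
JNIEXPORT void JNICALL
Java_com_peakmain_player_MusicPlayer_npay(JNIEnv *env, jobject instance, jstring url) {
    const char *cUrl = env->GetStringUTFChars(url, NULL);
    // ...construct the player with a copy of cUrl and start playback...
    env->ReleaseStringUTFChars(url, cUrl); // copy cUrl first if the player keeps it
}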
  • Implement it on the native side
    First, look up the Java method from the native layer:
jclass jPlayerClass = jniEnv->GetObjectClass(jPlayerObj); // jPlayerObj: the cached reference to the Java player
jPlayerErrorMid = jniEnv->GetMethodID(jPlayerClass, "onError", "(ILjava/lang/String;)V");
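
These lookups are normally done once and cached. A sketch of what that might look like in a PeakmainJNICall constructor; jPlayerObj is the name assumed here for the stored player reference:

// Sketch: cache the JavaVM, a global ref to the Java player, and the method ID.
PeakmainJNICall::PeakmainJNICall(JavaVM *vm, JNIEnv *env, jobject playerObj) {
    this->javaVM = vm;
    this->jniEnv = env;
    this->jPlayerObj = env->NewGlobalRef(playerObj); // must outlive the current JNI call
    jclass jPlayerClass = env->GetObjectClass(this->jPlayerObj);
    this->jPlayerErrorMid = env->GetMethodID(jPlayerClass, "onError", "(ILjava/lang/String;)V");
}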

Implement the call into the Java layer:

void PeakmainJNICall::callPlayerJniError(int code, char *msg) {
    jstring jMsg = jniEnv->NewStringUTF(msg);
    jniEnv->CallVoidMethod(jPlayerObj, jPlayerErrorMid, code, jMsg);
    jniEnv->DeleteLocalRef(jMsg);
}

Making the call is straightforward. One thing to watch: when an error is reported, the resources must be released as well. Since we never invoke the destructor directly, we extract the release logic into its own method and call that:

void PeakmainFFmpeg::callPlayerJniError(int code, char *msg) {
    // release resources first
    release();
    // then fire the callback up to Java
    pJNICall->callPlayerJniError(code, msg);
}

void PeakmainFFmpeg::release() {
    if (pCodecContext != NULL) {
        avcodec_close(pCodecContext);
        avcodec_free_context(&pCodecContext);
        pCodecContext = NULL;
    }
    if (pContext != NULL) {
        avformat_close_input(&pContext);
        avformat_free_context(pContext);
        pContext = NULL;
    }
    // destroy the resample context
    if (swrContext != NULL) {
        swr_free(&swrContext); // frees the context and already NULLs the pointer; no extra free() needed
        swrContext = NULL;
    }
    if (resampleOutBuffer != NULL) {
        free(resampleOutBuffer);
        resampleOutBuffer = NULL;
    }
    avformat_network_deinit();
}
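
With the cleanup extracted like this, a destructor (if you define one) can simply delegate to it; a one-line sketch:

PeakmainFFmpeg::~PeakmainFFmpeg() {
    release(); // all teardown lives in one place
}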
[Image: test result]

Multi-threaded Playback

Create the child thread:

void PeakmainFFmpeg::play() {
    // decode and play simultaneously on a worker thread
    pthread_t playThread;
    pthread_create(&playThread, NULL, threadPlay, this);
    pthread_detach(playThread);
}

The threadPlay implementation:

void *threadPlay(void *context) {
    PeakmainFFmpeg *pFFmpeg = (PeakmainFFmpeg *) context;
    pFFmpeg->prepared();
    return 0;
}

prepared() is essentially the code we already had:

void PeakmainFFmpeg::prepared() {
    av_register_all();
    // initialize networking
    avformat_network_init();

    int formatOpenIntRes = 0;
    int formatFindStreamInfo = 0;
    int audioStreamIndex = 0;
    AVCodecParameters *pCodecParameters;
    AVCodec *avcodec;

    int codecParametersToContextRes = -1;
    int avcodecOpenRes = -1;
    int index = 0;
    AVPacket *pPacket;
    AVFrame *pFrame;
    jbyte *jPcmData;
    formatOpenIntRes = avformat_open_input(&pContext, url, NULL, NULL);
    if (formatOpenIntRes != 0) {
        // report the error back to the Java layer (resources must be released too)
        // log the error message
        LOGE("format open input error:%s", av_err2str(formatOpenIntRes));
        callPlayerJniError(formatOpenIntRes, av_err2str(formatOpenIntRes));
        return;
    }

    formatFindStreamInfo = avformat_find_stream_info(pContext, NULL);
    if (formatFindStreamInfo < 0) {
        LOGE("format find stream error:%s", av_err2str(formatFindStreamInfo));
        callPlayerJniError(formatFindStreamInfo, "format find stream error");

        return;
    }

    // find the audio stream
    audioStreamIndex = av_find_best_stream(pContext, AVMediaType::AVMEDIA_TYPE_AUDIO, -1, -1, NULL,
                                           0);
    if (audioStreamIndex < 0) {
        LOGE("format audio stream error:%s", av_err2str(audioStreamIndex));
        callPlayerJniError(FIND_STREAM_ERROR_CODE, "format audio stream error");
        return;
    }
    // find the decoder
    pCodecParameters = pContext->streams[audioStreamIndex]->codecpar;
    avcodec = avcodec_find_decoder(pCodecParameters->codec_id);
    if (avcodec == NULL) {
        LOGE("avcodec find decoder error");
        callPlayerJniError(CODEC_FIND_DECODER_ERROR_CODE, "avcodec find decoder error");
        return;
    }

    // open the decoder
    pCodecContext = avcodec_alloc_context3(avcodec);
    if (pCodecContext == NULL) {
        LOGE("codec alloc context error");
        callPlayerJniError(CODEC_ALLOC_CONTEXT_ERROR_CODE, "codec alloc context error");
        return;
    }
    // copy the stream parameters into pCodecContext
    codecParametersToContextRes = avcodec_parameters_to_context(pCodecContext, pCodecParameters);
    if (codecParametersToContextRes < 0) {
        LOGE("codec parameters to context error:%s", av_err2str(codecParametersToContextRes));
        callPlayerJniError(codecParametersToContextRes, av_err2str(codecParametersToContextRes));
        return;
    }
    avcodecOpenRes = avcodec_open2(pCodecContext, avcodec, NULL);
    if (avcodecOpenRes != 0) {
        LOGE("codec audio open error:%s", av_err2str(avcodecOpenRes));
        callPlayerJniError(avcodecOpenRes, av_err2str(avcodecOpenRes));
        return;
    }
    LOGE("采样率:%d,通道数:%d", pCodecParameters->sample_rate, pCodecParameters->channels);
    pPacket = av_packet_alloc();
    pFrame = av_frame_alloc();
    // public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes)

    //------------resampling start--------------
    struct SwrContext *swrContext;
    int64_t out_ch_layout = AV_CH_LAYOUT_STEREO;
    enum AVSampleFormat out_sample_fmt = AVSampleFormat::AV_SAMPLE_FMT_S16;
    int out_sample_rate = AUDIO_SAMPLE_RATE;
    int64_t in_ch_layout = pCodecContext->channel_layout;
    enum AVSampleFormat in_sample_fmt = pCodecContext->sample_fmt;
    int in_sample_rate = pCodecContext->sample_rate;
    // configure the resampling parameters
    swrContext = swr_alloc_set_opts(NULL, out_ch_layout, out_sample_fmt,
                                    out_sample_rate, in_ch_layout, in_sample_fmt, in_sample_rate, 0,
                                    NULL);
    if (swrContext == NULL) {
        callPlayerJniError(SWR_ALLOC_SET_OPTS_ERROR_CODE, "swr_alloc_set_opts error");
        return;
    }
    int swrInitRes = swr_init(swrContext);
    if (swrInitRes < 0) {
        callPlayerJniError(SWR_CONTEXT_INIT_ERROR_CODE, "swr init error");
        return;
    }
    // Playback: write() pushes the PCM into AudioTrack's buffer.
    // One frame is not one second; pFrame->nb_samples samples per channel determine the final output size.
    int outChannels = av_get_channel_layout_nb_channels(out_ch_layout);
    int datasize = av_samples_get_buffer_size(NULL, outChannels,
                                              pCodecParameters->frame_size,
                                              out_sample_fmt, 0);
    resampleOutBuffer = (uint8_t *) (malloc(datasize));
    //------------resampling end--------------


    // copy pFrame->data into a Java byte array
    jbyteArray jPcmByteArray = pJNICall->jniEnv->NewByteArray(datasize);
    while (av_read_frame(pContext, pPacket) >= 0) {
        if (pPacket->stream_index == audioStreamIndex) {
            // decode the compressed AVPacket into PCM
            int codecSendPacketRes = avcodec_send_packet(pCodecContext, pPacket);
            if (codecSendPacketRes == 0) {
                int codecReceiveFrameRes = avcodec_receive_frame(pCodecContext, pFrame);
                if (codecReceiveFrameRes == 0) {
                    index++;
                    LOGE("解码第:%d桢", index);
                    //调用重采样的方法
                    //struct SwrContext *s, uint8_t **out, int out_count,
                    //const uint8_t **in , int in_count

                    swr_convert(swrContext, &resampleOutBuffer, pFrame->nb_samples,
                                (const uint8_t **) pFrame->data, pFrame->nb_samples);

                    // sync the C buffer back to the Java array
                    jPcmData = pJNICall->jniEnv->GetByteArrayElements(jPcmByteArray, NULL);
                    memcpy(jPcmData, resampleOutBuffer, datasize);
                    // JNI_COMMIT: copy the C array contents back into the jbyteArray without freeing the C buffer
                    pJNICall->jniEnv->ReleaseByteArrayElements(jPcmByteArray, jPcmData, JNI_COMMIT);
                    pJNICall->callAudioTrackWrite(jPcmByteArray, 0, datasize);

                }

            }
        }
        // unref so the packet/frame buffers can be reused on the next iteration
        av_packet_unref(pPacket);
        av_frame_unref(pFrame);
    }
    // av_packet_free: 1. unrefs the data, 2. frees the AVPacket struct, 3. sets pPacket to NULL
    av_packet_free(&pPacket);
    av_frame_free(&pFrame);

    // copy jPcmData back and release it, then free the local ref so the Java GC can reclaim the array
    pJNICall->jniEnv->ReleaseByteArrayElements(jPcmByteArray, jPcmData, 0);
    pJNICall->jniEnv->DeleteLocalRef(jPcmByteArray);

}

Running this we will find that the program crashes, because:
1. A child (native) thread cannot use the main thread's JNIEnv.
2. JNIEnv is not shared between threads; each thread has its own.
So we create an enum for the thread mode:

enum ThreadMode {
    THREAD_CHILD, THREAD_MAIN
};

and modify the error-reporting method accordingly:

void PeakmainJNICall::callPlayerJniError(ThreadMode threadMode, int code, char *msg) {
    if (threadMode == THREAD_MAIN) {
        jstring jMsg = jniEnv->NewStringUTF(msg);
        jniEnv->CallVoidMethod(jPlayerObj, jPlayerErrorMid, code, jMsg);
        jniEnv->DeleteLocalRef(jMsg);
    } else if (threadMode == THREAD_CHILD) {
        // attach to get a JNIEnv that is valid on the current thread
        JNIEnv *env;
        if (javaVM->AttachCurrentThread(&env, 0) != JNI_OK) {
            LOGE("get child thread jni error");
            return;
        }
        jstring jMsg = env->NewStringUTF(msg);
        env->CallVoidMethod(jPlayerObj, jPlayerErrorMid, code, jMsg);
        env->DeleteLocalRef(jMsg);
        javaVM->DetachCurrentThread();
    }
}
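
Call sites then pass the mode explicitly. As a sketch, the PeakmainFFmpeg wrapper forwards it, and prepared(), which runs on the pthread worker, reports in THREAD_CHILD mode:

void PeakmainFFmpeg::callPlayerJniError(ThreadMode threadMode, int code, char *msg) {
    release();
    pJNICall->callPlayerJniError(threadMode, code, msg);
}

// inside prepared(), which runs on the worker thread:
callPlayerJniError(THREAD_CHILD, formatOpenIntRes, av_err2str(formatOpenIntRes));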

Obtaining the JavaVM:

JavaVM *pJavaVM = NULL;
// JNI_OnLoad is invoked when the .so is loaded; cache the JavaVM here
extern "C"
JNIEXPORT jint JNICALL
JNI_OnLoad(JavaVM *javaVM, void *reserved) {
    pJavaVM = javaVM;
    JNIEnv *env;
    if (javaVM->GetEnv((void **) &env, JNI_VERSION_1_4) != JNI_OK) {
        return -1;
    }
    return JNI_VERSION_1_4;
}
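
The cached pJavaVM can then be handed to the JNI helper when the player is created, e.g. inside the npay bridge (a sketch, reusing the constructor assumed earlier):

pJNICall = new PeakmainJNICall(pJavaVM, env, instance); // env/instance come from the JNI entry point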
