ffmpeg内存读取音频数据转码后发送到海思AI→AO

前两节 “从内存中读取数据” “将数据输出到内存”
转载自博客https://blog.csdn.net/leixiaohua1020/article/details/12980423/

从内存中读取数据

ffmpeg一般情况下支持打开一个本地文件,例如“C:\test.avi”

或者是一个流媒体协议的URL,例如“rtmp://222.31.64.208/vod/test.flv”

其打开文件的函数是avformat_open_input(),直接将文件路径或者流媒体URL的字符串传递给该函数就可以了。

但其是否支持从内存中读取数据呢?这个问题困扰了我很长时间。当时在做项目的时候,通过Winpcap抓取网络上的RTP包,打算直接送给ffmpeg进行解码。一直没能找到合适的方法。因为抓取的数据包是存在内存中的,所以无法传递给avformat_open_input()函数其路径(根本没有路径= =)。当然也可以将抓取的数据包保存成文件,然后用ffmpeg打开这个文件,但是这样的话,程序就太难控制了。

后来经过分析ffmpeg的源代码,发现其竟然是可以从内存中读取数据的,代码很简单,如下所示:

// Allocate the demuxer context manually so a custom AVIOContext can be
// attached BEFORE avformat_open_input() is called.
AVFormatContext *ic = NULL;
ic = avformat_alloc_context();

// 32 KiB I/O buffer owned by the AVIOContext; fill_iobuffer is the read
// callback (write_flag = 0, so this is a read-only context).
unsigned char * iobuffer=(unsigned char *)av_malloc(32768);
AVIOContext *avio =avio_alloc_context(iobuffer, 32768,0,NULL,fill_iobuffer,NULL,NULL);
ic->pb=avio;
// With ic->pb set, the URL argument ("nothing") is ignored by FFmpeg.
err = avformat_open_input(&ic, "nothing", NULL, NULL);

关键要在avformat_open_input()之前初始化一个AVIOContext,而且将原本的AVFormatContext的指针pb(AVIOContext类型)指向这个自行初始化的AVIOContext。当自行指定了AVIOContext之后,avformat_open_input()里面的URL参数就不起作用了。示例代码开辟了一块空间iobuffer作为AVIOContext的缓存。
fill_iobuffer则是将数据读取至iobuffer的回调函数。fill_iobuffer()形式(参数,返回值)是固定的,是一个回调函数,如下所示(只是个例子,具体怎么读取数据可以自行设计)。示例中回调函数将文件中的内容通过fread()读入内存。


// Read callback used by the AVIOContext.
// Contract (fixed by FFmpeg): copy up to buf_size bytes into buf and return
// the number of bytes actually read; return a negative value when no more
// data is available (newer FFmpeg expects AVERROR_EOF specifically).
// The opaque pointer is unused here; data comes from the global FILE*
// fp_open via fread().
// FIX: the original declared the parameter as `bufsize` but the body used
// `buf_size`, which does not compile.
int fill_iobuffer(void * opaque,uint8_t *buf, int buf_size){
	if(!feof(fp_open)){
		// Read at most buf_size bytes from the open file into buf.
		int true_size=fread(buf,1,buf_size,fp_open);
		return true_size;
	}else{
		// End of input: signal EOF to FFmpeg (-1 worked on older FFmpeg;
		// prefer AVERROR_EOF on current versions).
		return -1;
	}
}

整体结构大致如下:

// Global input file handle read by fill_iobuffer().
FILE *fp_open;
 
// Read callback: copies up to buf_size bytes into buf, returns bytes read.
int fill_iobuffer(void *opaque, uint8_t *buf, int buf_size){
     
...
}
 
int main(){
     
	...
	// NOTE(review): "rb+" opens read/write binary; plain "rb" would suffice
	// since the file is only read here.
	fp_open=fopen("test.h264","rb+");
	AVFormatContext *ic = NULL;
	ic = avformat_alloc_context();
	// Custom read-only AVIOContext over a 32 KiB buffer.
	unsigned char * iobuffer=(unsigned char *)av_malloc(32768);
	AVIOContext *avio =avio_alloc_context(iobuffer, 32768,0,NULL,fill_iobuffer,NULL,NULL);
	ic->pb=avio;
	// URL argument is ignored once ic->pb is set.
	err = avformat_open_input(&ic, "nothing", NULL, NULL);
	...//decode
}

将数据输出到内存

和从内存中读取数据类似,ffmpeg也可以将处理后的数据输出到内存。

回调函数如下示例,可以将输出到内存的数据写入到文件中。

// Write callback used by the output AVIOContext: FFmpeg hands back muxed
// data in buf (buf_size bytes); here it is appended to the global fp_write.
// Returns the number of bytes consumed, or -1 on a failed/short write.
// FIX: the original tested feof(fp_write) before writing, but feof() is
// meaningless on an output-only stream and never reflects write failures —
// check the fwrite() result instead.
int write_buffer(void *opaque, uint8_t *buf, int buf_size){
	int true_size=fwrite(buf,1,buf_size,fp_write);
	if(true_size != buf_size){
		// Short write (disk full, I/O error): report failure to FFmpeg.
		return -1;
	}
	return true_size;
}

主函数如下所示。


// Global output file handle written by write_buffer().
FILE *fp_write;
 
// Write callback: consumes buf_size bytes from buf, returns bytes written.
int write_buffer(void *opaque, uint8_t *buf, int buf_size){
     
...
}
 
// Output-side skeleton: mux to memory through a custom write AVIOContext.
// FIX: `main(){` (implicit int) is not valid C++; declare the return type.
int main(){
	...
	fp_write=fopen("src01.h264","wb+"); // destination file written by write_buffer()
	...
	AVFormatContext* ofmt_ctx=NULL;
	avformat_alloc_output_context2(&ofmt_ctx, NULL, "h264", NULL);
	unsigned char* outbuffer=(unsigned char*)av_malloc(32768);
 
	// FIX: write_flag must be 1 for a context that produces output; the
	// original passed 0 while still installing the write callback.
	AVIOContext *avio_out =avio_alloc_context(outbuffer, 32768,1,NULL,NULL,write_buffer,NULL);  
 
	ofmt_ctx->pb=avio_out; 
	// FIX: OR the flag in instead of overwriting the defaults set by
	// avformat_alloc_output_context2().
	ofmt_ctx->flags|=AVFMT_FLAG_CUSTOM_IO;
	...
}

从内存读取数据到ffmpeg转码

  1. 新建AVFormatContext容器并构建AVIOContext
	// Input side: attach a custom read AVIOContext to _fmt_ctx so FFmpeg
	// pulls audio from the in-memory queue instead of a URL.
	AVFormatContext* _fmt_ctx = NULL;
    AVIOContext *avio = NULL;
    int readSize = 1152*8;          // callback buffer: 8 x 1152-byte chunks
    unsigned char * iobuffer = NULL;
 
    // NOTE(review): "rb" (binary) would be the safer mode; "r" happens to be
    // equivalent only on POSIX — confirm the target platform.
    fp_open=fopen(filename,"r");
    _fmt_ctx = avformat_alloc_context();
    iobuffer=(unsigned char *)av_malloc(readSize);
    // opaque = this, so the static fill_iobuffer can reach the member queue;
    // write_flag = 0 → read-only context.
    avio =avio_alloc_context(iobuffer, readSize,0,(void *)this,CInputCodeChn::fill_iobuffer,NULL,NULL);  
    //A separate reader thread keeps the queue consumed by fill_iobuffer full.
	_fmt_ctx->pb = avio;
    // FIX: OR the flag in; plain '=' wiped the default flags FFmpeg set when
    // the context was allocated.
    _fmt_ctx->flags|=AVFMT_FLAG_CUSTOM_IO;
    // URL is ignored because _fmt_ctx->pb is already set.
    ret = avformat_open_input(&_fmt_ctx, "", NULL, NULL);
    if (ret < 0)
    {
        printf("CInputCodeChn: failed to call avformat_open_input\n");
        exit(1);
    }
  1. 编写发送接口
//从内存中读取buf数据传入长度size
int CInputCodeChn::sendAudioBuffer(char* buf, int size)
{
     
    if((buf == NULL) && (size <= 0) )
    {
     
        return -1;
    }
    pPbuf = memPbuf_alloc(PBUF_RAW,size,PBUF_RAM);
    if(pPbuf == NULL)
    {
     
        return -1;
    }
    pthread_mutex_lock(&mutex_pbuf);
    memcpy(pPbuf->payload,buf,size);
    pPbuf->tot_len = size;
    //将数据压入发送队列链表
    m_rawFrameList.push_back(pPbuf);

    pthread_mutex_unlock(&mutex_pbuf);
    return size;
}

新建一个线程不断地对网络流进行读取缓存将数据存储到m_rawFrameList队列中

  1. 在fill_iobuffer中对缓存队列进行读取并填充buf
    //AVIOContext read callback: pop one queued frame and copy it into buf.
    //opaque is the CInputCodeChn instance whose m_rawFrameList feeds the
    //demuxer. Blocks (polling every 10 ms) until a frame is available.
    static int fill_iobuffer(void *opaque, uint8_t *buf, int buf_size)
    {
        struct memPbuf *pStreamFrame = NULL;
        CInputCodeChn *inputCodeChn = (CInputCodeChn *)opaque;
        int true_size = 0;

        while(1){
            pthread_mutex_lock(&inputCodeChn->mutex_pbuf);
            if(inputCodeChn->m_rawFrameList.size() > 0)
            {
                pStreamFrame = (struct memPbuf *)inputCodeChn->m_rawFrameList.front();
                if (NULL == pStreamFrame) {
                    // FIX: drop a NULL entry instead of dereferencing it —
                    // the original logged the error but fell through to
                    // memcpy() on the NULL frame.
                    printf( "new pStreamFrame fail\n");
                    inputCodeChn->m_rawFrameList.pop_front();
                    pthread_mutex_unlock(&inputCodeChn->mutex_pbuf);
                    continue;
                }
                inputCodeChn->m_rawFrameList.pop_front();
                // FIX: never copy more than FFmpeg's buffer can hold; the
                // original memcpy'd tot_len bytes unconditionally, a buffer
                // overflow whenever tot_len > buf_size.
                // NOTE(review): excess bytes are dropped — the producer must
                // keep frames <= the AVIOContext buffer size (readSize).
                true_size = pStreamFrame->tot_len;
                if (true_size > buf_size) {
                    true_size = buf_size;
                }
                memcpy(buf,pStreamFrame->payload,true_size);
                inputCodeChn->memPbuf_free(pStreamFrame);
                pthread_mutex_unlock(&inputCodeChn->mutex_pbuf);
                return true_size;
            }else{
                // Queue empty: release the lock and poll again shortly.
                pthread_mutex_unlock(&inputCodeChn->mutex_pbuf);
                av_usleep(10*1000);
                continue;
            }
        }
    }
  1. 接收解码后的数据
bool CInputCodeChn::ReciverThreadProc()
{
     
    AVFrame* pFrame = av_frame_alloc();
    AVPacket packet;
    av_init_packet(&packet);
    struct timeval now;
    int got_sound;

    while (_state == PREPARED)
    {
     

    }

    while (_state == RUNNING)
    {
     
        packet.data = NULL;
        packet.size = 0;
        if (av_read_frame(_fmt_ctx, &packet) < 0)
        {
     
            printf("there is no context mic \n");
            break;
        }

        if (packet.stream_index == _index_audio)
        {
     
            if (avcodec_decode_audio4(_fmt_ctx->streams[_index_audio]->codec, pFrame, &got_sound, &packet) < 0)
            {
     
                printf("fail to avcodec_decode_audio4 \n");
                break;
            }
            av_free_packet(&packet);
            if (!got_sound)
            {
     
                continue;
            }
            //处理解码后的数据帧pFrame
        }

    }
    av_frame_free(&pFrame);
    return true;
}

读取编码后的帧缓存发送到海思音频接口

  1. 构建编码器上下文
    // Build a WAV muxer whose "encoder" is raw PCM s16le; the muxed output is
    // routed to the HiSilicon ADEC through write_buffer() instead of a file.
    fp_open=fopen(fileName,"wb+");
    ret = avformat_alloc_output_context2(&_fmt_ctx_out, NULL, "wav", NULL);
    if (ret < 0)
    {
        printf("Mixer: failed to call avformat_alloc_output_context2\n");
        return -1;
    }

    // Single audio stream at index 0.
    AVStream* stream_a = NULL;
    stream_a = avformat_new_stream(_fmt_ctx_out, NULL);
    if (stream_a == NULL)
    {
        printf("Mixer: failed to call avformat_new_stream\n");
        return -1;
    }
    _index_a_out = 0;

    stream_a->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    // NOTE(review): despite the name, codec_aac holds the PCM s16le encoder.
    codec_aac = NULL;
    codec_aac = avcodec_find_encoder(AV_CODEC_ID_PCM_S16LE);
    if (codec_aac == NULL)
    {
        printf("Mixer: failed to call avcodec_find_encoder ++++\n");
        return -1;
    }
    stream_a->codec->codec = codec_aac;
    stream_a->codec->sample_rate = 44100;   // 44.1 kHz, mono
    stream_a->codec->channels = 1;
    stream_a->codec->channel_layout = av_get_default_channel_layout(1);
    stream_a->codec->sample_fmt = codec_aac->sample_fmts[0];
    stream_a->codec->bit_rate = 32000;
    stream_a->codec->time_base.num = 1;
    stream_a->codec->time_base.den = stream_a->codec->sample_rate;
    stream_a->codec->codec_tag = 0;

    if (_fmt_ctx_out->oformat->flags & AVFMT_GLOBALHEADER)
        stream_a->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

    if (avcodec_open2(stream_a->codec, stream_a->codec->codec, NULL) < 0)
    {
        printf("Mixer: failed to call avcodec_open2\n");
        return -1; 
    }

    // Custom write-only AVIOContext (write_flag = 1): every muxed chunk is
    // delivered to CAmixFilter::write_buffer, which pushes it to the ADEC.
    // opaque carries fp_open for the (disabled) file-dump fallback.
    unsigned char* outbuffer=(unsigned char*)av_malloc(4608);
	AVIOContext *avio_out =avio_alloc_context(outbuffer,4608,1,fp_open,NULL,CAmixFilter::write_buffer,NULL);  
 
	_fmt_ctx_out->pb=avio_out; 
	// FIX: OR the flag in; plain '=' discarded the context's default flags.
	_fmt_ctx_out->flags|=AVFMT_FLAG_CUSTOM_IO;
    //write_buffer forwards the encoded wav data to the HiSilicon interface.
    //No avio_open() is needed: the custom pb replaces file I/O entirely.

    if (avformat_write_header(_fmt_ctx_out, NULL) < 0)
    {
        printf("Mixer: failed to call avformat_write_header\n");
        return -1;
    }

    // FIX: pass "" rather than NULL — av_dump_format prints the url with %s,
    // and a NULL string is undefined behavior on some platforms.
    av_dump_format(_fmt_ctx_out, _index_a_out, "", 1);
  1. 构建write_buffer 发送函数
    //AVIOContext write callback: forwards each muxed WAV chunk straight to
    //the HiSilicon audio decoder for playback (ADEC -> AO).
    //opaque (a FILE*) is unused; returns the number of bytes consumed, or 0
    //when the send fails or there is nothing to send.
    static int write_buffer(void *opaque, uint8_t *buf, int buf_size){
        HI_S32 s32Ret;
        AUDIO_STREAM_S stAudioStream;    

        if(buf_size > 0)
        {
            stAudioStream.pStream = buf;
            stAudioStream.u32Len = buf_size;
            //Push the stream into the HiSilicon ADEC (blocking send).
            s32Ret = HI_MPI_ADEC_SendStream(0, &stAudioStream, HI_TRUE);
            if(HI_SUCCESS != s32Ret)
            {
                printf("%s: HI_MPI_ADEC_SendStream(%d) failed with %#x!\n",\
                    __FUNCTION__, 0, s32Ret);
                return 0;
            }
            return buf_size;
        }
        // FIX: the original fell off the end of this non-void function when
        // buf_size <= 0 — undefined behavior.
        return 0;
    }  

你可能感兴趣的:(ffmpeg内存读取音频数据转码后发送到海思AI→AO)