Using the FFmpeg API to merge multiple YUV inputs into one

Using the FFmpeg API, build a filter graph, feed it multiple yuv inputs, and let the graph composite them into a single output: a 2x2 grid.

The FFmpeg source tree ships several reference demos under docs/examples/. Two are worth consulting here: filtering_video.c (its filter graph has only one input, and that input is yuv decoded from a video file rather than raw yuv fed in directly, but it is a good reference for building a filter graph and wiring up its input and output) and encode_video.c (it reads a raw yuv file as input; it is about encoding, but it shows how to feed yuv directly through an AVFrame).

First, the final demo result:
[Screenshot 1: the composited 2x2 grid output]

Key points:

1.0 Create a filter graph with multiple inputs, one buffer filter per input
2.0 The filter-graph description string
3.0 Feed your own yuv data in through an AVFrame
Mind the FFmpeg version: some older library builds may lack the buffer filter; the 3.3.8 build I tried, for example, did not have it.
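
A quick way to check is a tiny probe program (a sketch; on builds older than FFmpeg 4.0 you would also need to call avfilter_register_all() first):

#include <stdio.h>
#include <libavfilter/avfilter.h>

int main(void){
    //avfilter_get_by_name() returns NULL when a filter was not compiled in
    printf("buffer:     %s\n", avfilter_get_by_name("buffer")     ? "yes" : "no");
    printf("buffersink: %s\n", avfilter_get_by_name("buffersink") ? "yes" : "no");
    printf("overlay:    %s\n", avfilter_get_by_name("overlay")    ? "yes" : "no");
    printf("nullsrc:    %s\n", avfilter_get_by_name("nullsrc")    ? "yes" : "no");
    printf("libavfilter version: %u\n", avfilter_version());
    return 0;
}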

1.0 Creating the multi-input buffer sources. When calling avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx) to create a buffersrc context, an args parameter is required: the option string for the buffer filter. The available options can be inspected with:
#ffmpeg -h filter=buffer

Filter buffer
  Buffer video frames, and make them accessible to the filterchain.
    Inputs:
        none (source filter)
    Outputs:
       #0: default (video)
buffer AVOptions:
  width             <int>        ..FV..... (from 0 to INT_MAX) (default 0)
  video_size        <image_size> ..FV.....
  height            <int>        ..FV..... (from 0 to INT_MAX) (default 0)
  pix_fmt           <pix_fmt>    ..FV..... (default none)
  sar               <rational>   ..FV..... sample aspect ratio (from 0 to DBL_MAX) (default 0/1)
  pixel_aspect      <rational>   ..FV..... sample aspect ratio (from 0 to DBL_MAX) (default 0/1)
  time_base         <rational>   ..FV..... (from 0 to DBL_MAX) (default 0/1)
  frame_rate        <rational>   ..FV..... (from 0 to DBL_MAX) (default 0/1)
  sws_param         <string>     ..FV.....

 The settings used here (the code passes pix_fmt as the numeric value of AV_PIX_FMT_YUV420P, which is 0):
video_size=640x480:pix_fmt=0:time_base=1/25;
If the time_base denominator is set above 25, say 30 or 60, the composited video drops frames. The buffer filter also has a frame_rate option, which is not specified here; that is probably the connection, since the default frame rate is 25 in most cases.
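
If the frame drops really are frame_rate related, a minimal sketch of the fix (my assumption, not verified here) is to pin frame_rate in the same args string that is later passed to avfilter_graph_create_filter():

char args[512];
//same options as above, plus an explicit frame_rate matching the 1/25 time_base
snprintf(args, sizeof(args),
         "video_size=%dx%d:pix_fmt=%d:time_base=1/25:frame_rate=25/1:pixel_aspect=1/1",
         640, 480, AV_PIX_FMT_YUV420P);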

2.0 The filter-graph description string
This is the most interesting part: to change the filter effect later, you only have to edit this string, which is quick and easy. As for how to write the description itself, prototype and debug it with the ffmpeg command line first; per-input effects can be added there too if needed.
For example, Liu Qi's book "FFmpeg从入门到精通" (page 192, section 6.4, on FFmpeg multi-grid video processing) introduces such a command line:
[Screenshot 2: the multi-grid command line from the book]

The string below is adapted from that. Since our inputs are already a fixed 640x480, no scaling is needed; we simply stack them with overlay:

//2x2 grid; mind the coordinates
// [in0] [in1] [in2] [in3] must match the output pads of the buffersrc filters created in the code!!!!
//[base] [tmp1] [tmp2] [tmp3] are just intermediate labels
//the final [out] can be left off; it is the default name
const char *filter_descr = 
"nullsrc=size=1280x960 [base];"  //nullsrc is itself a filter: it supplies an empty 1280x960 "canvas", whose output we label [base]
"[base][in0]overlay=0:0[tmp1];" //feed [base] and [in0] (the input buffer named in0 in the code) into overlay, which pastes [in0] on top of [base] at coordinates 0:0; the result is labeled [tmp1], and so on down the chain
"[tmp1][in1]overlay=640:0[tmp2];"
"[tmp2][in2]overlay=0:480[tmp3];"
"[tmp3][in3]overlay=640:480[out]";

3.0 Feeding your own yuv data through an AVFrame

        //1. Allocate the frame first. Note this only allocates the struct itself, not the memory for the video data; after all, at this point we don't yet know how much space the video will need
        frame[i] = av_frame_alloc();
        frame[i]->format = AV_PIX_FMT_YUV420P;
        frame[i]->width  = w;
        frame[i]->height = h;
        //with the first frame's pts set to 0, one extra frame is output and the first frame is green
        //with it set to 1, two extra frames are output and the first two frames are green.... no idea why
        //starting from -1 works. OK
        frame[i]->pts = -1; 

        //2. Allocate the AVFrame's buffers. This is the call that actually allocates the memory holding the video data; the format, width and height set above tell it how much space is needed. (Alternatively, you can allocate the memory yourself.)
        //the second argument is the byte alignment: 1 here
        int ret = av_frame_get_buffer(frame[i], 1);


      //3. Finally, the AVFrame can be filled with data
            memcpy(frame[i]->data[0],buff,w*h);
            memcpy(frame[i]->data[1],buff+w*h,w*h/4);
            memcpy(frame[i]->data[2],buff+w*h*5/4,w*h/4);
             //pts must also change every frame, otherwise nothing is ever output......
            frame[i]->pts ++;
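
If the magic -1 start feels fragile, an alternative sketch (my own assumption, not verified against this demo) is to drive pts from an explicit counter so every pushed frame carries a fresh, monotonically increasing timestamp:

//alternative loop head: one tick per frame under the buffer source's 1/25 time_base
int64_t frame_index = 0;
while (fread(buff, 1, framelen, fp_in) == (size_t)framelen) {
    for (i = 0; i < 4; i++) {
        //fill frame[i]->data[0..2] as above, then stamp the pts
        frame[i]->pts = frame_index;
    }
    frame_index++;
    //...push into the four buffer sources and drain the sink as below...
}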

The full code:

On Ubuntu 18.04, FFmpeg was compiled first to obtain its libraries.
Build command: (link errors about unresolved FFmpeg symbols sometimes show up; check the order of the -l flags, since the more fundamental a library is, the further back it has to go)

all:
	gcc -I../../ffmpeg_src/0714_install/include/ -L../../ffmpeg_src/0714_install/lib/ hebing.c -g  -lavfilter -lswresample -lswscale  -lavformat -lavcodec -lavutil -lpostproc -lm -lpthread -lx264 

#../../ffmpeg_src/0714_install is the FFmpeg build/install prefix
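
If the FFmpeg install ships pkg-config files (the .pc files under lib/pkgconfig), letting pkg-config emit the flags sidesteps the -l ordering problem entirely:

export PKG_CONFIG_PATH=../../ffmpeg_src/0714_install/lib/pkgconfig
gcc hebing.c -g $(pkg-config --cflags --libs libavfilter libavformat libavcodec libavutil)
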
//canok 20210721
/*
*Filter graph: composite multiple inputs into one
*The input video size is fixed at 640x480 for now
*Based on the demos in the FFmpeg source tree: filtering_video.c and encode_video.c
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>

//2x2 grid; mind the coordinates
// [in0] [in1] [in2] [in3] must match the output pads of the buffersrc filters created in the code!!!!
//[base] [tmp1] [tmp2] [tmp3] are just intermediate labels
//the final [out] can be left off; it is the default name
const char *filter_descr = 
"nullsrc=size=1280x960 [base];"
"[base][in0]overlay=0:0[tmp1];"
"[tmp1][in1]overlay=640:0[tmp2];"
"[tmp2][in2]overlay=0:480[tmp3];"
"[tmp3][in3]overlay=640:480[out]";


typedef struct FilteringContext {
    AVFilterContext *buffersink_ctx;
    AVFilterContext **buffersrc_ctx;
    AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext filter_ctx;

//used when decoding from an mp4 file
static AVCodecContext *dec_ctx;
static AVFormatContext *fmt_ctx;
static int video_stream_index = -1;

int64_t getNowUs(){
    struct timeval tv;
    gettimeofday(&tv, 0);
    return (int64_t)tv.tv_sec * 1000000 + (int64_t)tv.tv_usec;
}


static int open_input_file(const char *filename)
{
    int ret;
    AVCodec *dec;

    if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }

    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    /* select the video stream */
    ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
        return ret;
    }
    video_stream_index = ret;

    /* create decoding context */
    dec_ctx = avcodec_alloc_context3(dec);
    if (!dec_ctx)
        return AVERROR(ENOMEM);
    avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_stream_index]->codecpar);

    /* init the video decoder */
    if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
        return ret;
    }

    return 0;
}

static int init_filter(int bDecode,FilteringContext* fctx, int inW, int inH)
{
    char args[512];
    char pad_name[10];
    int ret = 0;
    int i;
    const AVFilter **buffersrc = (const AVFilter**)av_malloc(4*sizeof(AVFilter*));
    const AVFilter *buffersink = NULL;
    AVFilterContext **buffersrc_ctx = (AVFilterContext**)av_malloc(4*sizeof(AVFilterContext*));
    AVFilterContext *buffersink_ctx = NULL;

    //output pads of the buffersrc filters
    AVFilterInOut **outputs = (AVFilterInOut**)av_malloc(4*sizeof(AVFilterInOut*));

    //input pad of the buffersink
    AVFilterInOut *inputs = avfilter_inout_alloc();

    AVFilterGraph *filter_graph = avfilter_graph_alloc();
    for (i = 0; i < 4; i++)
    {
        buffersrc[i] = NULL;
        buffersrc_ctx[i] = NULL;
        outputs[i] = avfilter_inout_alloc();
    }
    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

        for (i = 0; i < 4; i++)
        {//get the buffer filter for each of the 4 inputs
            buffersrc[i] = avfilter_get_by_name("buffer");
        }
        //and a single buffersink
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc[0] || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        for (i = 0; i < 4; i++)
        {
            printf("bDecode %d %d \n",bDecode,!bDecode);
            if(!bDecode){
                    snprintf(args, sizeof(args),
                    "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                    inW, inH, AV_PIX_FMT_YUV420P,1,25,1,1);
                    //with this time_base set to 1/anything-greater-than-25, frames get dropped.... no idea why
            }
            else
            {
                    AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
                    snprintf(args, sizeof(args),
                "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                time_base.num, time_base.den,
                dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
             }
            printf("[%s%d] args:%s\n",__FUNCTION__,__LINE__,args);
            snprintf(pad_name, sizeof(pad_name), "in%d", i);
            //create the in%d buffer source
            ret = avfilter_graph_create_filter(&(buffersrc_ctx[i]), buffersrc[i], pad_name,
                args, NULL, filter_graph);
            if (ret < 0) {
                printf("[%s%d] create source erro i:%d,args:%s \n",__FUNCTION__,__LINE__,i,args);
                av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
                goto end;
            }
        }

        //create the out sink
        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
            NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }
   
        enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
        ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
                                  AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }

   
    /* Endpoints for the filter graph. */
    for (i = 0; i < 4; i++)
    {//for the 4 sources created above:
    //each src's output pad connects to the in%d label inside the graph; these must match the [in0] [in1] [in2] [in3] names in the description
        snprintf(pad_name, sizeof(pad_name), "in%d", i);
        outputs[i]->name = av_strdup(pad_name);
        outputs[i]->filter_ctx = buffersrc_ctx[i];
        outputs[i]->pad_idx = 0;
        //avfilter_graph_parse_ptr expects the open output pads as a linked list, so chain them via next (NULL-terminated)
        if (i == 4 - 1)
            outputs[i]->next = NULL;
        else
            outputs[i]->next = outputs[i + 1];
    }

    //the sink's input pad connects to the out label inside the graph
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;
    if (!outputs[0]->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    //parse the filter-graph description
    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
        &inputs, outputs, NULL)) < 0)
        goto end;

     //validate and configure all the links and formats in the graph
    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;
    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;
end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(outputs); //frees the whole chain starting at outputs[0]
    av_free(outputs);             //the array itself still has to be freed
    av_free(buffersrc);
    return ret;
}

#define FROM_YUVFILE 1
int main(){
    int w=640,h=480;

    FILE *fp_out = fopen("1280x960_yuv420p.yuv","wb");
    if(fp_out == NULL){
        printf("open output file err\n");
        return -1;
    }

    #if FROM_YUVFILE
    //read frame data from a yuv file; data from another decoder could be fed in here just as well
    if(init_filter(0,&filter_ctx, w, h) !=0){
        printf("init_filter err!\n");
        return -1;
    }

    FILE *fp_in = fopen("640x480_yuv420p.yuv","rb");
    if(fp_in == NULL){
        printf("open input file err \n");
        return -1;
    }

    int framelen = w*h*3/2;
    unsigned char * buff = malloc(framelen);
    if(buff == NULL){
        printf("malloc erro\n");
        return -1;
    }
//1.0 first allocate the frames that will hold the data
    AVFrame *frame[4] = {0};
    AVFrame *filt_frame = NULL;
    unsigned char * buf_in[4];
    int i=0;
    for(i=0;i<4;i++){
        frame[i] = av_frame_alloc();
        frame[i]->format = AV_PIX_FMT_YUV420P;
        frame[i]->width  = w;
        frame[i]->height = h;
        //with the first frame's pts set to 0, one extra frame is output and the first frame is green
        //with it set to 1, two extra frames are output and the first two frames are green....
        //it should start from -1. OK
        frame[i]->pts = -1; 

        //allocate memory for the AVFrame; two ways to do it.
        #if 1
        //byte alignment of 1, so linesize equals width and the plain memcpy below works
        int ret = av_frame_get_buffer(frame[i], 1);
        if(ret < 0){
            printf("[%s%d] getbuffer err\n",__FUNCTION__,__LINE__);
            return -1;
        }
        ret = av_frame_make_writable(frame[i]);
        if(ret < 0){
            printf("[%s%d] make_writable  err\n",__FUNCTION__,__LINE__);
            return -1;
        }
        #else
        //byte alignment of 1 here as well
        buf_in[i] = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, w, h, 1));
            //printf("sizeo %d\n",av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1));
            av_image_fill_arrays(frame[i]->data, frame[i]->linesize, buf_in[i],
                AV_PIX_FMT_YUV420P, w, h, 1);
        //int ret = av_image_alloc(frame[i]->data, frame[i]->linesize, frame[i]->width,frame[i]->height, frame[i]->format, 32);
        //if(ret <0){
       //     printf("[%s%d] erro to alloc image\n",__FUNCTION__,__LINE__);
       // }
        #endif
    }
    filt_frame = av_frame_alloc();
    if (!frame[0] || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }

    
    while(1){
        int ret = fread(buff,1,framelen,fp_in);
        if(ret < framelen){
            break;
        }
//2.0 fill the frames with data
        for(i=0;i<4;i++){
            #if 1
            memcpy(frame[i]->data[0],buff,w*h);
            memcpy(frame[i]->data[1],buff+w*h,w*h/4);
            memcpy(frame[i]->data[2],buff+w*h*5/4,w*h/4);
            #else
            memcpy(buf_in[i],buff,framelen);
            frame[i]->data[0] = buf_in[i];
            frame[i]->data[1] = buf_in[i]+w*h;
            frame[i]->data[2] = buf_in[i]+w*h*5/4;
            #endif
             //pts must also keep changing, otherwise nothing is ever output...
            frame[i]->pts ++;
            printf("[%s%d]frame[%d] %dx%d %d pts:%ld\n",__FUNCTION__,__LINE__,i,frame[i]->width,frame[i]->height,frame[i]->format,frame[i]->pts);
        }

        int64_t t1 = getNowUs();

//3.0 push data into the 4 buffer sources
        if (av_buffersrc_add_frame_flags(filter_ctx.buffersrc_ctx[0], frame[0], AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
            break;
        }
        if (av_buffersrc_add_frame_flags(filter_ctx.buffersrc_ctx[1], frame[1], AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
            break;
        }
        if (av_buffersrc_add_frame_flags(filter_ctx.buffersrc_ctx[2], frame[2], AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
            break;
        }
        if (av_buffersrc_add_frame_flags(filter_ctx.buffersrc_ctx[3], frame[3], AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
            break;
        }

//4.0 pull data from the sink end of the filter graph
        while (1) {
            //printf("[%d:%s,%s] can! \n",__LINE__,__FILE__,__FUNCTION__);
            ret = av_buffersink_get_frame(filter_ctx.buffersink_ctx, filt_frame);
            //printf("[%d:%s,%s] can! ret:%d  filt_frame: format:%d, %dx%d, pts:%ld \n",__LINE__,__FILE__,__FUNCTION__,ret,filt_frame->format,filt_frame->width,filt_frame->height,filt_frame->pts);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){
                break;
            }
            else if (ret < 0){
                printf("[%s%d] get frame err\n",__FUNCTION__,__LINE__);
                return -1;
            }

            int64_t t2 = getNowUs();
            //log how long the filtering actually took
            printf("taketimes %ld %ld,%ld filt_frame--: format:%d, %dx%d, pts:%ld\n",t2-t1,t2,t1,filt_frame->format,filt_frame->width,filt_frame->height,filt_frame->pts);
            //pull the data out by hand and write it to the file
            //(the chroma writes assume linesize == width/2, i.e. no row padding;
            // strictly, data[1]/data[2] should be written row by row using linesize[1]/linesize[2])
            fwrite(filt_frame->data[0],1,filt_frame->height*filt_frame->linesize[0],fp_out );
            fwrite(filt_frame->data[1],1,filt_frame->height*filt_frame->width/4,fp_out );
            fwrite(filt_frame->data[2],1,filt_frame->height*filt_frame->width/4,fp_out );
            av_frame_unref(filt_frame);
        }
    }

        av_frame_free(&(frame[0]));
        av_frame_free(&(frame[1]));
        av_frame_free(&(frame[2]));
        av_frame_free(&(frame[3]));

        fclose(fp_in);
        free(buff);
#else
//decode yuv frames from an mp4 file
    open_input_file("640x480_jayuguan.mp4");
    init_filter(1,&filter_ctx, w, h);

    int ret;
    AVPacket packet;
    AVFrame *frame;
    AVFrame *filt_frame;

    frame = av_frame_alloc();
    filt_frame = av_frame_alloc();
    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
            break;

        if (packet.stream_index == video_stream_index) {
            ret = avcodec_send_packet(dec_ctx, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
                break;
            }

            while (ret >= 0) {
                ret = avcodec_receive_frame(dec_ctx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    break;
                } else if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame from the decoder\n");
                    return -1;
                }
                //the decoder outputs yuv420p frames
                printf("[%s%d] decoded %dx%d,linesize:%d format:%d \n",__FUNCTION__,__LINE__,frame->width,frame->height,frame->linesize[0],frame->format);
                frame->pts = frame->best_effort_timestamp;

                int64_t t1 = getNowUs();
                /* push the decoded frame into the filtergraph */

                if (av_buffersrc_add_frame_flags(filter_ctx.buffersrc_ctx[0], frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                            av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                            break;
                 }

                if (av_buffersrc_add_frame_flags(filter_ctx.buffersrc_ctx[1], frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                    break;
                }

                if (av_buffersrc_add_frame_flags(filter_ctx.buffersrc_ctx[2], frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                    break;
                }

                if (av_buffersrc_add_frame_flags(filter_ctx.buffersrc_ctx[3], frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                    break;
                }

                /* pull filtered frames from the filtergraph */
                while (1) {
                    printf("[%d:%s,%s] can! \n",__LINE__,__FILE__,__FUNCTION__);
                    ret = av_buffersink_get_frame(filter_ctx.buffersink_ctx, filt_frame);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){
                        break;
                    }
                    if (ret < 0){
                        return -1;
                    }
                    int64_t t2 = getNowUs();
                    printf("taketimes %ld %ld,%ld\n",t2-t1,t2,t1);

                    fwrite(filt_frame->data[0],1,filt_frame->height*filt_frame->linesize[0],fp_out );
                    fwrite(filt_frame->data[1],1,filt_frame->height*filt_frame->width/4,fp_out );
                    fwrite(filt_frame->data[2],1,filt_frame->height*filt_frame->width/4,fp_out );
                    av_frame_unref(filt_frame);
                }
                av_frame_unref(frame);
            }
        }
        av_packet_unref(&packet);
    }
    avcodec_free_context(&dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
#endif
   avfilter_graph_free(&(filter_ctx.filter_graph));
   av_frame_free(&filt_frame);

   fclose(fp_out);
}
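
To eyeball the result, play the output back as raw video (assuming ffplay from the same build):

ffplay -f rawvideo -video_size 1280x960 -pixel_format yuv420p 1280x960_yuv420p.yuv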
