ffmpeg avcodec_encode_video2 函数报错

总结此次出现错误原因是:提供给avcodec_encode_video2函数的frame数据的yuv格式和编码器所能编码的yuv数据格式不符。


项目之前使用ffmpeg完成了将yuv数据保存成视频文件的功能,最近需要添加一项新功能:满足条件时裁剪视频画面大小(不同于压缩画面大小)。网上查找资料之后知道可以用filter的crop功能来实现。因此在源代码基础上添加filter相关代码。完成之后发现之前没有问题的avcodec_encode_video2函数报错。


avcodec_encode_video2函数如下:
avcodec_encode_video2(c, pkt, m_StrmVideo->frame, &got_packet);
源frame颜色空间格式为AV_PIX_FMT_YUYV422,需要转换成AV_PIX_FMT_YUV420P。filter可以替换原来代码中的sws_scale方法进行视频大小和颜色空间变换。
调试后发现 frame 的格式 format 值为 1,即 AV_PIX_FMT_YUYV422;而编码器 c 的 codec_id 为 AV_CODEC_ID_MPEG4,该编码器只接受 AV_PIX_FMT_YUV420P 格式的数据,不能编码 AV_PIX_FMT_YUYV422 格式的数据,因此函数报错。最终定位到根本原因:buffersrc_ctx 源中错误地指定了 pix_fmt=AV_PIX_FMT_YUV420P;buffersrc 的 pix_fmt 应当描述“送入”滤镜的源数据格式,所以这里应指定为 pix_fmt=AV_PIX_FMT_YUYV422。


以下是filter相关代码。参考http://www.zhimengzhe.com/linux/249541.html


	// Filter-graph endpoints; filled in by InitFilters() below.
	AVFilterContext *buffersink_ctx;
	AVFilterContext *buffersrc_ctx;
	AVFilterGraph *filter_graph;

	// szFilter holds the filtergraph description string. For the crop filter
	// options see http://ffmpeg.org/ffmpeg-filters.html#crop
	char szFilter[MAX_PATH]; 
	memset(szFilter, 0, MAX_PATH*sizeof(char));
	// Crop a m_nDesWidth x m_nDesHeight window whose top-left corner is at
	// (srcW-desW, srcH-desH) — i.e. the bottom-right region of the source.
	// NOTE(review): if a centered crop was intended, x/y should be halved — confirm.
	sprintf_s(szFilter, "crop=w=%d:h=%d:x=%d:y=%d", m_nDesWidth, m_nDesHeight, m_nSrcWidth-m_nDesWidth, m_nSrcHeight-m_nDesHeight);
	// NOTE(review): the return value of InitFilters() is ignored; on failure
	// buffersrc_ctx/buffersink_ctx are left uninitialized and the calls below
	// dereference them — consider checking it.
	InitFilters(szFilter); 
	
	// m_StrmVideo->tmp_frame is the original input AVFrame;
	// m_StrmVideo->frame receives the cropped output frame.
	if (av_buffersrc_add_frame_flags(buffersrc_ctx, m_StrmVideo->tmp_frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
		return 0;
	}


	/* pull filtered frames from the filtergraph */
	ret = av_buffersink_get_frame(buffersink_ctx, m_StrmVideo->frame);
	if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
		return 0;
	if (ret < 0)
		return 0;

int CFFMpegEncoder::InitFilters(const char *filters_descr)
{
    char args[512];
    int ret = 0;
    AVFilter *buffersrc  = avfilter_get_by_name("buffer");     /* 输入buffer filter */
    AVFilter *buffersink = avfilter_get_by_name("buffersink"); /* 输出buffer filter */
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    AVRational time_base = m_StrmVideo->st->time_base;

    filter_graph = avfilter_graph_alloc();                     /* 创建graph  */
    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
		avfilter_inout_free(&inputs);
		avfilter_inout_free(&outputs);


		return ret;
    }
    
	//此处为目标frame yuv格式
	enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };


    /* buffer video source: the decoded frames from the decoder will be inserted here. */
	AVCodecContext* dec_ctx = m_StrmVideo->enc;
	//此处需要指定源frame yuv格式
    snprintf(args, sizeof(args),
            "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
            m_nSrcWidth, m_nSrcHeight, m_pixFmtSrc,
            time_base.num, time_base.den,
            dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);


    /* 创建并向FilterGraph中添加一个Filter */
    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                       args, NULL, filter_graph);           
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
		avfilter_inout_free(&inputs);
		avfilter_inout_free(&outputs);


		return ret;
    }


    /* buffer video sink: to terminate the filter chain. */
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                       NULL, NULL, filter_graph);          
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
		avfilter_inout_free(&inputs);
		avfilter_inout_free(&outputs);


		return ret;
    }


     /* Set a binary option to an integer list. */
    ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
                              AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);   
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
		avfilter_inout_free(&inputs);
		avfilter_inout_free(&outputs);


		return ret;
    }


    /*
     * Set the endpoints for the filter graph. The filter_graph will
     * be linked to the graph described by filters_descr.
     */


    /*
     * The buffer source output must be connected to the input pad of
     * the first filter described by filters_descr; since the first
     * filter input label is not specified, it is set to "in" by
     * default.
     */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;


    /*
     * The buffer sink input must be connected to the output pad of
     * the last filter described by filters_descr; since the last
     * filter output label is not specified, it is set to "out" by
     * default.
     */
    inputs->name       = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx    = 0;
    inputs->next       = NULL;


    /* Add a graph described by a string to a graph */
    if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
                                    &inputs, &outputs, NULL)) < 0)    
	{    
		avfilter_inout_free(&inputs);
		avfilter_inout_free(&outputs);


		return ret;
	}


    /* Check validity and configure all the links and formats in the graph */
    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)   
    {    
		avfilter_inout_free(&inputs);
		avfilter_inout_free(&outputs);


		return ret;
	}




    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);


    return ret;
}


你可能感兴趣的:(ffmpeg avcodec_encode_video2 函数报错)