Compressing to H264 with FFmpeg 4.1 and adding an OSD with a filter

https://blog.csdn.net/dancing_night/article/details/80818444  (watermark example)

The code is based on Lei Xiaohua's blog. Because the FFmpeg version is different, this post records the problems I ran into while using it.

1. avfilter_get_by_name("ffbuffersink") must be changed to avfilter_get_by_name("buffersink") in newer FFmpeg versions; otherwise the returned pointer is NULL and avfilter_graph_create_filter returns -12. Older versions did not have this problem.

2. avfilter_graph_parse_ptr returned -22 when parsing the drawtext watermark, and FFmpeg reported "No such filter: 'drawtext'". This happens when FFmpeg was built without the FreeType font engine; rebuild with ./configure --enable-libfreetype.

3. avfilter_graph_parse_ptr returned -2 when parsing the arguments. In drawtext=fontfile=arial.ttf:fontcolor=red:fontsize=30:text='Lei Xiaohua':x=50:y=50, arial.ttf is the font file, so make sure it sits in a directory the program can find. The colon is the option separator in a filter description, so the path must not contain an unescaped colon (a small check is sketched right after these notes).

4. After building FFmpeg, libpostproc.so may be missing; enable it at configure time with --enable-postproc.
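The snippet below is a minimal sketch, not part of the original program (the helper names are my own), that addresses points 1-3 up front: it looks the filters up under their FFmpeg 4.x names, checks that drawtext was actually compiled in, and uses av_strerror to turn raw return codes such as -12, -22 or -2 into readable messages. The Windows font path in the last comment is only an example of how a colon inside a filter option value has to be escaped.

// Sketch only: verify the required filters exist and report errors readably.
extern "C"
{
#include "libavfilter/avfilter.h"
#include "libavutil/error.h"
}
#include <stdio.h>

static int check_filters(void)
{
	// Since FFmpeg 4.x the sink is registered as "buffersink", not "ffbuffersink".
	const AVFilter *sink = avfilter_get_by_name("buffersink");
	// drawtext only exists when FFmpeg was configured with --enable-libfreetype.
	const AVFilter *drawtext = avfilter_get_by_name("drawtext");
	if (!sink || !drawtext) {
		printf("missing filter: %s\n", !sink ? "buffersink" : "drawtext");
		return -1;
	}
	return 0;
}

static void print_av_error(int ret)
{
	// Translates codes such as -12 (ENOMEM), -22 (EINVAL) or -2 (ENOENT) into text.
	char buf[AV_ERROR_MAX_STRING_SIZE] = { 0 };
	av_strerror(ret, buf, sizeof(buf));
	printf("ffmpeg error %d: %s\n", ret, buf);
}

// A colon inside an option value must be escaped in the filter description, e.g. a full font path:
// drawtext=fontfile='C\:/Windows/Fonts/arial.ttf':fontcolor=red:fontsize=30:text='OSD':x=50:y=50
// (or keep the font file next to the executable, as the code below does)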

5. Code
 

// test_avfilter.cpp : Defines the entry point for the console application.
//
 
#include "stdafx.h"
 
 
/**
* Simplest FFmpeg AVfilter Example - Pure
*
* Lei Xiaohua (雷霄骅)
* leixiaohua1020@126.com
* Communication University of China / Digital TV Technology
* http://blog.csdn.net/leixiaohua1020
*
* This software uses FFmpeg's AVFilter to process YUV raw data.
* It can add many effects to YUV data.
* It's the simplest example based on FFmpeg's AVFilter.
* Suitable for beginners of FFmpeg.
*
*/
#include <stdio.h>
 
#define __STDC_CONSTANT_MACROS
 
#ifdef _WIN32
#define snprintf _snprintf
//Windows
extern "C"
{
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavutil/avutil.h"
#include "libavutil/imgutils.h"
};
#else
//Linux...
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#ifdef __cplusplus
};
#endif
#endif
 
 
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"avfilter.lib")
#pragma comment(lib,"swscale.lib")
#pragma comment(lib,"SDL2.lib")
#pragma comment(lib,"SDL2main.lib")
 
int main(int argc, char* argv[])
{
	getchar();
 
	int ret;
	AVFrame *frame_in;
	AVFrame *frame_out;
	unsigned char *frame_buffer_in;
	unsigned char *frame_buffer_out;
 
	AVFilterContext *buffersink_ctx;
	AVFilterContext *buffersrc_ctx;
	AVFilterGraph *filter_graph;
	static int video_stream_index = -1;
 
	//Input YUV
	FILE *fp_in = fopen("trans.yuv", "rb+");
	if (fp_in == NULL) {
		printf("Error open input file.\n");
		return -1;
	}
	int in_width = 1588;
	int in_height = 900;
 
	//Output YUV
	FILE *fp_out = fopen("output.yuv", "wb+");
	if (fp_out == NULL) {
		printf("Error open output file.\n");
		return -1;
	}
 
	//const char *filter_descr = "lutyuv='u=128:v=128'";
	//const char *filter_descr = "boxblur";
	//const char *filter_descr = "hflip";
	//const char *filter_descr = "hue='h=60:s=-3'";
	//const char *filter_descr = "crop=2/3*in_w:2/3*in_h";
	//const char *filter_descr = "drawbox=x=100:y=100:w=100:h=100:[email protected]";
	const char *filter_descr = "drawtext=fontfile=arial.ttf:fontcolor=red:fontsize=30:text='Lei Xiaohua':x=50:y=50";
 
	avfilter_register_all();	// deprecated since FFmpeg 4.0 (no longer required), kept for older builds
 
	char args[512];
	const AVFilter *buffersrc = avfilter_get_by_name("buffer");
	const AVFilter *buffersink = avfilter_get_by_name("buffersink");
	AVFilterInOut *outputs = avfilter_inout_alloc();
	AVFilterInOut *inputs = avfilter_inout_alloc();
	enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
	AVBufferSinkParams *buffersink_params;
 
	filter_graph = avfilter_graph_alloc();
 
	/* buffer video source: the decoded frames from the decoder will be inserted here. */
	snprintf(args, sizeof(args),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		in_width, in_height, AV_PIX_FMT_YUV420P,
		1, 25, 1, 1);
 
	ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
		args, NULL, filter_graph);
	if (ret < 0) {
		printf("Cannot create buffer source\n");
		return ret;
	}
 
	/* buffer video sink: to terminate the filter chain. */
	buffersink_params = av_buffersink_params_alloc();
	buffersink_params->pixel_fmts = pix_fmts;
	ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
		NULL, buffersink_params, filter_graph);
	av_free(buffersink_params);
	if (ret < 0) {
		printf("Cannot create buffer sink\n");
		return ret;
	}
 
	/* Endpoints for the filter graph. */
	outputs->name = av_strdup("in");
	outputs->filter_ctx = buffersrc_ctx;
	outputs->pad_idx = 0;
	outputs->next = NULL;
 
	inputs->name = av_strdup("out");
	inputs->filter_ctx = buffersink_ctx;
	inputs->pad_idx = 0;
	inputs->next = NULL;
 
	if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
		&inputs, &outputs, NULL)) < 0)
		return ret;
 
	if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
		return ret;
 
	frame_in = av_frame_alloc();
	frame_buffer_in = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1));
	av_image_fill_arrays(frame_in->data, frame_in->linesize, frame_buffer_in,
		AV_PIX_FMT_YUV420P, in_width, in_height, 1);
 
	frame_out = av_frame_alloc();
	frame_buffer_out = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1));
	av_image_fill_arrays(frame_out->data, frame_out->linesize, frame_buffer_out,
		AV_PIX_FMT_YUV420P, in_width, in_height, 1);
 
	frame_in->width = in_width;
	frame_in->height = in_height;
	frame_in->format = AV_PIX_FMT_YUV420P;
 
	while (1) {
 
		if (fread(frame_buffer_in, 1, in_width*in_height * 3 / 2, fp_in) != in_width*in_height * 3 / 2) {
			break;
		}
		//input Y,U,V
		frame_in->data[0] = frame_buffer_in;
		frame_in->data[1] = frame_buffer_in + in_width*in_height;
		frame_in->data[2] = frame_buffer_in + in_width*in_height * 5 / 4;
 
		if (av_buffersrc_add_frame(buffersrc_ctx, frame_in) < 0) {
			printf("Error while add frame.\n");
			break;
		}
 
		/* pull filtered pictures from the filtergraph */
		ret = av_buffersink_get_frame(buffersink_ctx, frame_out);
		if (ret < 0)
			break;
 
		//output Y,U,V
		if (frame_out->format == AV_PIX_FMT_YUV420P) {
			for (int i = 0; i < frame_out->height; i++) {
				fwrite(frame_out->data[0] + frame_out->linesize[0] * i, 1, frame_out->width, fp_out);
			}
			for (int i = 0; i < frame_out->height / 2; i++) {
				fwrite(frame_out->data[1] + frame_out->linesize[1] * i, 1, frame_out->width / 2, fp_out);
			}
			for (int i = 0; i < frame_out->height / 2; i++) {
				fwrite(frame_out->data[2] + frame_out->linesize[2] * i, 1, frame_out->width / 2, fp_out);
			}
		}
		printf("Process 1 frame!\n");
		av_frame_unref(frame_out);
	}
 
	fclose(fp_in);
	fclose(fp_out);
 
	av_frame_free(&frame_in);
	av_frame_free(&frame_out);
	avfilter_graph_free(&filter_graph);
 
	return 0;
}
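The input trans.yuv must be raw planar YUV420P at 1588x900, and output.yuv comes out in the same format. Assuming some source clip is available (input.mp4 below is just a placeholder name), suitable input can be produced and the filtered result inspected with the ffmpeg command-line tools, roughly like this:

ffmpeg -i input.mp4 -vf scale=1588:900 -pix_fmt yuv420p -f rawvideo trans.yuv
ffplay -f rawvideo -pixel_format yuv420p -video_size 1588x900 output.yuv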
 
 
 

Adding an OSD while compressing to H264, built with VS2012 + ffmpeg 4.1:

// test.cpp : Defines the entry point for the console application.
//

#include "stdafx.h"
#define __STDC_CONSTANT_MACROS
extern "C"  
{  
	#include 
	#include "libavfilter/buffersink.h"
    #include "libavfilter/buffersrc.h"
	#include   
	#include 
    #include   
    #include   
    #include   
	#include "libavutil/avutil.h"
    #include   
}
#pragma comment(lib, "avcodec.lib")  
#pragma comment(lib, "avformat.lib")  
#pragma comment(lib, "avdevice.lib")  
#pragma comment(lib, "avfilter.lib")  
#pragma comment(lib, "avutil.lib")  
#pragma comment(lib, "postproc.lib")  
#pragma comment(lib, "swresample.lib")  
#pragma comment(lib, "swscale.lib")  



static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
                   FILE *outfile)
{
    int ret;

    /* send the frame to the encoder */
    if (frame)
        printf("Send frame %l\n", frame->pts);

    ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending a frame for encoding\n");
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error during encoding\n");
            exit(1);
        }

        printf("Write packet %l  (size=%5d)\n", pkt->pts, pkt->size);
        fwrite(pkt->data, 1, pkt->size, outfile);
        av_packet_unref(pkt);
    }
}
uint8_t  yuv_buff[1024*1024*500];	// preloaded input YUV; large enough for 550+ frames of 720x576 YUV420P
#define snprintf _snprintf
int _tmain(int argc, char* argv[])
{


	const char *filename, *codec_name;
    const AVCodec *codec;
    AVCodecContext *c= NULL;
    int i, ret, x, y;
    FILE *f;
    AVFrame *frame;
    AVPacket *pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

	filename = "test.264";
   // codec_name = argv[2];
	codec =avcodec_find_encoder(AV_CODEC_ID_H264);;// avcodec_find_encoder_by_name(codec_name);
    if (!codec) {
        fprintf(stderr, "Codec '%s' not found\n", codec_name);
        exit(1);
    }
	 c = avcodec_alloc_context3(codec);
    if (!c) 
	{
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }
	
    pkt = av_packet_alloc();
    if (!pkt)
        exit(1);
	  /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 720;
    c->height = 576;
    /* frames per second */
   // c->time_base = (AVRational){1, 25};
	c->time_base.num = 1;c->time_base.den = 25;
   // c->framerate = (AVRational){25, 1};
	c->framerate.num = 25;c->framerate.den = 1;
	    c->gop_size = 10;
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    if (codec->id == AV_CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);
	 ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) 
	{
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }
	  f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }
	frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;

    ret = av_frame_get_buffer(frame, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate the video frame data\n");
        exit(1);
    }
	// read the whole YUV420P input into memory; each frame is width*height*3/2 bytes
	FILE *fp;
	fp = fopen("d:\\temp\\VIDEO720576.yuv","rb+");
	if (!fp) {
		fprintf(stderr, "Could not open input YUV file\n");
		exit(1);
	}
	int frame_size = frame->width * frame->height * 3 / 2;
	int len = fread(yuv_buff, 1, sizeof(yuv_buff), fp);
	int total_frames = len / frame_size;
	fclose(fp);

	unsigned char *pbuffer;
	clock_t start_time, finish_time;
	float duration;
     
	 int frmcnt=0;
	///OSD/start
	 AVFrame *frame_in;
	AVFrame *frame_out;
	unsigned char *frame_buffer_in;
	unsigned char *frame_buffer_out;
 
	AVFilterContext *buffersink_ctx;
	AVFilterContext *buffersrc_ctx;
	AVFilterGraph *filter_graph;
	static int video_stream_index = -1;
	 const char *filter_descr = "drawtext=fontfile=arial.ttf:fontcolor=red:fontsize=30:text='12345678901234567890123456790':x=50:y=50";
     int in_width = 720;
	int in_height = 576;
//	avfilter_register_all();
	char args[512];
	const AVFilter *buffersrc = avfilter_get_by_name("buffer");
	const AVFilter *buffersink = avfilter_get_by_name("buffersink");
	AVFilterInOut *outputs = avfilter_inout_alloc();
	AVFilterInOut *inputs = avfilter_inout_alloc();
	enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
	AVBufferSinkParams *buffersink_params;

	filter_graph = avfilter_graph_alloc();
	snprintf(args, sizeof(args),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		in_width, in_height, AV_PIX_FMT_YUV420P,
		1, 25, 1, 1);//#define snprintf _snprintf

	ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
		args, NULL, filter_graph);
	if (ret < 0) 
	{
		printf("Cannot create buffer source\n");
		return ret;
	}
	buffersink_params = av_buffersink_params_alloc();
	buffersink_params->pixel_fmts = pix_fmts;
	ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
		NULL, buffersink_params, filter_graph);
	av_free(buffersink_params);
	if (ret < 0) 
	{
		printf("Cannot create buffer sink\n");
		return ret;
	}
    outputs->name = av_strdup("in");
	outputs->filter_ctx = buffersrc_ctx;
	outputs->pad_idx = 0;
	outputs->next = NULL;
 
	inputs->name = av_strdup("out");
	inputs->filter_ctx = buffersink_ctx;
	inputs->pad_idx = 0;
	inputs->next = NULL;

	if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
	&inputs, &outputs, NULL)) < 0)
	return ret;

	if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
		return ret;

	frame_in = av_frame_alloc();
	frame_buffer_in = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1));
	av_image_fill_arrays(frame_in->data, frame_in->linesize, frame_buffer_in,
		AV_PIX_FMT_YUV420P, in_width, in_height, 1);

		frame_out = av_frame_alloc();
	frame_buffer_out = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1));
	av_image_fill_arrays(frame_out->data, frame_out->linesize, frame_buffer_out,
		AV_PIX_FMT_YUV420P, in_width, in_height, 1);

	frame_in->width = in_width;
	frame_in->height = in_height;
	frame_in->format = AV_PIX_FMT_YUV420P;

	///OSD/end
	 start_time = clock();
	 while(1)
	 {
        fflush(stdout);
		
        /* make sure the frame data is writable */
        ret = av_frame_make_writable(frame);
        if (ret < 0)
            exit(1);

        /* take the next frame from the preloaded YUV buffer */
		if (frmcnt >= total_frames)
			break;
		pbuffer = yuv_buff + frame_size * frmcnt;
		//OSD/start
		frame_in->data[0] = pbuffer;
		frame_in->data[1] = pbuffer + in_width*in_height;
		frame_in->data[2] = pbuffer + in_width*in_height * 5 / 4;

		if (av_buffersrc_add_frame(buffersrc_ctx, frame_in) < 0) 
		{
			printf("Error while add frame.\n");
			break;
		}
		/* pull filtered pictures from the filtergraph */
		ret = av_buffersink_get_frame(buffersink_ctx, frame_out);
		if (ret < 0)
			break;
		
		//OSD/end
        for (y = 0; y < c->height; y++) 
		{
           memcpy(frame->data[0]+y * frame->linesize[0],frame_out->data[0] + frame_out->linesize[0] * y,frame->width);
        }

        for (y = 0; y < c->height/2; y++) 
		{
             memcpy(frame->data[1]+y * frame->linesize[1],frame_out->data[1] + frame_out->linesize[1] * y,frame->width/2);
			 memcpy(frame->data[2]+y * frame->linesize[2],frame_out->data[2] + frame_out->linesize[2] * y,frame->width/2);
        }
		av_frame_unref(frame_out);	// release the reference returned by av_buffersink_get_frame

		 frame->pts = frmcnt;

        /* encode the image */
        encode(c, frame, pkt, f);
		if(frmcnt++==550)break;
    }

	  encode(c, NULL, pkt, f);
	  finish_time = clock();
	duration = (double)(finish_time - start_time) / CLOCKS_PER_SEC;
	//CString str;
	printf("Encoding time: %2.1f s (%d frames)\n", duration, frmcnt);
    /* add sequence end code to have a real MPEG file */
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);

    avcodec_free_context(&c);
    av_frame_free(&frame);
    av_packet_free(&pkt);
	av_frame_free(&frame_in);
	av_frame_free(&frame_out);
	avfilter_graph_free(&filter_graph);
	

	getchar();
	return 0;
}
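The program writes test.264 as a raw H264 elementary stream with the drawtext OSD burned into every frame. It can be checked with the ffmpeg tools, for example:

ffplay -f h264 test.264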

 
