FFmpeg进阶: 截取视频生成gif动图

文章目录

    • 1.封装视频滤镜
    • 2.截取视频生成gif
    • 3.gif优化
    • 4.示例效果

现在互联网上很多人都通过表情包来表达自己的情绪,常用的表情包很多都是视频文件的一部分。这里就介绍一下如何通过ffmpeg截取视频生成gif动图。其实原理很简单,首先我们seek到视频对应的位置,然后读取数据帧修改帧的数据格式并输出到gif文件当中,读取完毕之后我们就得到了一个视频动图。具体的操作步骤如下:

1.封装视频滤镜

首先封装一下视频滤镜,方便对数据帧进行变换处理

//video_filter.h
#ifndef VIDEOBOX_VIDEO_FILTER_H
#define VIDEOBOX_VIDEO_FILTER_H

extern "C" {
#include 
#include 
#include 
#include 
#include 
#include 
}

//滤镜输入和输出对应的配置
struct VideoConfig {
    AVPixelFormat format;
    int width;
    int height;
    AVRational timebase{1, 30};
    AVRational pixel_aspect{1, 1};

    VideoConfig(AVPixelFormat format, int width, int height, AVRational timebase = {1, 30},
                AVRational pixel_aspect = {1, 1}) {
        this->format = format; //像素格式
        this->width = width;   //视频宽
        this->height = height; //视频高
    }
};

class VideoFilter {
protected:
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
	AVFilterContext *buffersrc_ctx1;
    AVFilterGraph *filter_graph;
    const char *description = nullptr;
public:

    VideoFilter() = default;

	//构建对应的滤镜
    int create(const char *filter_descr, VideoConfig *inConfig, VideoConfig *outConfig);
	int create(const char *filter_descr, VideoConfig *inConfig1, VideoConfig *inConfig2, VideoConfig *outConfig);

	//获取滤镜的输入和输出
    int filter(AVFrame *source, AVFrame *dest);
	int filter(AVFrame* source1, AVFrame* source2, AVFrame*dest);
	

	//添加输入1
	int addInput1(AVFrame * input);

	//添加输入2
	int addInput2(AVFrame* input);

	//获取处理之后的结果
	int getFrame(AVFrame* result);

    void dumpGraph();

    void destroy();
};

#endif 

//video_filter.cpp
#include "video_filter.h"

// Build a single-input graph: buffer("in") -> <filter_descr> -> buffersink("out").
// Returns 0 on success or a negative AVERROR code.  On failure the partially
// built graph remains owned by this object and is released by destroy().
int VideoFilter::create(const char *filter_descr, VideoConfig *inConfig, VideoConfig *outConfig) {
    this->description = filter_descr;
    char args[512];
    int ret = 0;
    const AVFilter *buffersrc = avfilter_get_by_name("buffer");
    const AVFilter *buffersink = avfilter_get_by_name("buffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    // Sink accepts exactly the requested output pixel format.
    enum AVPixelFormat pix_fmts[] = {outConfig->format, AV_PIX_FMT_NONE};

    filter_graph = avfilter_graph_alloc();
    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    // Describe the incoming stream to the buffer source.
    // BUG FIX: the last argument was pixel_aspect.num twice; the aspect-ratio
    // denominator must come from pixel_aspect.den.
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             inConfig->width, inConfig->height, inConfig->format,
             inConfig->timebase.num, inConfig->timebase.den,
             inConfig->pixel_aspect.num, inConfig->pixel_aspect.den);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                       args, nullptr, filter_graph);
    if (ret < 0) {
        goto end;
    }

    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                       nullptr, nullptr, filter_graph);
    if (ret < 0) {
        goto end;
    }

    ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
                              AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        goto end;
    }

    // The graph description's input pad is fed by our buffer source ...
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = nullptr;

    // ... and its output pad feeds our sink.
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = nullptr;

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
                                        &inputs, &outputs, nullptr)) < 0) {
        goto end;
    }

    if ((ret = avfilter_graph_config(filter_graph, nullptr)) < 0) {
        goto end;
    }

    end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}


// Build a dual-input graph: buffer("in") + buffer("in1") -> <filter_descr>
// -> buffersink("out").  Returns 0 on success or a negative AVERROR code.
int VideoFilter::create(const char *filter_descr, VideoConfig *inConfig1, VideoConfig *inConfig2, VideoConfig *outConfig) {
	this->description = filter_descr;
	char args1[512];
	char args2[512];
	int ret = 0;
	const AVFilter *buffersrc1 = avfilter_get_by_name("buffer");
	const AVFilter *buffersrc2 = avfilter_get_by_name("buffer");
	const AVFilter *buffersink = avfilter_get_by_name("buffersink");
	AVFilterInOut *outputs = avfilter_inout_alloc();
	AVFilterInOut *inputs = avfilter_inout_alloc();
	AVFilterInOut *full_output = avfilter_inout_alloc();
	enum AVPixelFormat pix_fmts[] = { outConfig->format, AV_PIX_FMT_NONE };

	filter_graph = avfilter_graph_alloc();
	if (!outputs || !inputs || !filter_graph || !full_output) {
		ret = AVERROR(ENOMEM);
		goto end;
	}

	// BUG FIX: link "in" -> "in1" immediately so avfilter_inout_free(&outputs)
	// releases full_output on every exit path.  The original only linked at the
	// end, leaking full_output whenever an earlier step failed.
	outputs->next = full_output;
	full_output->next = nullptr;

	// BUG FIX (both snprintf calls): aspect-ratio denominator must be
	// pixel_aspect.den, not pixel_aspect.num repeated.
	snprintf(args1, sizeof(args1),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		inConfig1->width, inConfig1->height, inConfig1->format,
		inConfig1->timebase.num, inConfig1->timebase.den,
		inConfig1->pixel_aspect.num, inConfig1->pixel_aspect.den);

	snprintf(args2, sizeof(args2),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		inConfig2->width, inConfig2->height, inConfig2->format,
		inConfig2->timebase.num, inConfig2->timebase.den,
		inConfig2->pixel_aspect.num, inConfig2->pixel_aspect.den);

	ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc1, "in",
		args1, nullptr, filter_graph);
	if (ret < 0) {
		goto end;
	}
	ret = avfilter_graph_create_filter(&buffersrc_ctx1, buffersrc2, "in1",
		args2, nullptr, filter_graph);
	if (ret < 0) {
		goto end;
	}

	ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
		nullptr, nullptr, filter_graph);
	if (ret < 0) {
		goto end;
	}

	// Constrain the sink to the requested output pixel format.
	ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
		AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
	if (ret < 0) {
		goto end;
	}

	// The description's two input pads are fed by our two buffer sources.
	outputs->name = av_strdup("in");
	outputs->filter_ctx = buffersrc_ctx;
	outputs->pad_idx = 0;

	full_output->name = av_strdup("in1");
	full_output->pad_idx = 0;
	full_output->filter_ctx = buffersrc_ctx1;

	// Its output pad feeds our sink.
	inputs->name = av_strdup("out");
	inputs->filter_ctx = buffersink_ctx;
	inputs->pad_idx = 0;
	inputs->next = nullptr;

	if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
		&inputs, &outputs, nullptr)) < 0) {
		goto end;
	}

	if ((ret = avfilter_graph_config(filter_graph, nullptr)) < 0) {
		goto end;
	}

end:
	avfilter_inout_free(&inputs);
	avfilter_inout_free(&outputs); // frees the whole chain, incl. full_output

	return ret;
}

// Push one frame through the graph and pull one filtered frame into dest.
// Returns 0 on success, -1 if either step fails.
int VideoFilter::filter(AVFrame *source, AVFrame *dest) {
    if (av_buffersrc_add_frame_flags(buffersrc_ctx, source, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
        return -1;
    }
    return av_buffersink_get_frame(buffersink_ctx, dest) < 0 ? -1 : 0;
}

// Feed both buffer sources, then pull one filtered frame from the sink.
// Returns 0 on success, -1 if any step fails.
int VideoFilter::filter(AVFrame *source1, AVFrame *source2, AVFrame *dest)
{
	AVFilterContext *sources[2] = { buffersrc_ctx, buffersrc_ctx1 };
	AVFrame *frames[2] = { source1, source2 };

	for (int i = 0; i < 2; ++i) {
		if (av_buffersrc_add_frame_flags(sources[i], frames[i], AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
			return -1;
		}
	}

	return av_buffersink_get_frame(buffersink_ctx, dest) < 0 ? -1 : 0;
}

// Queue a frame on the first buffer source ("in").
// Returns 0 on success or a negative AVERROR code.
int VideoFilter::addInput1(AVFrame *input)
{
	const int status = av_buffersrc_add_frame_flags(buffersrc_ctx, input, AV_BUFFERSRC_FLAG_KEEP_REF);
	return status;
}

// Queue a frame on the second buffer source ("in1").
// Returns 0 on success or a negative AVERROR code.
int VideoFilter::addInput2(AVFrame *input)
{
	const int status = av_buffersrc_add_frame_flags(buffersrc_ctx1, input, AV_BUFFERSRC_FLAG_KEEP_REF);
	return status;
}

// Pull one processed frame from the sink into result.
// Returns 0 on success or a negative AVERROR code (e.g. EAGAIN when no
// frame is available yet).
int VideoFilter::getFrame(AVFrame *result)
{
	const int status = av_buffersink_get_frame(buffersink_ctx, result);
	return status;
}

void VideoFilter::dumpGraph() {
   printf("%s:%s", description, avfilter_graph_dump(filter_graph, nullptr));
}

// Release the graph; this also frees every filter context created in create().
void VideoFilter::destroy() {
    if (filter_graph == nullptr) {
        return;
    }
    avfilter_graph_free(&filter_graph);
}

2.截取视频生成gif

封装完毕视频滤镜之后,我们就可以读取视频的数据帧,修改数据结构输出gif动图了,对应的实现如下:


#pragma execution_character_set("utf-8")
#define _CRT_SECURE_NO_WARNINGS

// NOTE(review): header names below were lost in extraction and have been
// reconstructed from the APIs this program uses — confirm against the build.
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cstdint>
#include <string>
#include <iostream>

extern "C"
{
#include "libavutil/opt.h"
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include <libavutil/pixdesc.h>
#include <libavutil/mathematics.h>
#include <libavutil/error.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
}
#include "video_filter.h"

//@1输入视频文件的地址
//@2输出gif文件的地址
//@3起始时间(s)
//@4结束时间(s)
int convert_video_to_gif(const char *output_filename, const char *input_filename, float from, float to) 
{
	
	if (from < 0 || from >= to) {
		return -1;
	}

	AVFormatContext *inFmtCtx = nullptr;
	AVFormatContext *outFmtCtx = nullptr;
	AVCodecContext *videoCodecCtx = nullptr;
	AVCodecContext *gifCodecCtx = nullptr;

	int ret = 0;

	//打开输入文件
	ret = avformat_open_input(&inFmtCtx, input_filename, nullptr, nullptr);
	ret = avformat_find_stream_info(inFmtCtx, nullptr);
	
	ret = avformat_alloc_output_context2(&outFmtCtx, nullptr, nullptr, output_filename);

	int video_idx = 0;

	for (int i = 0; i < inFmtCtx->nb_streams; ++i) 
	{
		if (inFmtCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) 
		{
			video_idx = i;
			AVStream *inVideoStream = inFmtCtx->streams[i];
			AVStream *outVideoStream = avformat_new_stream(outFmtCtx, nullptr);
	
			//创建输出流和对应的编码器
			av_dict_copy(&outVideoStream->metadata, inVideoStream->metadata, 0);
			const AVCodec *inCodec = avcodec_find_decoder(inVideoStream->codecpar->codec_id);
			videoCodecCtx = avcodec_alloc_context3(inCodec);
			ret = avcodec_parameters_to_context(videoCodecCtx, inVideoStream->codecpar);
			ret = avcodec_open2(videoCodecCtx, inCodec, nullptr);
			
			const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_GIF);
			gifCodecCtx = avcodec_alloc_context3(codec);
			gifCodecCtx->codec_id = AV_CODEC_ID_GIF;
			gifCodecCtx->time_base = { 1, 30 };
			gifCodecCtx->bit_rate = 100000;
			gifCodecCtx->pix_fmt = AV_PIX_FMT_RGB8;
			gifCodecCtx->width = 600;
			gifCodecCtx->height = 400;
			ret = avcodec_open2(gifCodecCtx, codec, nullptr);
			ret = avcodec_parameters_from_context(outVideoStream->codecpar, gifCodecCtx);
		}
	}
	av_dict_copy(&outFmtCtx->metadata, inFmtCtx->metadata, 0);

	if (!(outFmtCtx->flags & AVFMT_NOFILE)) {
		ret = avio_open(&outFmtCtx->pb, output_filename, AVIO_FLAG_WRITE);
	}

	//写gif文件头
	ret = avformat_write_header(outFmtCtx, nullptr);

	int64_t first_pts = av_rescale_q_rnd((int64_t)from, AVRational{ 1,1 },
		inFmtCtx->streams[video_idx]->time_base,
		AV_ROUND_DOWN);
	av_seek_frame(inFmtCtx, video_idx, first_pts, AVSEEK_FLAG_BACKWARD);


	//通过滤镜缩放视频,修改图片像素格式
	VideoFilter *filter = nullptr;
	filter = new VideoFilter();
	char filter_descr[128];
	snprintf(filter_descr, sizeof(filter_descr), "scale=600:400,format=pix_fmts=%s",
		av_get_pix_fmt_name(gifCodecCtx->pix_fmt));
	VideoConfig in(videoCodecCtx->pix_fmt, videoCodecCtx->width, videoCodecCtx->height);
	VideoConfig out(gifCodecCtx->pix_fmt, gifCodecCtx->width, gifCodecCtx->height);
	filter->create(filter_descr, &in, &out);
	

	int gif_pts = 0;
	int index = 0;
	while (true) {
		AVPacket packet{ 0 };
		av_init_packet(&packet);
		ret = av_read_frame(inFmtCtx, &packet);
		if (ret < 0) 
		{
			break;
		}
		if (av_compare_ts(packet.pts, inFmtCtx->streams[packet.stream_index]->time_base,
			(int64_t)(to * 10), AVRational{ 1, 10 }) >= 0) {
			break;
		}

		//读取数据帧并进行输出
		if (packet.stream_index == video_idx) 
		{
			AVFrame *frame = av_frame_alloc();
			ret = avcodec_send_packet(videoCodecCtx, &packet);
			if (ret < 0) {
				continue;
			}
			ret = avcodec_receive_frame(videoCodecCtx, frame);
			if (ret < 0) {
				continue;
			}

			filter->filter(frame, frame);
			frame->pts = gif_pts++;

			ret = avcodec_send_frame(gifCodecCtx, frame);
			if (ret < 0) {
				continue;
			}
			av_frame_free(&frame);

			AVPacket gifPkt{ 0 };
			av_init_packet(&gifPkt);
			ret = avcodec_receive_packet(gifCodecCtx, &gifPkt);
			if (ret < 0)
			{
				continue;
			}
			gifPkt.stream_index = 0;
			av_packet_rescale_ts(&gifPkt, gifCodecCtx->time_base,
				outFmtCtx->streams[0]->time_base);

			ret = av_interleaved_write_frame(outFmtCtx, &gifPkt);
			
		}
	}
	
	if (filter != nullptr) {
		filter->destroy();
		delete filter;
	}

	avformat_close_input(&inFmtCtx);
	avformat_free_context(inFmtCtx);
	avformat_free_context(outFmtCtx);
	return 0;
}


int main(int argc, char* argv[])
{
	if (argc != 3)
	{
		printf("usage:%1 input filepath %2 outputfilepath");
		return -1;
	}
	//输入视频文件地址,输出gif的地址
	std::string fileInput = std::string(argv[1]);
	std::string fileOutput = std::string(argv[2]);
	

	avformat_network_init();
	convert_video_to_gif(fileOutput.c_str(), fileInput.c_str(), 30, 40);
}

3.gif优化

由于原始视频的像素比较高,帧率也比较高,这样截取出来的gif可能比较大。对于gif尺寸比较大,优化策略主要包括以下三点:
1.缩短截取的时间长度
2.对图片尺寸进行缩放处理
3.通过抽帧来降低视频的帧率

需要注意的是,抽帧会降低动图的流畅度,抽帧间隔过大时动图可能出现明显卡顿,应根据实际效果权衡取舍。

4.示例效果



你可能感兴趣的:(音视频,ffmpeg,音视频,流媒体,滤镜,编码解码)