The general steps are as follows:
1. Write the filter source file, e.g. vf_xxx.c, and put it under the libavfilter directory.
2. In libavfilter/allfilters.c, add: extern AVFilter ff_vf_xxx;
3. In libavfilter/Makefile, add: OBJS-$(CONFIG_XXX_FILTER) += vf_xxx.o
4. In ffbuild/config.mak, add: CONFIG_XXX_FILTER=yes
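After these edits, rebuild and check that the new filter is registered. A quick sanity check (note that ffbuild/config.mak is generated by configure, so re-running configure would overwrite step 4):

make -j8
./ffmpeg -filters | grep xxx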
A filter defines quite a few callbacks, but for a minimal one you may only need to implement init(), uninit() and query_formats(). The whole filter graph is initialized when it is configured (config_graph()), which is where each filter's init() and query_formats() are invoked; pushing frames into the graph through the buffer source (add_buffersrc(), i.e. av_buffersrc_add_frame()) then activates the graph, and each frame is processed by the callbacks of the filters along the chain.
pad --link--> +--------+ --link--> pad --link--> ...
pad --link--> | filter | --link--> pad --link--> ...
    ...       +--------+     ...
// Called right after the filter context is allocated, before options are set.
int (*preinit)(AVFilterContext *ctx);
// One-time initialization, called after all options have been applied.
int (*init)(AVFilterContext *ctx);
// init() variant that also receives the leftover options as a dictionary.
int (*init_dict)(AVFilterContext *ctx, AVDictionary **options);
// Free everything the filter allocated.
void (*uninit)(AVFilterContext *ctx);
// Declare the formats supported on each input and output.
int (*query_formats)(AVFilterContext *);
// Handle a command sent to the filter at runtime.
int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags);
// init() variant that takes an extra opaque/binary parameter.
int (*init_opaque)(AVFilterContext *ctx, void *opaque);
// Scheduling entry point; an alternative to filter_frame()/request_frame().
int (*activate)(AVFilterContext *ctx);
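For orientation, here is a user-side sketch of that lifecycle using the public libavfilter API (error handling omitted; the buffer source arguments are illustrative): creating a filter applies its options and runs init(), while configuring the graph runs query_formats() and each link's config_props().

#include <libavfilter/avfilter.h>

static int build_graph(void)
{
    AVFilterGraph   *graph = avfilter_graph_alloc();
    AVFilterContext *src, *sink;

    /* Creating a filter applies its options and calls init(). */
    avfilter_graph_create_filter(&src, avfilter_get_by_name("buffer"), "in",
        "video_size=320x240:pix_fmt=yuv420p:time_base=1/25:pixel_aspect=1/1",
        NULL, graph);
    avfilter_graph_create_filter(&sink, avfilter_get_by_name("buffersink"), "out",
        NULL, NULL, graph);

    /* Wire output pad 0 of src to input pad 0 of sink. */
    avfilter_link(src, 0, sink, 0);

    /* Negotiates formats (query_formats()) and configures every link. */
    return avfilter_graph_config(graph, NULL);
}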
Let's write a vf_yuvy.c to test extracting the YUV components of a frame.
This color model uses Y for luminance and two chroma channels: Cb (blue-difference chroma) and Cr (red-difference chroma). YCbCr can be derived from RGB and converted back to it, so the model can represent full-color images.
The conversion formulas between YCbCr and RGB:
Y = 0.299R + 0.587G + 0.114B
Once we have the luminance, we can split off the two chroma components (blue-difference and red-difference):
Cb = 0.564(B - Y)
Cr = 0.713(R - Y)
And we can also convert back from YCbCr, recovering green as well:
R = Y + 1.402Cr
B = Y + 1.772Cb
G = Y - 0.344Cb - 0.714Cr
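As a standalone illustration, here are these formulas in C (hypothetical helpers, not part of the filter below; chroma is offset by +128 as in 8-bit video):

#include <stdint.h>

static uint8_t clamp8(double v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)(v + 0.5);
}

/* RGB -> YCbCr with the coefficients above. */
static void rgb_to_ycbcr(uint8_t r, uint8_t g, uint8_t b,
                         uint8_t *y, uint8_t *cb, uint8_t *cr)
{
    double luma = 0.299 * r + 0.587 * g + 0.114 * b;
    *y  = clamp8(luma);
    *cb = clamp8(0.564 * (b - luma) + 128.0);
    *cr = clamp8(0.713 * (r - luma) + 128.0);
}

/* YCbCr -> RGB, the inverse mapping (green is recovered last). */
static void ycbcr_to_rgb(uint8_t y, uint8_t cb, uint8_t cr,
                         uint8_t *r, uint8_t *g, uint8_t *b)
{
    double c_b = cb - 128.0, c_r = cr - 128.0;
    *r = clamp8(y + 1.402 * c_r);
    *g = clamp8(y - 0.344 * c_b - 0.714 * c_r);
    *b = clamp8(y + 1.772 * c_b);
}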
The implementation details are elided; the overall structure looks like this:
#include <string.h> /* memset() */
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
typedef struct YUVYContext {
const AVClass *class;          // must be the first member, required by AVOptions
char *yuv;                     // which component(s) to keep: y|u|v|uv|b
enum AVPixelFormat *formats;
} YUVYContext;
static av_cold int init(AVFilterContext *ctx)
{
YUVYContext *s = ctx->priv;
...
return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
...
}
// Declares the formats this filter supports on its inputs and outputs. It is
// normally invoked after the filter has been initialized; the callback must
// set the format lists on the filter's links (e.g. AVFilterLink.out_formats).
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
};
AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
if (!fmts_list)
return AVERROR(ENOMEM);
return ff_set_common_formats(ctx, fmts_list);
}
static int cnt = 0; // frame counter for the (commented-out) debug log below
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
YUVYContext *yuvy = link->dst->priv;
int i;
AVFilterLink *olink = link->dst->outputs[0];
// ff_get_video_buffer() allocates the frame and sets width/height/format,
// so no separate av_frame_alloc() is needed (it would leak here).
AVFrame *out = ff_get_video_buffer(olink, olink->w, olink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
av_frame_copy(out, in);
//av_log(link->dst, AV_LOG_INFO, "filter_frame %d.\n", cnt++);
if (yuvy->yuv[0] == 'y') {
...
} else if (yuvy->yuv[0] == 'u' && yuvy->yuv[1] == 'v') {
...
} else if (yuvy->yuv[0] == 'b') {
...
} else if (yuvy->yuv[0] == 'u') {
...
} else if (yuvy->yuv[0] == 'v') {
...
}
av_frame_free(&in); // done with the input frame; freeing it avoids a leak
return ff_filter_frame(olink, out);
}
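/*
 * Hypothetical sketch (not part of the original) of what the "y" branch
 * above could do for AV_PIX_FMT_YUV420P: keep the Y plane untouched and
 * fill both chroma planes with the neutral value 128, which turns the
 * picture grayscale. memset() comes from <string.h>.
 */
static void keep_luma_only(AVFrame *out)
{
    int chroma_h = AV_CEIL_RSHIFT(out->height, 1); /* 4:2:0 chroma height */
    int chroma_w = AV_CEIL_RSHIFT(out->width,  1); /* 4:2:0 chroma width  */

    for (int p = 1; p <= 2; p++)                   /* planes 1 and 2: U, V */
        for (int y = 0; y < chroma_h; y++)
            memset(out->data[p] + y * out->linesize[p], 128, chroma_w);
}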
static int config_props(AVFilterLink *outlink)
{
...
}
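/* A plausible body for config_props() (a sketch; the original is elided):
 * propagate the input geometry to the output link, e.g.
 *
 *     AVFilterLink *inlink = outlink->src->inputs[0];
 *     outlink->w = inlink->w;
 *     outlink->h = inlink->h;
 *     return 0;
 */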
#define OFFSET(x) offsetof(YUVYContext, x)
static const AVOption options[] = {
// default "y" keeps yuvy->yuv non-NULL for the dereference in filter_frame()
{ "yuv", "component(s) to extract: y|u|v|uv|b", OFFSET(yuv), AV_OPT_TYPE_STRING, { .str = "y" }, .flags = AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM },
{ NULL }
};
#define CONFIG_YUVY_FILTER 1 // would normally come from config.h, generated by configure
#if CONFIG_YUVY_FILTER
#define yuvy_options options
AVFILTER_DEFINE_CLASS(yuvy);
static const AVFilterPad avfilter_vf_yuvy_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
static const AVFilterPad avfilter_vf_yuvy_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props,
},
{ NULL }
};
AVFilter ff_vf_yuvy = {
.name = "yuvy",
.description = NULL_IF_CONFIG_SMALL("Extract the Y/U/V component(s) of a yuv420p input"),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.priv_size = sizeof(YUVYContext),
.priv_class = &yuvy_class,
.inputs = avfilter_vf_yuvy_inputs,
.outputs = avfilter_vf_yuvy_outputs,
};
#endif /* CONFIG_YUVY_FILTER */
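Rebuild ffmpeg and exercise the filter, both on a video (-qp 0 keeps the x264 output lossless, so the extracted planes survive encoding exactly) and on a single image: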
ffmpeg -y -i demo.mp4 -vf "yuvy=yuv=y" -c:v libx264 -qp 0 y.mp4
ffmpeg -y -i demo.mp4 -vf "yuvy=yuv=u" -c:v libx264 -qp 0 u.mp4
ffmpeg -y -i demo.mp4 -vf "yuvy=yuv=v" -c:v libx264 -qp 0 v.mp4
ffmpeg -y -i demo.mp4 -vf "yuvy=yuv=uv" -c:v libx264 -qp 0 uv.mp4
ffmpeg -y -i demo.mp4 -vf "yuvy=yuv=b" -c:v libx264 -qp 0 b.mp4
ffmpeg -y -i demo.jpeg -vf "yuvy=yuv=y" y.jpeg; \
ffmpeg -y -i demo.jpeg -vf "yuvy=yuv=u" u.jpeg; \
ffmpeg -y -i demo.jpeg -vf "yuvy=yuv=v" v.jpeg; \
ffmpeg -y -i demo.jpeg -vf "yuvy=yuv=uv" uv.jpeg; \
ffmpeg -y -i demo.jpeg -vf "yuvy=yuv=b" b.jpeg
As the graph dumps below show, when the input pixel format is not supported by the filter, ffmpeg automatically inserts an auto_scaler to convert the format.
                                                                       +---------------+
graph 0 input from stream 0:0:default--[1920x1080 1:1 yuv420p]--default| Parsed_yuvy_0 |default--[1920x1080 1:1 yuv420p]--format:default
                                                                       |    (yuvy)     |
                                                                       +---------------+
+-------------------------------+
| graph 0 input from stream 0:0 |default--[1920x1080 1:1 yuv420p]--Parsed_yuvy_0:default
|           (buffer)            |
+-------------------------------+
                                                +--------------+
format:default--[1920x1080 1:1 yuv420p]--default|   out_0_0    |
                                                | (buffersink) |
                                                +--------------+
                                                       +----------+
Parsed_yuvy_0:default--[1920x1080 1:1 yuv420p]--default|  format  |default--[1920x1080 1:1 yuv420p]--out_0_0:default
                                                       | (format) |
                                                       +----------+

                                                      +---------------+
auto_scaler_0:default--[1280x720 1:1 yuv420p]--default| Parsed_yuvy_0 |default--[1280x720 1:1 yuv420p]--format:default
                                                      |    (yuvy)     |
                                                      +---------------+
+-------------------------------+
| graph 0 input from stream 0:0 |default--[1280x720 1:1 yuv444p]--auto_scaler_0:default
|           (buffer)            |
+-------------------------------+
                                               +--------------+
format:default--[1280x720 1:1 yuv420p]--default|   out_0_0    |
                                               | (buffersink) |
                                               +--------------+
                                                      +----------+
Parsed_yuvy_0:default--[1280x720 1:1 yuv420p]--default|  format  |default--[1280x720 1:1 yuv420p]--out_0_0:default
                                                      | (format) |
                                                      +----------+
                                                                      +---------------+
graph 0 input from stream 0:0:default--[1280x720 1:1 yuv444p]--default| auto_scaler_0 |default--[1280x720 1:1 yuv420p]--Parsed_yuvy_0:default
                                                                      |    (scale)    |
                                                                      +---------------+