Project repository, stars appreciated:
https://github.com/979451341/Audio-and-video-learning-materials/tree/master/FFmpeg(AVfilter%E8%BF%87%E6%BB%A4%EF%BC%89
1. AVFilter struct members
The effect is implemented with AVFilter, so first let's go over the members of the AVFilter struct:
/**
 * Filter definition. This defines the pads a filter contains, and all the
 * callback functions used to interact with the filter.
 */
typedef struct AVFilter {
    /**
     * Filter name. Must be non-NULL and unique among filters.
     */
    const char *name;

    /**
     * A description of the filter. May be NULL.
     * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
     */
    const char *description;

    /**
     * List of inputs, terminated by a zeroed element.
     * NULL if there are no (static) inputs. Instances of filters with
     * AVFILTER_FLAG_DYNAMIC_INPUTS set may have more inputs than present
     * in this list.
     */
    const AVFilterPad *inputs;

    /**
     * List of outputs, terminated by a zeroed element.
     * NULL if there are no (static) outputs. Instances of filters with
     * AVFILTER_FLAG_DYNAMIC_OUTPUTS set may have more outputs than present
     * in this list.
     */
    const AVFilterPad *outputs;

    /**
     * A class for the private data, used to declare filter private AVOptions.
     * This field is NULL for filters that do not declare any options.
     * If this field is non-NULL, the first member of the filter private data
     * must be a pointer to AVClass, which will be set by libavfilter generic
     * code to this class.
     */
    const AVClass *priv_class;

    /**
     * A combination of AVFILTER_FLAG_*
     */
    int flags;

    ...
} AVFilter;
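To make these members concrete, here is a minimal standalone sketch (written against the same FFmpeg era as this tutorial, where avfilter_register_all() still exists) that looks up a filter by name and prints the fields described above:

#include <stdio.h>
#include <libavfilter/avfilter.h>

int main(void) {
    avfilter_register_all();  // required before lookups in this FFmpeg era

    // avfilter_get_by_name() returns the registered AVFilter definition
    const AVFilter *f = avfilter_get_by_name("lutyuv");
    if (f != NULL) {
        printf("name:        %s\n", f->name);
        printf("description: %s\n", f->description ? f->description : "(none)");
        printf("flags:       0x%x\n", f->flags);
    }
    return 0;
}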
2. Steps for using AVFilter
Now let's walk through the steps of using AVFilter directly in code.
Since applying a filter means playing a video, we first have to decode it. Here is the standard setup code:
// Video file path on the SD card; change it yourself or pass it in via JNI
char *file_name = "/storage/emulated/0/pauseRecordDemo/video/2018-02-03-09-25-34.mp4";
//char *file_name = "/storage/emulated/0/video.avi";

av_register_all();
// Register all AVFilters
avfilter_register_all(); // added by ws for AVfilter

AVFormatContext *pFormatCtx = avformat_alloc_context();

// Open video file
if (avformat_open_input(&pFormatCtx, file_name, NULL, NULL) != 0) {
    LOGD("Couldn't open file:%s\n", file_name);
    return -1; // Couldn't open file
}

// Retrieve stream information
if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
    LOGD("Couldn't find stream information.");
    return -1;
}

// Find the first video stream
int videoStream = -1, i;
for (i = 0; i < pFormatCtx->nb_streams; i++) {
    if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO
        && videoStream < 0) {
        videoStream = i;
    }
}
if (videoStream == -1) {
    LOGD("Didn't find a video stream.");
    return -1; // Didn't find a video stream
}

// Get a pointer to the codec context for the video stream
AVCodecContext *pCodecCtx = pFormatCtx->streams[videoStream]->codec;
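A side note: AVStream->codec above, and avcodec_decode_video2 used later, were deprecated around FFmpeg 3.1. This tutorial targets the older API; on a newer FFmpeg the codec context would instead be built from codecpar, roughly like this sketch:

// Sketch for FFmpeg >= 3.1, not the tutorial's code path
AVCodecParameters *par = pFormatCtx->streams[videoStream]->codecpar;
AVCodec *codec = avcodec_find_decoder(par->codec_id);
AVCodecContext *ctx = avcodec_alloc_context3(codec);
avcodec_parameters_to_context(ctx, par);  // copy stream parameters into the context
avcodec_open2(ctx, codec, NULL);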
Now for the filter preparation:
AVFilter *buffersrc = avfilter_get_by_name("buffer");
AVFilter *buffersink = avfilter_get_by_name("buffersink"); // newer FFmpeg libraries require "buffersink"
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
AVBufferSinkParams *buffersink_params;

// Allocate memory for the FilterGraph
filter_graph = avfilter_graph_alloc();

// Create a filter instance and add it to the FilterGraph
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                   args, NULL, filter_graph);
if (ret < 0) {
    LOGD("Cannot create buffer source\n");
    return ret;
}

buffersink_params = av_buffersink_params_alloc();
buffersink_params->pixel_fmts = pix_fmts;
// Create a filter instance and add it to the FilterGraph
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                   NULL, buffersink_params, filter_graph);
av_free(buffersink_params);
if (ret < 0) {
    LOGD("Cannot create buffer sink\n");
    return ret;
}
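Note that the call creating the buffer source references an args variable defined elsewhere in the project's full source. In FFmpeg's own filtering_video.c example it is a string describing the input video, built roughly like this (a sketch reusing the variables from the setup code above):

char args[512];
AVRational time_base = pFormatCtx->streams[videoStream]->time_base;
// The buffer source must know the size, pixel format, time base and
// aspect ratio of the frames it will be fed
snprintf(args, sizeof(args),
         "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
         pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
         time_base.num, time_base.den,
         pCodecCtx->sample_aspect_ratio.num, pCodecCtx->sample_aspect_ratio.den);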
Assign the input/output descriptions for the filter graph. The naming is deliberately inverted: outputs describes the free output pad of the buffer source (the graph's entry point, named "in"), while inputs describes the free input pad of the buffersink (the graph's exit point, named "out"):
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;

inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
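To keep the wiring straight, here is a conceptual sketch (not code from the project) of what the graph looks like once the description string in the next step has been parsed in:

/*
 *  [buffer "in"] ----> [lutyuv u=128:v=128] ----> [buffersink "out"]
 *        ^                                               ^
 *  outputs->name="in"                            inputs->name="out"
 *  (free output pad of                           (free input pad of
 *   the buffer source)                            the buffersink)
 */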
Configure and apply the filter effect. Fixing the U and V chroma components at 128 strips the color information, so the picture is rendered in black and white:
const char *filters_descr = "lutyuv='u=128:v=128'";

// Parse a graph described by a string and add it to the FilterGraph
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
                                    &inputs, &outputs, NULL)) < 0) {
    LOGD("Cannot avfilter_graph_parse_ptr\n");
    return ret;
}

// Check the validity of the FilterGraph's configuration
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) {
    LOGD("Cannot avfilter_graph_config\n");
    return ret;
}
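The entire effect is driven by that one description string, so swapping it changes the filter without touching any other code. A few alternatives using standard filters from a normal FFmpeg build (a sketch, untested here):

const char *mirror_descr = "hflip";             // flip the picture horizontally
const char *blur_descr   = "boxblur=2:1";       // box blur: luma_radius=2, luma_power=1
const char *negate_descr = "lutyuv='y=negval'"; // invert the luma plane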
Next comes rendering:
// Find the decoder for the video stream
AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) {
    LOGD("Codec not found.");
    return -1; // Codec not found
}
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
    LOGD("Could not open codec.");
    return -1; // Could not open codec
}

// Get the native window
ANativeWindow *nativeWindow = ANativeWindow_fromSurface(env, surface);

// Get the video dimensions
int videoWidth = pCodecCtx->width;
int videoHeight = pCodecCtx->height;

// Set the native window's buffer size; the content is scaled to fit automatically
ANativeWindow_setBuffersGeometry(nativeWindow, videoWidth, videoHeight,
                                 WINDOW_FORMAT_RGBA_8888);
ANativeWindow_Buffer windowBuffer;

// Allocate video frames: one for decoding, one for rendering
AVFrame *pFrame = av_frame_alloc();
AVFrame *pFrameRGBA = av_frame_alloc();
if (pFrameRGBA == NULL || pFrame == NULL) {
    LOGD("Could not allocate video frame.");
    return -1;
}

// Determine the required buffer size and allocate the buffer;
// the buffer holds the data used for rendering, in RGBA format
int numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGBA, pCodecCtx->width,
                                        pCodecCtx->height, 1);
uint8_t *buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
av_image_fill_arrays(pFrameRGBA->data, pFrameRGBA->linesize, buffer,
                     AV_PIX_FMT_RGBA, pCodecCtx->width, pCodecCtx->height, 1);

// Decoded frames are not in RGBA, so convert the format before rendering
struct SwsContext *sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                                            pCodecCtx->pix_fmt,
                                            pCodecCtx->width, pCodecCtx->height,
                                            AV_PIX_FMT_RGBA,
                                            SWS_BILINEAR, NULL, NULL, NULL);
Finally, decode and release resources. During decoding, each decoded frame is pushed through the FilterGraph before being displayed:
while (av_read_frame(pFormatCtx, &packet) >= 0) {
    // Is this a packet from the video stream?
    if (packet.stream_index == videoStream) {
        // Decode video frame
        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

        // A single decode call does not necessarily produce a complete frame
        if (frameFinished) {
            // added by ws for AVfilter start
            pFrame->pts = av_frame_get_best_effort_timestamp(pFrame);

            // Push the AVFrame into the FilterGraph
            if (av_buffersrc_add_frame(buffersrc_ctx, pFrame) < 0) {
                LOGD("Could not av_buffersrc_add_frame");
                break;
            }

            // Pull a filtered AVFrame out of the FilterGraph
            ret = av_buffersink_get_frame(buffersink_ctx, pFrame);
            if (ret < 0) {
                LOGD("Could not av_buffersink_get_frame");
                break;
            }
            // added by ws for AVfilter end

            // Lock the native window buffer
            ANativeWindow_lock(nativeWindow, &windowBuffer, 0);

            // Convert the frame to RGBA
            sws_scale(sws_ctx, (uint8_t const *const *) pFrame->data,
                      pFrame->linesize, 0, pCodecCtx->height,
                      pFrameRGBA->data, pFrameRGBA->linesize);

            // Get the strides
            uint8_t *dst = (uint8_t *) windowBuffer.bits;
            int dstStride = windowBuffer.stride * 4;
            uint8_t *src = pFrameRGBA->data[0];
            int srcStride = pFrameRGBA->linesize[0];

            // The window stride and the frame stride differ,
            // so the copy has to be done line by line
            int h;
            for (h = 0; h < videoHeight; h++) {
                memcpy(dst + h * dstStride, src + h * srcStride, srcStride);
            }

            ANativeWindow_unlockAndPost(nativeWindow);
        }
    }
    av_packet_unref(&packet);
}

av_free(buffer);
av_free(pFrameRGBA);
// Free the YUV frame
av_free(pFrame);
avfilter_graph_free(&filter_graph); // added by ws for AVfilter
// Close the codec
avcodec_close(pCodecCtx);
// Free the scaler, release the window reference, and close the input
sws_freeContext(sws_ctx);
ANativeWindow_release(nativeWindow);
avformat_close_input(&pFormatCtx);
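One caveat: the loop above pulls exactly one frame out of the buffersink per input frame, which works for a simple one-in/one-out chain like lutyuv. Filters may in general buffer or multiply frames, so FFmpeg's own filtering_video.c example drains the sink in a loop until it reports AVERROR(EAGAIN) or AVERROR_EOF. A sketch of that pattern, where filt_frame is a hypothetical separately allocated output frame:

AVFrame *filt_frame = av_frame_alloc(); // hypothetical: a dedicated output frame

if (av_buffersrc_add_frame(buffersrc_ctx, pFrame) >= 0) {
    while (1) {
        ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;                    // no more frames available right now
        if (ret < 0)
            break;                    // a real error; report and bail out
        // ... sws_scale() + ANativeWindow rendering as above ...
        av_frame_unref(filt_frame);   // release the frame's buffers each pass
    }
}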
To really master this, you need to go through avfilter.c at least a couple of times, because we have not yet looked at the internal members of these four structs:
typedef struct AVFilterContext AVFilterContext;
typedef struct AVFilterLink AVFilterLink;
typedef struct AVFilterPad AVFilterPad;
typedef struct AVFilterFormats AVFilterFormats;
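As a starting point, here is a minimal sketch (field names taken from avfilter.h; the exact layout varies across FFmpeg versions) showing how these types surface in the code above: every instantiated filter such as buffersrc_ctx is an AVFilterContext, its pads are described by AVFilterPad, and connected pads are joined by an AVFilterLink that records the negotiated format.

// A sketch, not project code: inspect the link feeding the buffersink.
// AVFilterContext.inputs is an array of AVFilterLink*; link->w/h/format
// hold the negotiated picture parameters after avfilter_graph_config().
AVFilterLink *link = buffersink_ctx->inputs[0];
LOGD("filter '%s': input %dx%d, pix_fmt=%d",
     buffersink_ctx->filter->name, link->w, link->h, link->format);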