Looking at the declaration of avcodec_decode_video2(), we find that this function is deprecated and has been replaced by avcodec_send_packet() and avcodec_receive_frame(). In fact, in newer FFmpeg versions avcodec_decode_video2() itself ends up calling these two functions, along the following chain:
avcodec_decode_video2 -> compat_decode -> avcodec_send_packet / avcodec_receive_frame
Let us first look at avcodec_send_packet(), which is declared in libavcodec\avcodec.h as follows.
/**
* Supply raw packet data as input to a decoder.
*
* Internally, this call will copy relevant AVCodecContext fields, which can
* influence decoding per-packet, and apply them when the packet is actually
* decoded. (For example AVCodecContext.skip_frame, which might direct the
* decoder to drop the frame contained by the packet sent with this function.)
*
* @warning The input buffer, avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE
* larger than the actual read bytes because some optimized bitstream
* readers read 32 or 64 bits at once and could read over the end.
*
* @warning Do not mix this API with the legacy API (like avcodec_decode_video2())
* on the same AVCodecContext. It will return unexpected results now
* or in future libavcodec versions.
*
* @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
* before packets may be fed to the decoder.
*
* @param avctx codec context
* @param[in] avpkt The input AVPacket. Usually, this will be a single video
* frame, or several complete audio frames.
* Ownership of the packet remains with the caller, and the
* decoder will not write to the packet. The decoder may create
* a reference to the packet data (or copy it if the packet is
* not reference-counted).
* Unlike with older APIs, the packet is always fully consumed,
* and if it contains multiple frames (e.g. some audio codecs),
* will require you to call avcodec_receive_frame() multiple
* times afterwards before you can send a new packet.
* It can be NULL (or an AVPacket with data set to NULL and
* size set to 0); in this case, it is considered a flush
* packet, which signals the end of the stream. Sending the
* first flush packet will return success. Subsequent ones are
* unnecessary and will return AVERROR_EOF. If the decoder
* still has frames buffered, it will return them after sending
* a flush packet.
*
* @return 0 on success, otherwise negative error code:
* AVERROR(EAGAIN): input is not accepted in the current state - user
* must read output with avcodec_receive_frame() (once
* all output is read, the packet should be resent, and
* the call will not fail with EAGAIN).
* AVERROR_EOF: the decoder has been flushed, and no new packets can
* be sent to it (also returned if more than 1 flush
* packet is sent)
* AVERROR(EINVAL): codec not opened, it is an encoder, or requires flush
* AVERROR(ENOMEM): failed to add packet to internal queue, or similar
* other errors: legitimate decoding errors
*/
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt);
The function's documentation is fairly long; a brief summary follows.
@ Purpose: supply raw packet data as input to a decoder.
Internally, the call copies the relevant AVCodecContext fields that can influence per-packet decoding and applies them when the packet is actually decoded. For example, AVCodecContext.skip_frame may direct the decoder to drop the frame contained in the packet.
@ warning The input buffer avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE bytes larger than the actual payload, because optimized bitstream readers read 32 or 64 bits at a time and may read past the end.
@ warning Do not mix this API with the legacy API (e.g. avcodec_decode_video2()) on the same AVCodecContext; doing so produces unpredictable results.
@ note The AVCodecContext must have been opened with avcodec_open2() before packets are fed to the decoder.
@ param avctx the decoding context
@ param[in] avpkt the input AVPacket, usually a single video frame or several complete audio frames. Ownership of the packet remains with the caller, and the decoder will not write to it; the decoder may keep a reference to the packet data (or copy it if the packet is not reference-counted). Unlike the older APIs, the packet is always fully consumed; if it contains multiple frames (as with some audio codecs), avcodec_receive_frame() must be called several times before a new packet can be sent. avpkt may be NULL (or an AVPacket with data set to NULL and size set to 0); this is treated as a flush packet signaling the end of the stream. Sending the first flush packet returns success; subsequent ones are unnecessary and return AVERROR_EOF. If the decoder still has frames buffered, it will return them after the flush packet.
@ return 0 on success, otherwise a negative error code:
AVERROR(EAGAIN): input is not accepted in the current state; the caller must first read output with avcodec_receive_frame() (once all output has been read, the packet can be resent and the call will not fail with EAGAIN)
AVERROR_EOF: the decoder has been flushed, and no new packets can be sent to it (also returned if more than one flush packet is sent)
AVERROR(EINVAL): the codec is not opened, it is an encoder, or it requires a flush
AVERROR(ENOMEM): failed to add the packet to the internal queue, or similar
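Before diving into the implementation, here is a minimal sketch of how a caller typically drives this pair of functions, based on the contract above (decode_packet is an illustrative helper name, not an FFmpeg API; dec_ctx, pkt and frame are assumed to have been allocated and opened elsewhere):
#include <libavcodec/avcodec.h>

/* Send one packet and drain every frame it produces.
 * Pass pkt == NULL as the flush packet at end of stream. */
static int decode_packet(AVCodecContext *dec_ctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;               /* EINVAL, ENOMEM, or a real decode error */

    for (;;) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN))
            return 0;             /* decoder needs more input */
        if (ret == AVERROR_EOF)
            return 0;             /* fully drained after the flush packet */
        if (ret < 0)
            return ret;           /* legitimate decoding error */

        /* ... consume the decoded frame here ... */
        av_frame_unref(frame);
    }
}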
avcodec_send_packet() is defined in libavcodec\decode.c as follows.
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
        return AVERROR(EINVAL);

    /* a flush packet was already sent: no further input is accepted */
    if (avctx->internal->draining)
        return AVERROR_EOF;

    if (avpkt && !avpkt->size && avpkt->data)
        return AVERROR(EINVAL);

    /* cache a reference to the caller's packet in avci->buffer_pkt */
    av_packet_unref(avci->buffer_pkt);
    if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
        ret = av_packet_ref(avci->buffer_pkt, avpkt);
        if (ret < 0)
            return ret;
    }

    /* hand the packet to the first bitstream filter in the chain */
    ret = av_bsf_send_packet(avci->filter.bsfs[0], avci->buffer_pkt);
    if (ret < 0) {
        av_packet_unref(avci->buffer_pkt);
        return ret;
    }

    /* eagerly decode one frame into avci->buffer_frame if it is empty */
    if (!avci->buffer_frame->buf[0]) {
        ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
        if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            return ret;
    }

    return 0;
}
As you can see, the heart of avcodec_send_packet() is the function decode_receive_frame_internal(). The av_bsf_send_packet() machinery around it is mainly there to cache the incoming AVPacket:
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
{
int ret;
if (!pkt || (!pkt->data && !pkt->side_data_elems)) {
ctx->internal->eof = 1;
return 0;
}
if (ctx->internal->eof) {
av_log(ctx, AV_LOG_ERROR, "A non-NULL packet sent after an EOF.\n");
return AVERROR(EINVAL);
}
if (ctx->internal->buffer_pkt->data ||
ctx->internal->buffer_pkt->side_data_elems)
return AVERROR(EAGAIN);
ret = av_packet_make_refcounted(pkt);
if (ret < 0)
return ret;
av_packet_move_ref(ctx->internal->buffer_pkt, pkt);
return 0;
}
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
{
*dst = *src;
av_init_packet(src);
src->data = NULL;
src->size = 0;
}
As you can see, av_bsf_send_packet() moves the incoming AVPacket into the given AVBSFContext's internal buffer and leaves the source packet blank.
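As a tiny illustration of this move semantics (hypothetical caller code, not from the FFmpeg tree):
AVPacket *src = av_packet_alloc();
AVPacket *dst = av_packet_alloc();
/* ... fill src, e.g. via av_read_frame() ... */
av_packet_move_ref(dst, src); /* dst takes over src's buffer reference */
/* src is now blank (data == NULL, size == 0) and can be reused or freed */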
decode_receive_frame_internal() is the key decoding function. It is defined as follows:
static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
int ret;
av_assert0(!frame->buf[0]);
if (avctx->codec->receive_frame)
ret = avctx->codec->receive_frame(avctx, frame);
else
ret = decode_simple_receive_frame(avctx, frame);
if (ret == AVERROR_EOF)
avci->draining_done = 1;
if (!ret) {
/* the only case where decode data is not set should be decoders
* that do not call ff_get_buffer() */
av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
!(avctx->codec->capabilities & AV_CODEC_CAP_DR1));
if (frame->private_ref) {
FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
if (fdd->post_process) {
ret = fdd->post_process(avctx, frame);
if (ret < 0) {
av_frame_unref(frame);
return ret;
}
}
}
}
/* free the per-frame decode data */
av_buffer_unref(&frame->private_ref);
return ret;
}
As you can see, decode_receive_frame_internal() first checks whether the AVCodec provides a receive_frame callback; if it does, that implementation is used to decode, otherwise decode_simple_receive_frame() is called.
Taking H.264 as an example, the corresponding decoder is ff_h264_decoder, defined in libavcodec\h264dec.c:
AVCodec ff_h264_decoder = {
.name = "h264",
.long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_H264,
.priv_data_size = sizeof(H264Context),
.init = h264_decode_init,
.close = h264_decode_end,
.decode = h264_decode_frame,
.capabilities = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_FRAME_THREADS,
.hw_configs = (const AVCodecHWConfigInternal*[]) {
#if CONFIG_H264_DXVA2_HWACCEL
HWACCEL_DXVA2(h264),
#endif
#if CONFIG_H264_D3D11VA_HWACCEL
HWACCEL_D3D11VA(h264),
#endif
#if CONFIG_H264_D3D11VA2_HWACCEL
HWACCEL_D3D11VA2(h264),
#endif
#if CONFIG_H264_NVDEC_HWACCEL
HWACCEL_NVDEC(h264),
#endif
#if CONFIG_H264_VAAPI_HWACCEL
HWACCEL_VAAPI(h264),
#endif
#if CONFIG_H264_VDPAU_HWACCEL
HWACCEL_VDPAU(h264),
#endif
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
HWACCEL_VIDEOTOOLBOX(h264),
#endif
NULL
},
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING,
.flush = flush_dpb,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
.update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
.profiles = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
.priv_class = &h264_class,
};
No receive_frame callback is set here, so we go straight to decode_simple_receive_frame():
static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
int ret;
while (!frame->buf[0]) {
ret = decode_simple_internal(avctx, frame);
if (ret < 0)
return ret;
}
return 0;
}
As you can see, it simply calls decode_simple_internal() in a loop until a frame is produced:
/*
* The core of the receive_frame_wrapper for the decoders implementing
* the simple API. Certain decoders might consume partial packets without
* returning any output, so this function needs to be called in a loop until it
* returns EAGAIN.
**/
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
DecodeSimpleContext *ds = &avci->ds;
AVPacket *pkt = ds->in_pkt;
// copy to ensure we do not change pkt
int got_frame, actual_got_frame;
int ret;
if (!pkt->data && !avci->draining) {
av_packet_unref(pkt);
        // fetch the AVPacket that was cached when av_bsf_send_packet() ran
ret = ff_decode_get_packet(avctx, pkt);
if (ret < 0 && ret != AVERROR_EOF)
return ret;
}
// Some codecs (at least wma lossless) will crash when feeding drain packets
// after EOF was signaled.
if (avci->draining_done)
return AVERROR_EOF;
if (!pkt->data &&
!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
avctx->active_thread_type & FF_THREAD_FRAME))
return AVERROR_EOF;
got_frame = 0;
if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
        // frame threading: fetch an AVFrame buffered by the asynchronous decode threads
ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
} else {
        // the key decode call: dispatch to the codec-specific decode() implementation
ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);
if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
frame->pkt_dts = pkt->dts;
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
if(!avctx->has_b_frames)
frame->pkt_pos = pkt->pos;
//FIXME these should be under if(!avctx->has_b_frames)
/* get_buffer is supposed to set frame parameters */
if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
if (!frame->width) frame->width = avctx->width;
if (!frame->height) frame->height = avctx->height;
if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
}
}
}
emms_c();
actual_got_frame = got_frame;
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
if (frame->flags & AV_FRAME_FLAG_DISCARD)
got_frame = 0;
if (got_frame)
frame->best_effort_timestamp = guess_correct_pts(avctx,
frame->pts,
frame->pkt_dts);
} else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
uint8_t *side;
int side_size;
uint32_t discard_padding = 0;
uint8_t skip_reason = 0;
uint8_t discard_reason = 0;
if (ret >= 0 && got_frame) {
frame->best_effort_timestamp = guess_correct_pts(avctx,
frame->pts,
frame->pkt_dts);
if (frame->format == AV_SAMPLE_FMT_NONE)
frame->format = avctx->sample_fmt;
if (!frame->channel_layout)
frame->channel_layout = avctx->channel_layout;
if (!frame->channels)
frame->channels = avctx->channels;
if (!frame->sample_rate)
frame->sample_rate = avctx->sample_rate;
}
side= av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
if(side && side_size>=10) {
avctx->internal->skip_samples = AV_RL32(side) * avctx->internal->skip_samples_multiplier;
discard_padding = AV_RL32(side + 4);
av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
avctx->internal->skip_samples, (int)discard_padding);
skip_reason = AV_RL8(side + 8);
discard_reason = AV_RL8(side + 9);
}
if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples);
got_frame = 0;
}
if (avctx->internal->skip_samples > 0 && got_frame &&
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
if(frame->nb_samples <= avctx->internal->skip_samples){
got_frame = 0;
avctx->internal->skip_samples -= frame->nb_samples;
av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
avctx->internal->skip_samples);
} else {
av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
if(avctx->pkt_timebase.num && avctx->sample_rate) {
int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
(AVRational){1, avctx->sample_rate},
avctx->pkt_timebase);
if(frame->pts!=AV_NOPTS_VALUE)
frame->pts += diff_ts;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
if(frame->pkt_pts!=AV_NOPTS_VALUE)
frame->pkt_pts += diff_ts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if(frame->pkt_dts!=AV_NOPTS_VALUE)
frame->pkt_dts += diff_ts;
if (frame->pkt_duration >= diff_ts)
frame->pkt_duration -= diff_ts;
} else {
av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
}
av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
avctx->internal->skip_samples, frame->nb_samples);
frame->nb_samples -= avctx->internal->skip_samples;
avctx->internal->skip_samples = 0;
}
}
if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
if (discard_padding == frame->nb_samples) {
got_frame = 0;
} else {
if(avctx->pkt_timebase.num && avctx->sample_rate) {
int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
(AVRational){1, avctx->sample_rate},
avctx->pkt_timebase);
frame->pkt_duration = diff_ts;
} else {
av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
}
av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
(int)discard_padding, frame->nb_samples);
frame->nb_samples -= discard_padding;
}
}
if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
if (fside) {
AV_WL32(fside->data, avctx->internal->skip_samples);
AV_WL32(fside->data + 4, discard_padding);
AV_WL8(fside->data + 8, skip_reason);
AV_WL8(fside->data + 9, discard_reason);
avctx->internal->skip_samples = 0;
}
}
}
if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
!avci->showed_multi_packet_warning &&
ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
avci->showed_multi_packet_warning = 1;
}
if (!got_frame)
av_frame_unref(frame);
if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
ret = pkt->size;
#if FF_API_AVCTX_TIMEBASE
if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
#endif
/* do not stop draining when actual_got_frame != 0 or ret < 0 */
/* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
if (avctx->internal->draining && !actual_got_frame) {
if (ret < 0) {
/* prevent infinite loop if a decoder wrongly always return error on draining */
/* reasonable nb_errors_max = maximum b frames + thread count */
int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
avctx->thread_count : 1);
if (avci->nb_draining_errors++ >= nb_errors_max) {
av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
"Stop draining and force EOF.\n");
avci->draining_done = 1;
ret = AVERROR_BUG;
}
} else {
avci->draining_done = 1;
}
}
avci->compat_decode_consumed += ret;
    // if the AVPacket has been fully consumed, release it; otherwise advance the data pointer and fix up the bookkeeping fields
if (ret >= pkt->size || ret < 0) {
av_packet_unref(pkt);
} else {
int consumed = ret;
pkt->data += consumed;
pkt->size -= consumed;
avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
pkt->pts = AV_NOPTS_VALUE;
pkt->dts = AV_NOPTS_VALUE;
avci->last_pkt_props->pts = AV_NOPTS_VALUE;
avci->last_pkt_props->dts = AV_NOPTS_VALUE;
}
if (got_frame)
av_assert0(frame->buf[0]);
return ret < 0 ? ret : 0;
}
This function is quite long, but the crucial statement is the single call through avctx->codec->decode. decode is a function pointer in the AVCodec struct, and each codec format provides its own implementation. For H.264 the decoder is ff_h264_decoder (its definition was quoted above), whose implementation is h264_decode_frame().
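For reference, in the FFmpeg 4.x tree quoted throughout this article the slot is declared in struct AVCodec roughly as below (simplified excerpt; for video decoders using this "simple" API, outdata receives the AVFrame and *outdata_size acts as the got_frame flag):
/* simplified excerpt from struct AVCodec (libavcodec/avcodec.h) */
int (*decode)(AVCodecContext *avctx, void *outdata, int *outdata_size, AVPacket *avpkt);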
Let us take a quick look at h264_decode_frame():
static int h264_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
H264Context *h = avctx->priv_data;
AVFrame *pict = data;
int buf_index;
int ret;
h->flags = avctx->flags;
h->setup_finished = 0;
h->nb_slice_ctx_queued = 0;
ff_h264_unref_picture(h, &h->last_pic_for_ec);
/* end of stream, output what is still in the buffers */
if (buf_size == 0)
return send_next_delayed_frame(h, pict, got_frame, 0);
if (h->is_avc && av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
int side_size;
uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
if (is_extra(side, side_size))
ff_h264_decode_extradata(side, side_size,
&h->ps, &h->is_avc, &h->nal_length_size,
avctx->err_recognition, avctx);
}
if (h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC) {
if (is_extra(buf, buf_size))
return ff_h264_decode_extradata(buf, buf_size,
&h->ps, &h->is_avc, &h->nal_length_size,
avctx->err_recognition, avctx);
}
buf_index = decode_nal_units(h, buf, buf_size);
if (buf_index < 0)
return AVERROR_INVALIDDATA;
if (!h->cur_pic_ptr && h->nal_unit_type == H264_NAL_END_SEQUENCE) {
av_assert0(buf_index <= buf_size);
return send_next_delayed_frame(h, pict, got_frame, buf_index);
}
if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && (!h->cur_pic_ptr || !h->has_slice)) {
if (avctx->skip_frame >= AVDISCARD_NONREF ||
buf_size >= 4 && !memcmp("Q264", buf, 4))
return buf_size;
av_log(avctx, AV_LOG_ERROR, "no frame!\n");
return AVERROR_INVALIDDATA;
}
if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
(h->mb_y >= h->mb_height && h->mb_height)) {
if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
return ret;
/* Wait for second field. */
if (h->next_output_pic) {
ret = finalize_frame(h, pict, h->next_output_pic, got_frame);
if (ret < 0)
return ret;
}
}
av_assert0(pict->buf[0] || !*got_frame);
ff_h264_unref_picture(h, &h->last_pic_for_ec);
return get_consumed_bytes(buf_index, buf_size);
}
As you can see, this function breaks down into roughly two steps:
1) Check whether the end of the stream has been reached (buf_size == 0); if so, flush out the frames still sitting in the decoder's buffers.
2) Otherwise decode according to the NAL type: SPS/PPS extradata is handled by ff_h264_decode_extradata(), everything else by decode_nal_units().
The helper is_extra() determines whether a buffer holds SPS/PPS data:
static int is_extra(const uint8_t *buf, int buf_size)
{
int cnt= buf[5]&0x1f;
const uint8_t *p= buf+6;
if (!cnt)
return 0;
while(cnt--){
int nalsize= AV_RB16(p) + 2;
if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 7)
return 0;
p += nalsize;
}
cnt = *(p++);
if(!cnt)
return 0;
while(cnt--){
int nalsize= AV_RB16(p) + 2;
if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 8)
return 0;
p += nalsize;
}
return 1;
}
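Note that is_extra() walks the buffer assuming avcC-style (MP4) extradata. As a rough reference, the layout it expects looks like this (byte offsets follow the avcC format from ISO/IEC 14496-15, summarized here as an aid rather than taken from the FFmpeg sources):
/* avcC extradata layout as parsed by is_extra():
 * buf[0]     configurationVersion (1)
 * buf[1..3]  AVC profile / profile compatibility / level
 * buf[4]     0xFC | (lengthSizeMinusOne & 0x03)
 * buf[5]     0xE0 | numOfSequenceParameterSets (low 5 bits -> first cnt)
 * buf[6..]   for each SPS: 16-bit big-endian length, then the NAL (type 7)
 * next byte  numOfPictureParameterSets (second cnt)
 *            for each PPS: 16-bit big-endian length, then the NAL (type 8)
 */
With the packet-feeding path covered, let us turn to avcodec_receive_frame(), which is also defined in libavcodec\decode.c: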
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
int ret, changed;
av_frame_unref(frame);
if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
return AVERROR(EINVAL);
    if (avci->buffer_frame->buf[0]) {
        /* a frame was decoded eagerly inside avcodec_send_packet(); take it over */
        av_frame_move_ref(frame, avci->buffer_frame);
} else {
ret = decode_receive_frame_internal(avctx, frame);
if (ret < 0)
return ret;
}
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
ret = apply_cropping(avctx, frame);
if (ret < 0) {
av_frame_unref(frame);
return ret;
}
}
avctx->frame_number++;
if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) {
if (avctx->frame_number == 1) {
avci->initial_format = frame->format;
switch(avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
avci->initial_width = frame->width;
avci->initial_height = frame->height;
break;
case AVMEDIA_TYPE_AUDIO:
avci->initial_sample_rate = frame->sample_rate ? frame->sample_rate :
avctx->sample_rate;
avci->initial_channels = frame->channels;
avci->initial_channel_layout = frame->channel_layout;
break;
}
}
if (avctx->frame_number > 1) {
changed = avci->initial_format != frame->format;
switch(avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
changed |= avci->initial_width != frame->width ||
avci->initial_height != frame->height;
break;
case AVMEDIA_TYPE_AUDIO:
changed |= avci->initial_sample_rate != frame->sample_rate ||
avci->initial_sample_rate != avctx->sample_rate ||
avci->initial_channels != frame->channels ||
avci->initial_channel_layout != frame->channel_layout;
break;
}
if (changed) {
avci->changed_frames_dropped++;
av_log(avctx, AV_LOG_INFO, "dropped changed frame #%d pts %"PRId64
" drop count: %d \n",
avctx->frame_number, frame->pts,
avci->changed_frames_dropped);
av_frame_unref(frame);
return AVERROR_INPUT_CHANGED;
}
}
}
return 0;
}
As you can see, the logic of this function is simple: it first tries to take an AVFrame directly from the internal buffer (filled eagerly by avcodec_send_packet()); only if nothing is buffered does it call decode_receive_frame_internal() to decode a new frame.
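One branch worth noting is AV_CODEC_FLAG_DROPCHANGED: when the caller sets this flag, any frame whose parameters (width/height, format, sample rate, channels, channel layout) differ from those of the first decoded frame is dropped and AVERROR_INPUT_CHANGED is returned. A hedged sketch of how a caller might react (dec_ctx and frame are illustrative names):
dec_ctx->flags |= AV_CODEC_FLAG_DROPCHANGED; /* opt in before decoding starts */
/* ... */
ret = avcodec_receive_frame(dec_ctx, frame);
if (ret == AVERROR_INPUT_CHANGED) {
    /* stream parameters changed mid-stream: rebuild scalers/resamplers, then continue */
}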
References:
https://blog.csdn.net/u011330638/article/details/82990364