Plenty of people have already written about the FFmpeg source code online, including some excellent work by real experts, and I salute them; without the spirit of open-source sharing, the learning path for us programmers would be a lot harder. Here I am posting some of my own notes from studying the FFmpeg source, drawing in part on material found online.
Judging by its name, av_read_frame reads one frame of data from the input; what it actually returns is the next demuxed packet (an AVPacket), not a decoded frame. But how is it implemented? This article works through that question against the current source code, building on other people's analyses. If anything below is wrong, corrections are welcome.
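Before diving into the internals, it helps to recall how av_read_frame is normally driven from application code. The sketch below is a minimal, hypothetical caller (the demux_file wrapper, file name and error handling are mine, not part of FFmpeg); it uses the same older API generation as the source quoted in this article, where packets are released with av_free_packet() rather than av_packet_unref():
#include <libavformat/avformat.h>
/* Minimal demux loop: open an input, pull packets one by one, free them. */
int demux_file(const char *url)
{
    AVFormatContext *fmt = NULL;
    AVPacket pkt;
    int ret;
    av_register_all();                        /* global init in the old API */
    if ((ret = avformat_open_input(&fmt, url, NULL, NULL)) < 0)
        return ret;
    if ((ret = avformat_find_stream_info(fmt, NULL)) < 0)
        goto end;
    /* Each successful call fills pkt with the next demuxed packet. */
    while (av_read_frame(fmt, &pkt) >= 0) {
        /* ... hand pkt to the decoder for stream pkt->stream_index ... */
        av_free_packet(&pkt);
    }
    ret = 0;
end:
    avformat_close_input(&fmt);
    return ret;
}
Now to the implementation itself, starting with av_read_frame: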
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
const int genpts = s->flags & AVFMT_FLAG_GENPTS;
int eof = 0;
int ret;
AVStream *st;
// Check whether a packet is already cached in s->packet_buffer; if not, call read_frame_internal()
if (!genpts) {
ret = s->packet_buffer
? read_from_packet_buffer(&s->packet_buffer,
&s->packet_buffer_end, pkt)
: read_frame_internal(s, pkt);
if (ret < 0)
return ret;
goto return_packet;
}
/* ... remainder omitted ... */
}
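So when generate-pts handling is off and s->packet_buffer already holds packets, av_read_frame simply pops the head of that linked list of AVPacketList nodes. read_from_packet_buffer is not shown in the excerpt above; roughly (a sketch of what the function does, not a verbatim copy of the source), it looks like this:
static int read_from_packet_buffer(AVPacketList **pkt_buffer,
                                   AVPacketList **pkt_buffer_end,
                                   AVPacket      *pkt)
{
    AVPacketList *pktl = *pkt_buffer;   /* head of the cached-packet list */
    *pkt        = pktl->pkt;            /* hand the cached packet to the caller */
    *pkt_buffer = pktl->next;           /* unlink the head node */
    if (!pktl->next)
        *pkt_buffer_end = NULL;         /* list is now empty */
    av_freep(&pktl);
    return 0;
}
When the buffer is empty, the real work falls to read_frame_internal():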
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
int ret = 0, i, got_packet = 0;
av_init_packet(pkt);
while (!got_packet && !s->parse_queue) {
AVStream *st;
AVPacket cur_pkt;
/* read the next packet */
ret = ff_read_packet(s, &cur_pkt);
if (ret < 0) {
if (ret == AVERROR(EAGAIN))
return ret;
/* flush the parsers */
for (i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
if (st->parser && st->need_parsing)
parse_packet(s, NULL, st->index);
}
/* all remaining packets are now in parse_queue =>
* really terminate parsing */
break;
}
/* ... remainder omitted ... */
}
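The elided part of read_frame_internal() is where raw packets become complete frames: for streams whose codec needs parsing (st->need_parsing), the packet just read is handed to parse_packet(), which drives the stream's AVCodecParserContext and appends every complete frame it produces to s->parse_queue. The following is a heavily simplified sketch of that hand-off, not the literal source (the real function also fixes up timestamps, propagates flags, and handles the flush case where pkt is NULL):
/* Simplified sketch of parse_packet(): split a raw packet into frames
 * using the stream's parser and queue the results on s->parse_queue. */
static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
{
    AVStream *st   = s->streams[stream_index];
    uint8_t  *data = pkt ? pkt->data : NULL;
    int       size = pkt ? pkt->size : 0;
    while (size > 0) {
        AVPacket out_pkt;
        av_init_packet(&out_pkt);
        out_pkt.data = NULL;
        out_pkt.size = 0;
        /* av_parser_parse2() consumes input bytes; once it has assembled a
         * whole frame it returns it through out_pkt.data / out_pkt.size. */
        int len = av_parser_parse2(st->parser, st->codec,
                                   &out_pkt.data, &out_pkt.size,
                                   data, size,
                                   pkt->pts, pkt->dts, pkt->pos);
        data += len;
        size -= len;
        if (out_pkt.size) {
            out_pkt.stream_index = st->index;
            add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end);
        }
    }
    return 0;
}
av_parser_parse2() itself is analyzed at the end of this article. Back on the main path, the first thing each iteration of read_frame_internal() does is call ff_read_packet() to fetch a raw packet from the demuxer: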
int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
{
int ret, i, err;
AVStream *st;
for (;;) {
AVPacketList *pktl = s->raw_packet_buffer; // first check whether s->raw_packet_buffer already holds data; at the very beginning it usually does not
if (pktl) {
*pkt = pktl->pkt;
st = s->streams[pkt->stream_index];
if (s->raw_packet_buffer_remaining_size <= 0)
if ((err = probe_codec(s, st, NULL)) < 0)
return err;
if (st->request_probe <= 0) {
s->raw_packet_buffer = pktl->next;
s->raw_packet_buffer_remaining_size += pkt->size;
av_free(pktl);
return 0;
}
}
pkt->data = NULL;
pkt->size = 0;
av_init_packet(pkt);
ret = s->iformat->read_packet(s, pkt);
if (ret < 0) {
continue;
}
if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
(pkt->flags & AV_PKT_FLAG_CORRUPT)) {
av_log(s, AV_LOG_WARNING,
"Dropped corrupted packet (stream = %d)\n",
pkt->stream_index);
av_free_packet(pkt);
continue;
}
if (pkt->stream_index >= (unsigned)s->nb_streams) {
av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
continue;
}
st = s->streams[pkt->stream_index];
if (update_wrap_reference(s, st, pkt->stream_index, pkt) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
// correct first time stamps to negative values
if (!is_relative(st->first_dts))
st->first_dts = wrap_timestamp(st, st->first_dts);
if (!is_relative(st->start_time))
st->start_time = wrap_timestamp(st, st->start_time);
if (!is_relative(st->cur_dts))
st->cur_dts = wrap_timestamp(st, st->cur_dts);
}
pkt->dts = wrap_timestamp(st, pkt->dts);
pkt->pts = wrap_timestamp(st, pkt->pts);
force_codec_ids(s, st);
/* TODO: audio: time filter; video: frame reordering (pts != dts) */
if (s->use_wallclock_as_timestamps)
pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);
if (!pktl && st->request_probe <= 0)
return ret;
add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end); // append the packet to s->raw_packet_buffer
s->raw_packet_buffer_remaining_size -= pkt->size;
if ((err = probe_codec(s, st, pkt)) < 0)
return err;
}
}
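The key line in ff_read_packet() is ret = s->iformat->read_packet(s, pkt): this is where libavformat dispatches into the demuxer for the container format that was detected (or forced) when the input was opened. Every demuxer registers its callbacks in an AVInputFormat descriptor; for MPEG transport streams it looks roughly as follows (abridged, and the exact fields may differ between versions), which is how execution ends up in mpegts_read_packet():
AVInputFormat ff_mpegts_demuxer = {
    .name           = "mpegts",
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-TS (MPEG-2 Transport Stream)"),
    .priv_data_size = sizeof(MpegTSContext),
    .read_probe     = mpegts_probe,
    .read_header    = mpegts_read_header,
    .read_packet    = mpegts_read_packet,   /* invoked via s->iformat->read_packet */
    .read_close     = mpegts_read_close,
    .flags          = AVFMT_SHOW_IDS | AVFMT_TS_DISCONT,
};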
static int mpegts_read_packet(AVFormatContext *s, AVPacket *pkt)
{
MpegTSContext *ts = s->priv_data;
int ret, i;
pkt->size = -1;
ts->pkt = pkt;
ret = handle_packets(ts, 0);
if (ret < 0) {
av_free_packet(ts->pkt);
/* flush pes data left */
for (i = 0; i < NB_PID_MAX; i++)
if (ts->pids[i] && ts->pids[i]->type == MPEGTS_PES) {
PESContext *pes = ts->pids[i]->u.pes_filter.opaque;
if (pes->state == MPEGTS_PAYLOAD && pes->data_index > 0) {
new_pes_packet(pes, pkt);
pes->state = MPEGTS_SKIP;
ret = 0;
break;
}
}
}
if (!ret && pkt->size < 0)
ret = AVERROR(EINTR);
return ret;
}
int av_parser_parse2(AVCodecParserContext *s, AVCodecContext *avctx,
uint8_t **poutbuf, int *poutbuf_size,
const uint8_t *buf, int buf_size,
int64_t pts, int64_t dts, int64_t pos)
{
int index, i;
uint8_t dummy_buf[FF_INPUT_BUFFER_PADDING_SIZE];
if (!(s->flags & PARSER_FLAG_FETCHED_OFFSET)) {
s->next_frame_offset =
s->cur_offset = pos;
s->flags |= PARSER_FLAG_FETCHED_OFFSET;
}
if (buf_size == 0) {
/* padding is always necessary even if EOF, so we add it here */
memset(dummy_buf, 0, sizeof(dummy_buf));
buf = dummy_buf;
} else if (s->cur_offset + buf_size != s->cur_frame_end[s->cur_frame_start_index]) { /* skip remainder packets */
/* add a new packet descriptor */
i = (s->cur_frame_start_index + 1) & (AV_PARSER_PTS_NB - 1);
s->cur_frame_start_index = i;
s->cur_frame_offset[i] = s->cur_offset;
s->cur_frame_end[i] = s->cur_offset + buf_size;
s->cur_frame_pts[i] = pts;
s->cur_frame_dts[i] = dts;
s->cur_frame_pos[i] = pos;
}
if (s->fetch_timestamp) {
s->fetch_timestamp = 0;
s->last_pts = s->pts;
s->last_dts = s->dts;
s->last_pos = s->pos;
ff_fetch_timestamp(s, 0, 0);
}
/* The actual parsing happens here: parser_parse is the callback of the codec-specific parser selected for avctx (e.g. the H.264 parser) */
index = s->parser->parser_parse(s, avctx, (const uint8_t **) poutbuf,
poutbuf_size, buf, buf_size);
/* update the file pointer */
if (*poutbuf_size) {
/* fill the data for the current frame */
s->frame_offset = s->next_frame_offset;
/* offset of the next frame */
s->next_frame_offset = s->cur_offset + index;
s->fetch_timestamp = 1;
}
if (index < 0)
index = 0;
s->cur_offset += index;
return index;
}
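av_parser_parse2() is also part of the public libavcodec API, and the same contract applies when an application feeds raw bytes to a parser directly: each call consumes some input and reports a complete frame through poutbuf/poutbuf_size once it has one. A minimal sketch of the canonical driving loop (parse_buffer and its arguments are placeholders of mine; parser and avctx are assumed to have been created with av_parser_init() and avcodec_alloc_context3() for the same codec):
#include <libavcodec/avcodec.h>
static void parse_buffer(AVCodecParserContext *parser, AVCodecContext *avctx,
                         const uint8_t *data, int size)
{
    while (size > 0) {
        uint8_t *out      = NULL;
        int      out_size = 0;
        /* Returns the number of input bytes consumed; out/out_size are
         * filled in once a whole frame has been assembled. */
        int len = av_parser_parse2(parser, avctx, &out, &out_size,
                                   data, size,
                                   AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        data += len;
        size -= len;
        if (out_size > 0) {
            /* ... a complete frame of out_size bytes starts at out ... */
        }
    }
}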