相对于 avcodec_send_packet | avcodec_receive_frame(解码)而言,avcodec_send_frame | avcodec_receive_packet 是编码用的。
/**
 * Supply a raw video or audio frame to the encoder. Use avcodec_receive_packet()
 * to retrieve buffered output packets.
 * @param avctx codec context (must be opened as an encoder)
 * @param frame raw frame to encode, or NULL to signal end of stream (flush)
 * @return 0 on success, otherwise negative error code:
 * AVERROR(EAGAIN): input is not accepted in the current state - user
 * must read output with avcodec_receive_packet() (once
 * all output is read, the frame should be resent, and
 * the call will not fail with EAGAIN).
 * AVERROR_EOF: the encoder has been flushed, and no new frames can
 * be sent to it
 * AVERROR(EINVAL): codec not opened, refcounted_frames not set, it is a
 * decoder, or requires flush
 * AVERROR(ENOMEM): failed to add packet to internal queue, or similar
 * other errors: legitimate encoding errors
 */
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame);
/**
 * Read encoded data from the encoder.
 * @param avctx codec context (must be opened as an encoder)
 * @param avpkt packet that receives the encoded data; unreferenced first,
 *              so it may be reused across calls
 * @return 0 on success, otherwise negative error code:
 * AVERROR(EAGAIN): output is not available in the current state - user
 * must try to send input
 * AVERROR_EOF: the encoder has been fully flushed, and there will be
 * no more output packets
 * AVERROR(EINVAL): codec not opened, or it is a decoder
 * other errors: legitimate encoding errors
 */
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt);
把 frame 送到 avci->buffer_frame 中,然后根据 avci->buffer_pkt 中是否已存在编码完成后的 cache,判断是否调用 encode_receive_packet_internal 进行编码。
// Stores the incoming frame in avci->buffer_frame, then — if avci->buffer_pkt
// holds no cached output yet — eagerly calls encode_receive_packet_internal()
// to pre-encode into that cache.
int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
int ret;
// Reject contexts that are not open or are not encoders.
if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
return AVERROR(EINVAL);
// A NULL frame was already sent: draining has started, no further input is
// accepted until the context is flushed (for x264 flushing is not enough —
// the AVCodecContext has to be recreated).
if (avci->draining)
return AVERROR_EOF;
// The previous input frame has not been consumed yet.
if (avci->buffer_frame->data[0])
return AVERROR(EAGAIN);
// NULL frame marks end of stream; draining is checked later by the
// encoding path.
if (!frame) {
avci->draining = 1;
} else {
// Reference the caller's frame into avctx->internal->buffer_frame so the
// encoding stage can use buffer_frame directly.
ret = encode_send_frame_internal(avctx, frame);
if (ret < 0)
return ret;
}
// No cached encoded output is pending, so encode eagerly into buffer_pkt.
if (!avci->buffer_pkt->data && !avci->buffer_pkt->side_data) {
// Run the actual encode step.
ret = encode_receive_packet_internal(avctx, avci->buffer_pkt);
// EAGAIN/EOF here only mean "no output yet", not a send failure.
if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
return ret;
}
return 0;
}
如果 avctx->internal->buffer_pkt 中已存在编码后的 cache,就直接取用 buffer_pkt;不存在则调用 encode_receive_packet_internal 进行编码。
// Returns one encoded packet: either the one cached in
// avctx->internal->buffer_pkt (pre-encoded by avcodec_send_frame()), or a
// freshly encoded one from encode_receive_packet_internal().
int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
AVCodecInternal *avci = avctx->internal;
int ret;
av_packet_unref(avpkt);
// Reject contexts that are not open or are not encoders.
if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
return AVERROR(EINVAL);
// Use the cached packet if one is available; ownership moves to avpkt.
if (avci->buffer_pkt->data || avci->buffer_pkt->side_data) {
av_packet_move_ref(avpkt, avci->buffer_pkt);
} else {
ret = encode_receive_packet_internal(avctx, avpkt);
if (ret < 0)
return ret;
}
return 0;
}
主要作用是把输入的AVFrame复制到avctx->internal->buffer_frame中。
如果是音频,还会根据codec的capabilities类型,判断输入的AVFrame的采样数量是否足够,不足够就返回AVERROR(EINVAL)。
// Copies (by reference) the input AVFrame into avctx->internal->buffer_frame.
// For audio, additionally validates src->nb_samples against the codec's
// capabilities / frame_size, padding an undersized final frame when allowed;
// an invalid sample count yields AVERROR(EINVAL).
static int encode_send_frame_internal(AVCodecContext *avctx, const AVFrame *src)
{
AVCodecInternal *avci = avctx->internal;
AVFrame *dst = avci->buffer_frame;
int ret;
// Audio only: check the input frame's sample count against the encoder's
// capabilities.
if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
/* extract audio service type metadata */
AVFrameSideData *sd = av_frame_get_side_data(src, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
if (sd && sd->size >= sizeof(enum AVAudioServiceType))
avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
/* check for valid frame size */
if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
// Smaller-than-frame_size input is fine, larger is not.
if (src->nb_samples > avctx->frame_size) {
av_log(avctx, AV_LOG_ERROR, "more samples than frame size\n");
return AVERROR(EINVAL);
}
} else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
/* if we already got an undersized frame, that must have been the last */
if (avctx->internal->last_audio_frame) {
av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame\n", avctx->frame_size);
return AVERROR(EINVAL);
}
// An undersized frame is allowed once, as the final frame: pad it with
// silence up to frame_size (pad_last_frame also fills dst).
if (src->nb_samples < avctx->frame_size) {
ret = pad_last_frame(avctx, dst, src);
if (ret < 0)
return ret;
avctx->internal->last_audio_frame = 1;
} else if (src->nb_samples > avctx->frame_size) {
av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d)\n", src->nb_samples, avctx->frame_size);
return AVERROR(EINVAL);
}
}
}
// Reference src into avctx->internal->buffer_frame, unless dst was already
// populated above (by pad_last_frame).
if (!dst->data[0]) {
ret = av_frame_ref(dst, src);
if (ret < 0)
return ret;
}
return 0;
}
编码阶段的重要函数:它会判断所使用的 AVCodec 是否实现了 receive_packet 回调,从而决定是调用 receive_packet 编码,还是调用 encode_simple_receive_packet 编码。
// Core encode dispatcher: if the AVCodec implements the receive_packet
// callback, use it; otherwise fall back to encode_simple_receive_packet().
// Also records draining completion when the encoder signals EOF.
static int encode_receive_packet_internal(AVCodecContext *avctx, AVPacket *avpkt)
{
AVCodecInternal *avci = avctx->internal;
int ret;
// NULL input was already sent and EOF already returned; no further
// encoding calls are allowed.
if (avci->draining_done)
return AVERROR_EOF;
av_assert0(!avpkt->data && !avpkt->side_data);
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
// Two-pass encoding, pass 1: reset the stats string for this packet.
if ((avctx->flags & AV_CODEC_FLAG_PASS1) && avctx->stats_out)
avctx->stats_out[0] = '\0';
if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
return AVERROR(EINVAL);
}
// Hardware encoders generally implement receive_packet.
if (avctx->codec->receive_packet) {
ret = avctx->codec->receive_packet(avctx, avpkt);
if (ret < 0)
av_packet_unref(avpkt);
else
// Encoders must always return ref-counted buffers.
// Side-data only packets have no data and can be not ref-counted.
av_assert0(!avpkt->data || avpkt->buf);
} else
// Software encoders mostly take this path.
ret = encode_simple_receive_packet(avctx, avpkt);
if (ret == AVERROR_EOF)
avci->draining_done = 1;
return ret;
}
这个函数通过 while 循环不停地调用 encode_simple_internal 进行编码,直到得到一个 packet。
// Repeatedly drives encode_simple_internal() until it either fails or
// produces a packet (data or side data) in avpkt.
static int encode_simple_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    for (;;) {
        int err;

        // Stop as soon as the packet carries something.
        if (avpkt->data || avpkt->side_data)
            return 0;

        err = encode_simple_internal(avctx, avpkt);
        if (err < 0)
            return err;
    }
}
大部分软编码都会调用到这个函数;在这个函数中,通过判断是否是多线程编码,调用不同的编码函数。
// Performs one encode step for codecs using the encode2 callback: fetches the
// buffered input frame, dispatches to the frame-threaded encoder or directly
// to encode2, fixes up packet timestamps/flags, and tracks draining state.
static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
{
AVCodecInternal *avci = avctx->internal;
EncodeSimpleContext *es = &avci->es;
AVFrame *frame = es->in_frame;
int got_packet;
int ret;
if (avci->draining_done)
return AVERROR_EOF;
if (!frame->buf[0] && !avci->draining) {
av_frame_unref(frame);
// Move avctx->internal->buffer_frame into frame.
ret = ff_encode_get_frame(avctx, frame);
if (ret < 0 && ret != AVERROR_EOF)
return ret;
}
if (!frame->buf[0]) {
// No input and the encoder has no delayed output (and no frame-thread
// queue): nothing more will ever come out.
if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
(avci->frame_thread_encoder && avctx->active_thread_type & FF_THREAD_FRAME)))
return AVERROR_EOF;
// Flushing is signaled with a NULL frame
frame = NULL;
}
got_packet = 0;
av_assert0(avctx->codec->encode2);
// Frame-threaded encoding path.
if (CONFIG_FRAME_THREAD_ENCODER &&
avci->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
/* This might modify frame, but it doesn't matter, because
 * the frame properties used below are not used for video
 * (due to the delay inherent in frame threaded encoding, it makes
 * no sense to use the properties of the current frame anyway). */
ret = ff_thread_video_encode_frame(avctx, avpkt, frame, &got_packet);
else {
// Single-threaded: call the AVCodec's encode2 callback directly; the
// implementation is the specific encoder's.
ret = avctx->codec->encode2(avctx, avpkt, frame, &got_packet);
// For non-delayed video encoders, derive packet timestamps from the
// input frame.
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO && !ret && got_packet &&
!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
avpkt->pts = avpkt->dts = frame->pts;
}
av_assert0(ret <= 0);
emms_c();
if (!ret && got_packet) {
// Success with output: make the packet ref-counted, then fall through
// to the end label without setting draining_done.
if (avpkt->data) {
ret = av_packet_make_refcounted(avpkt);
if (ret < 0)
goto end;
}
if (frame && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
// Non-delayed audio: inherit pts/duration from the input frame if
// the encoder did not set them.
if (avpkt->pts == AV_NOPTS_VALUE)
avpkt->pts = frame->pts;
if (!avpkt->duration)
avpkt->duration = ff_samples_to_time_base(avctx,
frame->nb_samples);
}
}
if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
/* NOTE: if we add any audio encoders which output non-keyframe packets,
 * this needs to be moved to the encoders, but for now we can do it
 * here to simplify things */
avpkt->flags |= AV_PKT_FLAG_KEY;
avpkt->dts = avpkt->pts;
}
}
// Draining (NULL input) and no packet produced this call: all cached
// output has been emitted, so mark draining_done — subsequent
// avcodec_receive_packet() calls will get AVERROR_EOF.
if (avci->draining && !got_packet)
avci->draining_done = 1;
end:
if (ret < 0 || !got_packet)
av_packet_unref(avpkt);
if (frame) {
if (!ret)
avctx->frame_number++;
av_frame_unref(frame);
}
if (got_packet)
// Encoders must always return ref-counted buffers.
// Side-data only packets have no data and can be not ref-counted.
av_assert0(!avpkt->data || avpkt->buf);
return ret;
}
多线程编码:与调用 avcodec_open2 时创建的编码线程配合工作。
// Frame-threaded encode: hands the input frame to a worker thread (created at
// avcodec_open2() time) via the task ring, and collects one finished packet
// if enough tasks are in flight.
int ff_thread_video_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
AVFrame *frame, int *got_packet_ptr)
{
ThreadContext *c = avctx->internal->frame_thread_encoder;
// In frame-threaded encoding, a Task carries both the input frame and the
// resulting packet.
Task *outtask;
av_assert1(!*got_packet_ptr);
if(frame){
// Move the frame into the next task slot (ownership transfers).
av_frame_move_ref(c->tasks[c->task_index].indata, frame);
pthread_mutex_lock(&c->task_fifo_mutex);
c->task_index = (c->task_index + 1) % c->max_tasks;
// Signal task_fifo_cond; worker threads wait on it to pick up work.
pthread_cond_signal(&c->task_fifo_cond);
pthread_mutex_unlock(&c->task_fifo_mutex);
}
outtask = &c->tasks[c->finished_task_index];
pthread_mutex_lock(&c->finished_task_mutex);
/* The access to task_index in the following code is ok,
 * because it is only ever changed by the main thread. */
// Return without output while the pipeline is empty, or (when still
// feeding frames) while fewer than thread_count tasks are queued and the
// oldest one has not finished — this keeps the workers saturated.
if (c->task_index == c->finished_task_index ||
(frame && !outtask->finished &&
(c->task_index - c->finished_task_index + c->max_tasks) % c->max_tasks <= avctx->thread_count)) {
pthread_mutex_unlock(&c->finished_task_mutex);
return 0;
}
// Wait on finished_task_cond until the oldest task completes.
while (!outtask->finished) {
pthread_cond_wait(&c->finished_task_cond, &c->finished_task_mutex);
}
pthread_mutex_unlock(&c->finished_task_mutex);
/* We now own outtask completely: No worker thread touches it any more,
 * because there is no outstanding task with this index. */
outtask->finished = 0;
// Move the encoded packet out of the task.
av_packet_move_ref(pkt, outtask->outdata);
if(pkt->data)
*got_packet_ptr = 1;
c->finished_task_index = (c->finished_task_index + 1) % c->max_tasks;
return outtask->return_code;
}