Analyzing FFmpeg's V4L2 Integration

FFmpeg integrates V4L2, so how do we approach the V4L2-related code?
If you have read **ffmpeg-codec函数调用流程分析** (the earlier analysis of FFmpeg's codec call flow), you will know that FFmpeg needs only a few simple callback functions to integrate a codec plugin. With that in mind, let's look at V4L2:

static const AVOption options[] = {
    V4L_M2M_DEFAULT_OPTS,
    { "num_capture_buffers", "Number of buffers in the capture context",
        OFFSET(num_capture_buffers), AV_OPT_TYPE_INT, {.i64 = 20}, 20, INT_MAX, FLAGS },
    { NULL},
};

#define M2MDEC_CLASS(NAME) \
    static const AVClass v4l2_m2m_ ## NAME ## _dec_class = { \
        .class_name = #NAME "_v4l2m2m_decoder", \
        .item_name  = av_default_item_name, \
        .option     = options, \
        .version    = LIBAVUTIL_VERSION_INT, \
    };

#define M2MDEC(NAME, LONGNAME, CODEC, bsf_name) \
    M2MDEC_CLASS(NAME) \
    const AVCodec ff_ ## NAME ## _v4l2m2m_decoder = { \
        .name           = #NAME "_v4l2m2m" , \
        .long_name      = NULL_IF_CONFIG_SMALL("V4L2 mem2mem " LONGNAME " decoder wrapper"), \
        .type           = AVMEDIA_TYPE_VIDEO, \
        .id             = CODEC , \
        .priv_data_size = sizeof(V4L2m2mPriv), \
        .priv_class     = &v4l2_m2m_ ## NAME ## _dec_class, \
        .init           = v4l2_decode_init, \
        .receive_frame  = v4l2_receive_frame, \
        .close          = v4l2_decode_close, \
        .bsfs           = bsf_name, \
        .capabilities   = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
        .caps_internal  = FF_CODEC_CAP_SETS_PKT_DTS | FF_CODEC_CAP_INIT_CLEANUP, \
        .wrapper_name   = "v4l2m2m", \
    }

M2MDEC(h264,  "H.264", AV_CODEC_ID_H264,       "h264_mp4toannexb");
M2MDEC(hevc,  "HEVC",  AV_CODEC_ID_HEVC,       "hevc_mp4toannexb");
M2MDEC(mpeg1, "MPEG1", AV_CODEC_ID_MPEG1VIDEO, NULL);
M2MDEC(mpeg2, "MPEG2", AV_CODEC_ID_MPEG2VIDEO, NULL);
M2MDEC(mpeg4, "MPEG4", AV_CODEC_ID_MPEG4,      NULL);
M2MDEC(h263,  "H.263", AV_CODEC_ID_H263,       NULL);
M2MDEC(vc1 ,  "VC1",   AV_CODEC_ID_VC1,        NULL);
M2MDEC(vp8,   "VP8",   AV_CODEC_ID_VP8,        NULL);
M2MDEC(vp9,   "VP9",   AV_CODEC_ID_VP9,        NULL);

It mainly comes down to these three callbacks:

    .init           = v4l2_decode_init,
    .receive_frame  = v4l2_receive_frame,
    .close          = v4l2_decode_close,

So the analysis starts from these three functions, provided you know where they are called from: .init runs inside avcodec_open2(), .receive_frame is driven by avcodec_receive_frame(), and .close runs when the codec context is closed or freed.
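To make those call sites concrete, here is a minimal decode loop. This is only a sketch (demuxing and most error handling elided); it selects the wrapper decoder by name:

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

// open the V4L2 M2M H.264 wrapper decoder
static AVCodecContext *open_v4l2m2m_h264(void)
{
    const AVCodec *codec = avcodec_find_decoder_by_name("h264_v4l2m2m");
    AVCodecContext *avctx = codec ? avcodec_alloc_context3(codec) : NULL;

    // avcodec_open2() is what ends up invoking .init = v4l2_decode_init;
    // avcodec_free_context() later triggers .close = v4l2_decode_close
    if (avctx && avcodec_open2(avctx, codec, NULL) < 0)
        avcodec_free_context(&avctx);
    return avctx;
}

// feed one already-demuxed packet (pass pkt = NULL to start flushing)
static int decode_one(AVCodecContext *avctx, AVFrame *frame, const AVPacket *pkt)
{
    int ret = avcodec_send_packet(avctx, pkt); // buffers the packet internally
    if (ret < 0)
        return ret;

    for (;;) {
        ret = avcodec_receive_frame(avctx, frame); // -> .receive_frame = v4l2_receive_frame
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;
        if (ret < 0)
            return ret;
        /* ... consume the frame ... */
        av_frame_unref(frame);
    }
}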

Before analyzing the functions, we need to look at three important structures.

V4L2Buffer wraps the kernel's v4l2_buffer and acts as the intermediary between AVFrame/AVPacket on the FFmpeg side and v4l2_buffer on the driver side.

enum V4L2Buffer_status {
    V4L2BUF_AVAILABLE,
    V4L2BUF_IN_DRIVER,
    V4L2BUF_RET_USER,
};

/**
 * V4L2Buffer (wrapper for v4l2_buffer management)
 */
typedef struct V4L2Buffer {
    /* each buffer needs to have a reference to its context */
    struct V4L2Context *context;

    /* This object is refcounted per-plane, so we need to keep track
     * of how many context-refs we are holding. */
    AVBufferRef *context_ref;
    atomic_uint context_refcount;

    /* keep track of the mmap address and mmap length */
    struct V4L2Plane_info {
        int bytesperline;
        void * mm_addr;
        size_t length;
    } plane_info[VIDEO_MAX_PLANES];

    int num_planes;

    /* the v4l2_buffer buf.m.planes pointer uses the planes[] mem */
    struct v4l2_buffer buf;
    struct v4l2_plane planes[VIDEO_MAX_PLANES];

    int flags;
    enum V4L2Buffer_status status;

} V4L2Buffer;
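To see the "intermediary" role in action, consider how a mmap'ed plane ends up inside an AVFrame. This is only a sketch of the idea behind ff_v4l2_buffer_buf_to_avframe(); v4l2_plane_release is a hypothetical stand-in for the real callback, which re-queues the buffer to the driver instead of freeing memory:

#include <libavutil/buffer.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

// hypothetical release callback: the real code drops a context reference and
// eventually marks the V4L2Buffer V4L2BUF_AVAILABLE so it can be queued again
static void v4l2_plane_release(void *opaque, uint8_t *data)
{
    (void)opaque; (void)data;
}

// expose one mmap'ed plane (see plane_info[i] above) as plane i of an AVFrame
static int plane_to_avframe(AVFrame *frame, int i,
                            void *mm_addr, size_t length, int bytesperline)
{
    frame->buf[i] = av_buffer_create((uint8_t *)mm_addr, length,
                                     v4l2_plane_release, NULL, 0);
    if (!frame->buf[i])
        return AVERROR(ENOMEM);
    frame->data[i]     = frame->buf[i]->data;
    frame->linesize[i] = bytesperline;
    return 0;
}

This zero-copy handoff is also why V4L2Buffer keeps context_ref/context_refcount: the codec context must stay alive as long as any exported AVFrame still points into its mmap'ed memory.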

V4L2Context models one of the device's two ports (queues): the output port, which the application feeds, and the capture port, from which it reads.

typedef struct V4L2Context {
    /**
     * context name.
     */
    const char* name;

    /**
     * Type of this buffer context.
     * See V4L2_BUF_TYPE_VIDEO_* in videodev2.h
     * Readonly after init.
     */
    enum v4l2_buf_type type;

    /**
     * AVPixelFormat corresponding to this buffer context.
     * AV_PIX_FMT_NONE means this is an encoded stream.
     */
    enum AVPixelFormat av_pix_fmt;

    /**
     * AVCodecID corresponding to this buffer context.
     * AV_CODEC_ID_RAWVIDEO means this is a raw stream and av_pix_fmt must be set to a valid value.
     */
    enum AVCodecID av_codec_id;

    /**
     * Format returned by the driver after initializing the buffer context.
     * Readonly after init.
     */
    struct v4l2_format format;

    /**
     * Width and height of the frames it produces (in case of a capture context, e.g. when decoding)
     * or accepts (in case of an output context, e.g. when encoding).
     */
    int width, height;
    AVRational sample_aspect_ratio;

    /**
     * Indexed array of V4L2Buffers
     */
    V4L2Buffer *buffers;

    /**
     * Readonly after init.
     */
    int num_buffers;

    /**
     * Whether the stream has been started (VIDIOC_STREAMON has been sent).
     */
    int streamon;

    /**
     *  Either no more buffers available or an unrecoverable error was notified
     *  by the V4L2 kernel driver: once set the context has to be exited.
     */
    int done;

} V4L2Context;

V4L2m2mContext is the management structure for the whole V4L2 session; it owns the two port contexts described above.

typedef struct V4L2m2mContext {
    char devname[PATH_MAX];
    int fd;

    /* the codec context queues */
    // capture: decoded frames are dequeued here (the decoder's output)
    V4L2Context capture;
    // output: compressed packets are enqueued here (the decoder's input)
    V4L2Context output;

    /* dynamic stream reconfig */
    // the owning codec context
    AVCodecContext *avctx;
    sem_t refsync;
    atomic_uint refcount;
    int reinit;

    /* null frame/packet received */
    int draining;
    AVPacket buf_pkt;

    /* Reference to a frame. Only used during encoding */
    AVFrame *frame;

    /* Reference to self; only valid while codec is active. */
    AVBufferRef *self_ref;

    /* reference back to V4L2m2mPriv */
    void *priv;
} V4L2m2mContext;

// this structure is what AVCodecContext.priv_data points to
typedef struct V4L2m2mPriv {
    AVClass *class;

    V4L2m2mContext *context;
    AVBufferRef    *context_ref;

    int num_output_buffers;
    int num_capture_buffers;
} V4L2m2mPriv;

1. V4L2 initialization


static av_cold int v4l2_decode_init(AVCodecContext *avctx)
{
    V4L2Context *capture, *output;
    V4L2m2mContext *s;
    V4L2m2mPriv *priv = avctx->priv_data;
    int ret;
// at bottom, this function just creates a V4L2m2mContext; avctx->priv_data is the V4L2m2mPriv,
// which is allocated in avcodec_alloc_context3() with the size the plugin declares (priv_data_size)
    ret = ff_v4l2_m2m_create_context(priv, &s);
    if (ret < 0)
        return ret;

    capture = &s->capture;
    output = &s->output;

    /* if these dimensions are invalid (ie, 0 or too small) an event will be raised
     * by the v4l2 driver; this event will trigger a full pipeline reconfig and
     * the proper values will be retrieved from the kernel driver.
     */
    output->height = capture->height = avctx->coded_height;
    output->width = capture->width = avctx->coded_width;

// the output port receives the encoded stream; av_pix_fmt stays AV_PIX_FMT_NONE
// because the raw format is only decided by the decoder
    output->av_codec_id = avctx->codec_id;
    output->av_pix_fmt  = AV_PIX_FMT_NONE;
// the capture port produces raw video, so its pix_fmt is known
    capture->av_codec_id = AV_CODEC_ID_RAWVIDEO;
    capture->av_pix_fmt = avctx->pix_fmt;

    s->avctx = avctx;
    ret = ff_v4l2_m2m_codec_init(priv);
    if (ret) {
        av_log(avctx, AV_LOG_ERROR, "can't configure decoder\n");
        return ret;
    }

    return v4l2_prepare_decoder(s);
}
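A note on the comment above: avcodec_alloc_context3() allocates priv_data from the AVCodec's priv_data_size and points its first member at priv_class, which is why V4L2m2mPriv starts with 'AVClass *class'. A simplified sketch of that behavior (not libavcodec's verbatim code):

#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>
#include <libavutil/opt.h>

// simplified model of the priv_data setup performed for a codec
static int alloc_priv_data(AVCodecContext *avctx, const AVCodec *codec)
{
    if (codec->priv_data_size <= 0)
        return 0;
    avctx->priv_data = av_mallocz(codec->priv_data_size);
    if (!avctx->priv_data)
        return AVERROR(ENOMEM);
    if (codec->priv_class) {
        // the first field of V4L2m2mPriv is 'AVClass *class'
        *(const AVClass **)avctx->priv_data = codec->priv_class;
        av_opt_set_defaults(avctx->priv_data); // applies options[] defaults, e.g. num_capture_buffers = 20
    }
    return 0;
}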

The main thing to look at here is how ff_v4l2_m2m_create_context() creates and initializes the V4L2m2mContext.

int ff_v4l2_m2m_create_context(V4L2m2mPriv *priv, V4L2m2mContext **s)
{
    *s = av_mallocz(sizeof(V4L2m2mContext));
    if (!*s)
        return AVERROR(ENOMEM);
// wrap the V4L2m2mContext in an AVBufferRef (context_ref) so its lifetime is reference-counted
    priv->context_ref = av_buffer_create((uint8_t *) *s, sizeof(V4L2m2mContext),
                                         &v4l2_m2m_destroy_context, NULL, 0);
    if (!priv->context_ref) {
        av_freep(s);
        return AVERROR(ENOMEM);
    }

    /* assign the context */
    priv->context = *s;
    (*s)->priv = priv;

    /* populate it */
    priv->context->capture.num_buffers = priv->num_capture_buffers;
    priv->context->output.num_buffers  = priv->num_output_buffers;
    priv->context->self_ref = priv->context_ref;
    priv->context->fd = -1;

    priv->context->frame = av_frame_alloc();
    if (!priv->context->frame) {
        av_buffer_unref(&priv->context_ref);
        *s = NULL; /* freed when unreferencing context_ref */
        return AVERROR(ENOMEM);
    }

    return 0;
}

ff_v4l2_m2m_codec_init() looks like this:

int ff_v4l2_m2m_codec_init(V4L2m2mPriv *priv)
{
    int ret = AVERROR(EINVAL);
    struct dirent *entry;
    DIR *dirp;

    V4L2m2mContext *s = priv->context;

    dirp = opendir("/dev");
    if (!dirp)
        return AVERROR(errno);
// scan /dev/videoN and probe each node until a usable device is found
    for (entry = readdir(dirp); entry; entry = readdir(dirp)) {

        if (strncmp(entry->d_name, "video", 5))
            continue;

        snprintf(s->devname, sizeof(s->devname), "/dev/%s", entry->d_name);
        av_log(s->avctx, AV_LOG_DEBUG, "probing device %s\n", s->devname);
        ret = v4l2_probe_driver(s);
        if (!ret)
            break;
    }

    closedir(dirp);

    if (ret) {
        av_log(s->avctx, AV_LOG_ERROR, "Could not find a valid device\n");
        memset(s->devname, 0, sizeof(s->devname));

        return ret;
    }

    av_log(s->avctx, AV_LOG_INFO, "Using device %s\n", s->devname);

    return v4l2_configure_contexts(s);
}
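v4l2_probe_driver() is not listed here; conceptually it opens the node, runs v4l2_prepare_contexts() in probe mode, and checks that both queues can be set up. A standalone sketch of the kind of capability check such a probe relies on (probe_m2m_node is our name, not FFmpeg's):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

// returns 0 if the node looks like a usable mem2mem video device
static int probe_m2m_node(const char *devname)
{
    struct v4l2_capability cap;
    int ret = -1;
    int fd = open(devname, O_RDWR | O_NONBLOCK, 0);

    if (fd < 0)
        return -1;

    memset(&cap, 0, sizeof(cap));
    if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) {
        unsigned int caps = (cap.capabilities & V4L2_CAP_DEVICE_CAPS) ?
                            cap.device_caps : cap.capabilities;
        // an m2m codec advertises the dedicated M2M caps, or both a
        // capture queue and an output queue
        if (caps & (V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE))
            ret = 0;
        else if ((caps & (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT)) ==
                         (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT))
            ret = 0;
    }

    close(fd);
    return ret;
}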



static int v4l2_configure_contexts(V4L2m2mContext *s)
{
    void *log_ctx = s->avctx;
    int ret;
    struct v4l2_format ofmt, cfmt;

    s->fd = open(s->devname, O_RDWR | O_NONBLOCK, 0);
    if (s->fd < 0)
        return AVERROR(errno);

    ret = v4l2_prepare_contexts(s, 0);
    if (ret < 0)
        goto error;

    ofmt = s->output.format;
    cfmt = s->capture.format;
    av_log(log_ctx, AV_LOG_INFO, "requesting formats: output=%s capture=%s\n",
                                 av_fourcc2str(V4L2_TYPE_IS_MULTIPLANAR(ofmt.type) ?
                                               ofmt.fmt.pix_mp.pixelformat :
                                               ofmt.fmt.pix.pixelformat),
                                 av_fourcc2str(V4L2_TYPE_IS_MULTIPLANAR(cfmt.type) ?
                                               cfmt.fmt.pix_mp.pixelformat :
                                               cfmt.fmt.pix.pixelformat));
//ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_S_FMT, &ctx->format);
    ret = ff_v4l2_context_set_format(&s->output);
    if (ret) {
        av_log(log_ctx, AV_LOG_ERROR, "can't set v4l2 output format\n");
        goto error;
    }
//ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_S_FMT, &ctx->format);
    ret = ff_v4l2_context_set_format(&s->capture);
    if (ret) {
        av_log(log_ctx, AV_LOG_ERROR, "can't to set v4l2 capture format\n");
        goto error;
    }
// buffer creation happens in here (capture buffers are also enqueued; see ff_v4l2_buffer_initialize)
    ret = ff_v4l2_context_init(&s->output);
    if (ret) {
        av_log(log_ctx, AV_LOG_ERROR, "no v4l2 output context's buffers\n");
        goto error;
    }

    /* decoder's buffers need to be updated at a later stage */
    if (s->avctx && !av_codec_is_decoder(s->avctx->codec)) {
    // only an encoder initializes the capture queue here; a decoder defers it
    // until the stream's real dimensions are known (via the resolution-change event)
        ret = ff_v4l2_context_init(&s->capture);
        if (ret) {
            av_log(log_ctx, AV_LOG_ERROR, "no v4l2 capture context's buffers\n");
            goto error;
        }
    }

    return 0;

error:
    if (close(s->fd) < 0) {
        ret = AVERROR(errno);
        av_log(log_ctx, AV_LOG_ERROR, "error closing %s (%s)\n",
            s->devname, av_err2str(AVERROR(errno)));
    }
    s->fd = -1;

    return ret;
}
// to switch from MMAP to USERPTR memory, these are the functions to change
static int v4l2_prepare_contexts(V4L2m2mContext *s, int probe)
{
    struct v4l2_capability cap;
    void *log_ctx = s->avctx;
    int ret;

    s->capture.done = s->output.done = 0;
    s->capture.name = "capture";
    s->output.name = "output";
    atomic_init(&s->refcount, 0);
    sem_init(&s->refsync, 0, 0);

    memset(&cap, 0, sizeof(cap));
    // query the device's capabilities
    ret = ioctl(s->fd, VIDIOC_QUERYCAP, &cap);
    if (ret < 0)
        return ret;

    av_log(log_ctx, probe ? AV_LOG_DEBUG : AV_LOG_INFO,
                     "driver '%s' on card '%s' in %s mode\n", cap.driver, cap.card,
                     v4l2_mplane_video(&cap) ? "mplane" :
                     v4l2_splane_video(&cap) ? "splane" : "unknown");

    if (v4l2_mplane_video(&cap)) {
        s->capture.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        s->output.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
        return 0;
    }

// on the author's platform the decoder takes this single-plane path
    if (v4l2_splane_video(&cap)) {
        s->capture.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        s->output.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
        return 0;
    }

    return AVERROR(EINVAL);
}

int ff_v4l2_context_init(V4L2Context* ctx)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    struct v4l2_requestbuffers req;
    int ret, i;

    if (!v4l2_type_supported(ctx)) {
        av_log(logger(ctx), AV_LOG_ERROR, "type %i not supported\n", ctx->type);
        return AVERROR_PATCHWELCOME;
    }
    
    // fetch the format the driver settled on
    ret = ioctl(s->fd, VIDIOC_G_FMT, &ctx->format);
    if (ret)
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT failed\n", ctx->name);

    memset(&req, 0, sizeof(req));
    req.count = ctx->num_buffers;
    req.memory = V4L2_MEMORY_MMAP;
    req.type = ctx->type;
    // request this many buffers
    ret = ioctl(s->fd, VIDIOC_REQBUFS, &req);
    if (ret < 0) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_REQBUFS failed: %s\n", ctx->name, strerror(errno));
        return AVERROR(errno);
    }

    ctx->num_buffers = req.count;
    ctx->buffers = av_mallocz(ctx->num_buffers * sizeof(V4L2Buffer));
    if (!ctx->buffers) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s malloc enomem\n", ctx->name);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < req.count; i++) {
        ctx->buffers[i].context = ctx;
        ret = ff_v4l2_buffer_initialize(&ctx->buffers[i], i);
        if (ret < 0) {
            av_log(logger(ctx), AV_LOG_ERROR, "%s buffer[%d] initialization (%s)\n", ctx->name, i, av_err2str(ret));
            goto error;
        }
    }

    av_log(logger(ctx), AV_LOG_DEBUG, "%s: %s %02d buffers initialized: %04ux%04u, sizeimage %08u, bytesperline %08u\n", ctx->name,
        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? av_fourcc2str(ctx->format.fmt.pix_mp.pixelformat) : av_fourcc2str(ctx->format.fmt.pix.pixelformat),
        req.count,
        v4l2_get_width(&ctx->format),
        v4l2_get_height(&ctx->format),
        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage : ctx->format.fmt.pix.sizeimage,
        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline : ctx->format.fmt.pix.bytesperline);

    return 0;

error:
    v4l2_release_buffers(ctx);

    av_freep(&ctx->buffers);

    return ret;
}


// initialize each buffer according to the count the driver granted above
int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index)
{
    V4L2Context *ctx = avbuf->context;
    int ret, i;

    avbuf->buf.memory = V4L2_MEMORY_MMAP;
    avbuf->buf.type = ctx->type;
    avbuf->buf.index = index;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->buf.length = VIDEO_MAX_PLANES;
        avbuf->buf.m.planes = avbuf->planes;
    }
// query the buffer's size and offset
    ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QUERYBUF, &avbuf->buf);
    if (ret < 0)
        return AVERROR(errno);

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->num_planes = 0;
        /* in MP, the V4L2 API states that buf.length means num_planes */
        for (i = 0; i < avbuf->buf.length; i++) {
            if (avbuf->buf.m.planes[i].length)
                avbuf->num_planes++;
        }
    } else
        avbuf->num_planes = 1;

// mmap each plane's memory below
    for (i = 0; i < avbuf->num_planes; i++) {

        avbuf->plane_info[i].bytesperline = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
            ctx->format.fmt.pix_mp.plane_fmt[i].bytesperline :
            ctx->format.fmt.pix.bytesperline;

        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length;
            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.m.planes[i].length,
                                           PROT_READ | PROT_WRITE, MAP_SHARED,
                                           buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.planes[i].m.mem_offset);
        } else {
            avbuf->plane_info[i].length = avbuf->buf.length;
            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length,
                                          PROT_READ | PROT_WRITE, MAP_SHARED,
                                          buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.offset);
        }

        if (avbuf->plane_info[i].mm_addr == MAP_FAILED)
            return AVERROR(ENOMEM);
    }
// mark the buffer as available
    avbuf->status = V4L2BUF_AVAILABLE;

    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
        return 0;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->buf.m.planes = avbuf->planes;
        avbuf->buf.length   = avbuf->num_planes;

    } else {
        avbuf->buf.bytesused = avbuf->planes[0].bytesused;
        avbuf->buf.length    = avbuf->planes[0].length;
    }
// enqueue: ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QBUF, &avbuf->buf)
    return ff_v4l2_buffer_enqueue(avbuf);
}

v4l2_prepare_decoder() mainly subscribes to events via the VIDIOC_SUBSCRIBE_EVENT ioctl: one is V4L2_EVENT_SOURCE_CHANGE (dynamic resolution changes), the other is V4L2_EVENT_EOS (end of stream).

static int v4l2_prepare_decoder(V4L2m2mContext *s)
{
    struct v4l2_event_subscription sub;
    V4L2Context *output = &s->output;
    int ret;

    /**
     * requirements
     */
    memset(&sub, 0, sizeof(sub));
    sub.type = V4L2_EVENT_SOURCE_CHANGE;
    ret = ioctl(s->fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
    if ( ret < 0) {
        if (output->height == 0 || output->width == 0) {
            av_log(s->avctx, AV_LOG_ERROR,
                "the v4l2 driver does not support VIDIOC_SUBSCRIBE_EVENT\n"
                "you must provide codec_height and codec_width on input\n");
            return ret;
        }
    }

    memset(&sub, 0, sizeof(sub));
    sub.type = V4L2_EVENT_EOS;
    ret = ioctl(s->fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
    if (ret < 0)
        av_log(s->avctx, AV_LOG_WARNING,
               "the v4l2 driver does not support end of stream VIDIOC_SUBSCRIBE_EVENT\n");

    return 0;
}
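These subscriptions are consumed later in the dequeue path: when poll() reports POLLPRI, v4l2_handle_event() (not listed here) dequeues the event and reconfigures the pipeline. A rough sketch of the event dequeue itself, assuming the caller performs the actual reconfiguration:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

// returns 1 on source change, 2 on EOS, 0 for anything else, -1 on error
static int drain_v4l2_event(int fd)
{
    struct v4l2_event evt;

    memset(&evt, 0, sizeof(evt));
    if (ioctl(fd, VIDIOC_DQEVENT, &evt) < 0)
        return -1;

    switch (evt.type) {
    case V4L2_EVENT_SOURCE_CHANGE:
        // resolution/format changed: re-query the formats with VIDIOC_G_FMT,
        // then tear down and re-create the capture buffers
        return 1;
    case V4L2_EVENT_EOS:
        // the driver signalled end of stream
        return 2;
    }
    return 0;
}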

2. v4l2_receive_frame()


static int v4l2_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
    V4L2m2mContext *s = ((V4L2m2mPriv*)avctx->priv_data)->context;
    V4L2Context *const capture = &s->capture;
    V4L2Context *const output = &s->output;
    int ret;

    if (!s->buf_pkt.size) {
    // fetch the next packet (already run through the bitstream filter, e.g. h264_mp4toannexb)
        ret = ff_decode_get_packet(avctx, &s->buf_pkt);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN))
                return ff_v4l2_context_dequeue_frame(capture, frame, 0);
            else if (ret != AVERROR_EOF)
                return ret;
        }
    }

    if (s->draining)
        goto dequeue;
// the key step: enqueue the packet on the output port so the hardware can decode it
    ret = ff_v4l2_context_enqueue_packet(output, &s->buf_pkt);
    if (ret < 0 && ret != AVERROR(EAGAIN))
        goto fail;

    /* if EAGAIN don't unref packet and try to enqueue in the next iteration */
    if (ret != AVERROR(EAGAIN))
        av_packet_unref(&s->buf_pkt);

    if (!s->draining) {
        ret = v4l2_try_start(avctx);
        if (ret) {
            /* cant recover */
            if (ret != AVERROR(ENOMEM))
                ret = 0;
            goto fail;
        }
    }

dequeue:
    return ff_v4l2_context_dequeue_frame(capture, frame, -1);
fail:
    av_packet_unref(&s->buf_pkt);
    return ret;
}

ff_v4l2_context_enqueue_packet() is what feeds the data into V4L2:

int ff_v4l2_context_enqueue_packet(V4L2Context* ctx, const AVPacket* pkt)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    V4L2Buffer* avbuf;
    int ret;

    if (!pkt->size) {
        ret = v4l2_stop_decode(ctx);
        if (ret)
            av_log(logger(ctx), AV_LOG_ERROR, "%s stop_decode\n", ctx->name);
        s->draining = 1;
        return 0;
    }
// grab a free buffer
    avbuf = v4l2_getfree_v4l2buf(ctx);
    if (!avbuf)
        return AVERROR(EAGAIN);

    ret = ff_v4l2_buffer_avpkt_to_buf(pkt, avbuf);
    if (ret)
        return ret;

    return ff_v4l2_buffer_enqueue(avbuf);
}
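v4l2_stop_decode() is not listed above; based on the standard V4L2 stateful-decoder API, it boils down to sending V4L2_DEC_CMD_STOP, after which the driver finishes the queued buffers and flags the last capture buffer. A hedged approximation:

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

// approximation of the draining trigger: decode everything still queued,
// then signal EOS on the capture queue
static int stop_decode(int fd)
{
    struct v4l2_decoder_cmd cmd = {
        .cmd   = V4L2_DEC_CMD_STOP,
        .flags = 0,
    };

    if (ioctl(fd, VIDIOC_DECODER_CMD, &cmd) < 0)
        return -errno; // e.g. ENOTTY if the driver lacks decoder commands
    return 0;
}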

// find a free buffer
static V4L2Buffer* v4l2_getfree_v4l2buf(V4L2Context *ctx)
{
    int timeout = 0; /* return when no more buffers to dequeue */
    int i;

    /* get back as many output buffers as possible */
    // for an output context, first reclaim every buffer the driver has finished
    // with; the empty loop body is intentional, v4l2_dequeue_v4l2buf() does the work
    if (V4L2_TYPE_IS_OUTPUT(ctx->type)) {
          do {
          } while (v4l2_dequeue_v4l2buf(ctx, timeout));
    }
    // either way, return the first buffer marked available
    for (i = 0; i < ctx->num_buffers; i++) {
        if (ctx->buffers[i].status == V4L2BUF_AVAILABLE)
            return &ctx->buffers[i];
    }

    return NULL;
}



// dequeue a buffer: either an empty, consumed buffer from the output port,
// or a decoded YUV buffer from the capture port
static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
{
    struct v4l2_plane planes[VIDEO_MAX_PLANES];
    struct v4l2_buffer buf = { 0 };
    V4L2Buffer *avbuf;
    struct pollfd pfd = {
        .events =  POLLIN | POLLRDNORM | POLLPRI | POLLOUT | POLLWRNORM, /* default blocking capture */
        .fd = ctx_to_m2mctx(ctx)->fd,
    };
    int i, ret;

    if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx->buffers) {
    // check whether any capture buffer is still held by the driver; if every
    // buffer already sits in userspace, nothing can be dequeued, so warn
        for (i = 0; i < ctx->num_buffers; i++) {
            if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER)
                break;
        }
        if (i == ctx->num_buffers)
            av_log(logger(ctx), AV_LOG_WARNING, "All capture buffers returned to "
                                                "userspace. Increase num_capture_buffers "
                                                "to prevent device deadlock or dropped "
                                                "packets/frames.\n");
    }

    /* if we are draining and there are no more capture buffers queued in the driver we are done */
    if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx_to_m2mctx(ctx)->draining) {
        for (i = 0; i < ctx->num_buffers; i++) {
            /* capture buffer initialization happens during decode hence
             * detection happens at runtime
             */
            if (!ctx->buffers)
                break;

            if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER)
                goto start;
        }
        ctx->done = 1;
        return NULL;
    }

start:
    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
        pfd.events =  POLLOUT | POLLWRNORM;
    else {
        /* no need to listen to requests for more input while draining */
        if (ctx_to_m2mctx(ctx)->draining)
            pfd.events =  POLLIN | POLLRDNORM | POLLPRI;
    }

    for (;;) {
        ret = poll(&pfd, 1, timeout);
        if (ret > 0)
            break;
        if (errno == EINTR)
            continue;
        return NULL;
    }

    /* 0. handle errors */
    if (pfd.revents & POLLERR) {
        /* if we are trying to get free buffers but none have been queued yet
           no need to raise a warning */
        if (timeout == 0) {
            for (i = 0; i < ctx->num_buffers; i++) {
                if (ctx->buffers[i].status != V4L2BUF_AVAILABLE)
                    av_log(logger(ctx), AV_LOG_WARNING, "%s POLLERR\n", ctx->name);
            }
        }
        else
            av_log(logger(ctx), AV_LOG_WARNING, "%s POLLERR\n", ctx->name);

        return NULL;
    }

    /* 1. handle resolution changes */
    if (pfd.revents & POLLPRI) {
        ret = v4l2_handle_event(ctx);
        if (ret < 0) {
            /* if re-init failed, abort */
            ctx->done = 1;
            return NULL;
        }
        if (ret) {
            /* if re-init was successful drop the buffer (if there was one)
             * since we had to reconfigure capture (unmap all buffers)
             */
            return NULL;
        }
    }

    /* 2. dequeue the buffer */
    if (pfd.revents & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) {

        if (!V4L2_TYPE_IS_OUTPUT(ctx->type)) {
            /* there is a capture buffer ready */
            if (pfd.revents & (POLLIN | POLLRDNORM))
                goto dequeue;

            /* the driver is ready to accept more input; instead of waiting for the capture
             * buffer to complete we return NULL so input can proceed (we are single threaded)
             */
            if (pfd.revents & (POLLOUT | POLLWRNORM))
                return NULL;
        }

dequeue:
        memset(&buf, 0, sizeof(buf));
        buf.memory = V4L2_MEMORY_MMAP;
        buf.type = ctx->type;
        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            memset(planes, 0, sizeof(planes));
            buf.length = VIDEO_MAX_PLANES;
            buf.m.planes = planes;
        }

        ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DQBUF, &buf);
        if (ret) {
            if (errno != EAGAIN) {
                ctx->done = 1;
                if (errno != EPIPE)
                    av_log(logger(ctx), AV_LOG_DEBUG, "%s VIDIOC_DQBUF, errno (%s)\n",
                        ctx->name, av_err2str(AVERROR(errno)));
            }
            return NULL;
        }

        if (ctx_to_m2mctx(ctx)->draining && !V4L2_TYPE_IS_OUTPUT(ctx->type)) {
            int bytesused = V4L2_TYPE_IS_MULTIPLANAR(buf.type) ?
                            buf.m.planes[0].bytesused : buf.bytesused;
            if (bytesused == 0) {
                ctx->done = 1;
                return NULL;
            }
#ifdef V4L2_BUF_FLAG_LAST
            if (buf.flags & V4L2_BUF_FLAG_LAST)
                ctx->done = 1;
#endif
        }

        avbuf = &ctx->buffers[buf.index];
        avbuf->status = V4L2BUF_AVAILABLE;
        avbuf->buf = buf;
        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            memcpy(avbuf->planes, planes, sizeof(planes));
            avbuf->buf.m.planes = avbuf->planes;
        }
        return avbuf;
    }

    return NULL;
}

(to be continued...)
