The virtio vring queue

As mentioned in the previous post, during virtio device initialization setup_vp is called to create the virtqueues. Today every virtqueue is actually backed by a vring: you can think of virtqueue as an interface class, and vring_virtqueue as one concrete implementation of that interface.

/**
 * virtqueue - a queue to register buffers for sending or receiving.
 * @list: the chain of virtqueues for this device
 * @callback: the function to call when buffers are consumed (can be NULL).
 * @name: the name of this virtqueue (mainly for debugging)
 * @vdev: the virtio device this queue was created for.
 * @priv: a pointer for the virtqueue implementation to use.
 */
struct virtqueue {
    struct list_head list;
    void (*callback)(struct virtqueue *vq);
    const char *name;
    struct virtio_device *vdev;
    void *priv;
};
For a PCI device, priv points to a virtio_pci_vq_info structure, whose vq field points back to the virtqueue interface, queue holds the virtual address of the ring memory, msix_vector is the MSI-X vector number, etc.
struct virtio_pci_vq_info
{
    /* the actual virtqueue */
    struct virtqueue *vq;

    /* the number of entries in the queue */
    int num;

    /* the index of the queue */
    int queue_index;

    /* the virtual address of the ring queue */
    void *queue;

    /* the list node for the virtqueues list */
    struct list_head node;

    /* MSI-X vector (or none) */
    unsigned msix_vector;
};

The vring_virtqueue structure is defined as follows:

struct vring_virtqueue
{
    struct virtqueue vq;  /* the virtqueue interface this structure implements */

    /* Actual memory layout for this queue */
    struct vring vring;  /* pointers into the vring memory layout */

    /* Other side has made a mess, don't try any more. */
    bool broken;

    /* Host supports indirect buffers */
    bool indirect;  /* transport feature bit: does the host support indirect buffers? */

    /* Host publishes avail event idx */
    bool event;  /* transport feature bit: does the host support event-idx based interrupt/notify? */

    /* Number of free buffers */
    unsigned int num_free;  /* number of free descriptors left in the vring desc table; they form a list headed by free_head */
    /* Head of free buffer list. */
    unsigned int free_head;  /* index of the head of the free descriptor list */
    /* Number we've added since last sync. */
    unsigned int num_added;  /* number of add operations since the last sync; note this counts calls, not buffers added */
    /* Last used index we've seen. */
    u16 last_used_idx;

    /* How to notify other side. FIXME: commonalize hcalls! */
    void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
    /* They're supposed to lock for us. */
    unsigned int in_use;
#endif

    /* Tokens for callbacks. */
    void *data[];  /* token array, one entry per vring descriptor */
};
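
Throughout the code below, a struct virtqueue * is converted back to its containing vring_virtqueue with to_vvq. Shown here for reference, that helper is nothing more than a container_of cast (consistent with the kernel source of this era):

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)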

The virtio specification defines several standard operations on a virtqueue, e.g.:

struct virtqueue *vring_new_virtqueue(unsigned int num,
                      unsigned int vring_align,
                      struct virtio_device *vdev,
                      void *pages,
                      void (*notify)(struct virtqueue *),
                      void (*callback)(struct virtqueue *),
                      const char *name)
{
    struct vring_virtqueue *vq;
    unsigned int i;

    /* We assume num is a power of 2. */
    if (num & (num - 1)) {
        dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
        return NULL;
    }

    vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL); /* vring_virtqueue plus the data token array */
    if (!vq)
        return NULL;

    vring_init(&vq->vring, num, pages, vring_align);
    vq->vq.callback = callback;
    vq->vq.vdev = vdev;
    vq->vq.name = name;
    vq->notify = notify;
    vq->broken = false;
    vq->last_used_idx = 0;
    vq->num_added = 0;
    list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
    vq->in_use = false;
#endif

    vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); /* does the host support indirect buffers? */
    vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);  /* are interrupts/notifications driven by event idx? */

    /* No callback?  Tell other side not to bother us. */
    if (!callback)
        vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; /* no callback, so suppress interrupts */

    /* Put everything in free lists. */
    vq->num_free = num;
    vq->free_head = 0; /* index 0 is the head of the free descriptor list */
    for (i = 0; i < num-1; i++) { /* chain vring_desc[0], vring_desc[1], ... into a free list */
        vq->vring.desc[i].next = i+1;
        vq->data[i] = NULL;
    }
    vq->data[i] = NULL;

    return &vq->vq;
}
vring_new_virtqueue creates a vring_virtqueue. The allocation size is sizeof(struct vring_virtqueue) + num * sizeof(void *), and the pages argument passed in is the memory that holds the vring layout.
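
For reference, this is the layout that vring_init establishes inside that memory: desc, avail and used live in one contiguous region, with the used ring aligned up to vring_align (a sketch based on the legacy virtio_ring.h of the same era; the extra sizeof(__u16) leaves room for the used_event field):

struct vring {
    unsigned int num;           /* number of descriptors, a power of 2 */
    struct vring_desc *desc;    /* descriptor table */
    struct vring_avail *avail;  /* avail ring, written by the guest */
    struct vring_used *used;    /* used ring, written by the host */
};

static inline void vring_init(struct vring *vr, unsigned int num, void *p,
                              unsigned long align)
{
    vr->num = num;
    vr->desc = p;
    vr->avail = p + num * sizeof(struct vring_desc);
    vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__u16)
                         + align - 1) & ~(align - 1));
}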

/* virtqueue_add_buf: expose buffer to other end
 *  vq: the struct virtqueue we're talking about.
 *  sg: the description of the buffer(s).
 *  out_num: the number of sg readable by other side
 *  in_num: the number of sg which are writable (after readable ones)
 *  data: the token identifying the buffer.
 *      Returns remaining capacity of queue (sg segments) or a negative error.
 */
int virtqueue_add_buf(struct virtqueue *_vq,
          struct scatterlist sg[],
          unsigned int out,
          unsigned int in,
          void *data)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    unsigned int i, avail, head, uninitialized_var(prev);

    START_USE(vq);

    BUG_ON(data == NULL);

    /* If the host supports indirect descriptor tables, and we have multiple
     * buffers, then go indirect. FIXME: tune this threshold */
    if (vq->indirect && (out + in) > 1 && vq->num_free) {
        head = vring_add_indirect(vq, sg, out, in);
        if (head != vq->vring.num)
            goto add_head;
    }

    BUG_ON(out + in > vq->vring.num);
    BUG_ON(out + in == 0);

    if (vq->num_free < out + in) {
        pr_debug("Can't add buf len %i - avail = %i\n",
             out + in, vq->num_free);
        /* FIXME: for historical reasons, we force a notify here if
         * there are outgoing parts to the buffer.  Presumably the
         * host should service the ring ASAP. */
        if (out)
            vq->notify(&vq->vq);
        END_USE(vq);
        return -ENOSPC;
    }

    /* We're about to use some buffers from the free list. */
    vq->num_free -= out + in;

    head = vq->free_head; /* starting from vring_desc[vq->free_head], fill descriptors in turn with the new buffer */
    for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
        vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
        vq->vring.desc[i].addr = sg_phys(sg);
        vq->vring.desc[i].len = sg->length;
        prev = i;
        sg++;
    }
    for (; in; i = vq->vring.desc[i].next, in--) {
        vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
        vq->vring.desc[i].addr = sg_phys(sg);
        vq->vring.desc[i].len = sg->length;
        prev = i;
        sg++;
    }
    /* Last one doesn't continue. */
    vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT; /* the last descriptor in the chain carries no NEXT flag */

    /* Update free pointer */
    vq->free_head = i; /* advance vq->free_head; the chain just filled will be handed to the avail ring */

add_head:
    /* Set token. */
    vq->data[head] = data; /* store the caller's token in vq->data[head] */

    /* Put entry in available array (but don't update avail->idx until they
     * do sync).  FIXME: avoid modulus here? */
    avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
    vq->vring.avail->ring[avail] = head;

    pr_debug("Added buffer head %i to %p\n", head, vq);
    END_USE(vq);

    /* If we're indirect, we can fit many (assuming not OOM). */
    if (vq->indirect)
        return vq->num_free ? vq->vring.num : 0;
    return vq->num_free;
}

virtqueue_add_buf links the scatterlist passed in into the free descriptor chain of vring_desc, then writes the head of that newly filled chain into the next entry of the avail ring. Because virtqueue_add_buf may be called several times before the two ends sync their idx values, vring_virtqueue keeps a num_added counter recording how many times virtqueue_add_buf has been called since the last sync, e.g. consider the following snippet:

    /* Put entry in available array (but don't update avail->idx until they
     * do sync).  FIXME: avoid modulus here? */
    avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
    vq->vring.avail->ring[avail] = head;
Right after initialization vq->num_added == 0, so avail == vq->vring.avail->idx, and vring_avail.ring[idx] is set to the head of the newly added buffer chain. vring.avail->idx itself stays unchanged at this point; only vq->num_added is incremented by one, so the next virtqueue_add_buf call makes vring_avail.ring[idx+1] point at its new chain head, and so on.
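
Putting the pieces together, a driver-side sketch of queuing a two-segment buffer might look like the following (my_send, the header/payload split and the token choice are illustrative assumptions, not part of the source):

/* Hypothetical helper: expose one header + payload pair to the host. */
static int my_send(struct virtqueue *vq, void *hdr, size_t hdr_len,
                   void *payload, size_t payload_len, void *token)
{
    struct scatterlist sg[2];
    int err;

    sg_init_table(sg, 2);
    sg_set_buf(&sg[0], hdr, hdr_len);         /* readable by the host */
    sg_set_buf(&sg[1], payload, payload_len); /* readable by the host */

    /* 2 out entries, 0 in entries; token comes back via virtqueue_get_buf */
    err = virtqueue_add_buf(vq, sg, 2, 0, token);
    if (err < 0)
        return err;

    virtqueue_kick(vq); /* sync avail->idx and notify the host if needed */
    return 0;
}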

If the vring supports indirect descriptors, adding a buffer becomes much simpler; this is handled by vring_add_indirect:

static int vring_add_indirect(struct vring_virtqueue *vq,
                  struct scatterlist sg[],
                  unsigned int out,
                  unsigned int in)
{
    struct vring_desc *desc;
    unsigned head;
    int i;

    desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC); /* allocate out+in vring_desc entries */
    if (!desc)
        return vq->vring.num;

    /* Transfer entries from the sg list into the indirect page */
    for (i = 0; i < out; i++) {
        desc[i].flags = VRING_DESC_F_NEXT;
        desc[i].addr = sg_phys(sg);
        desc[i].len = sg->length;
        desc[i].next = i+1;
        sg++;
    }
    for (; i < (out + in); i++) {
        desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
        desc[i].addr = sg_phys(sg);
        desc[i].len = sg->length;
        desc[i].next = i+1;
        sg++;
    }

    /* Last one doesn't continue. */
    desc[i-1].flags &= ~VRING_DESC_F_NEXT;
    desc[i-1].next = 0;

    /* We're about to use a buffer */
    vq->num_free--; /* from the main vring's point of view only one descriptor is consumed */

    /* Use a single buffer which doesn't continue */
    head = vq->free_head;
    vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
    vq->vring.desc[head].addr = virt_to_phys(desc);
    vq->vring.desc[head].len = i * sizeof(struct vring_desc);

    /* Update free pointer */
    vq->free_head = vq->vring.desc[head].next; /* advance free_head by a single descriptor */

    return head;
}
In the indirect case only a single entry is added to the main descriptor table, pointing at an indirect array of vring_desc entries.
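
Schematically, for out = 2 and in = 1 the result is roughly as follows (sizeof(struct vring_desc) is 16 bytes):

main descriptor table                   indirect table (kmalloc'ed)
desc[head].flags = F_INDIRECT   ---->   desc[0]: readable buf, F_NEXT
desc[head].addr  = phys(table)          desc[1]: readable buf, F_NEXT
desc[head].len   = 3 * 16               desc[2]: writable buf, F_WRITE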

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
    if (virtqueue_kick_prepare(vq))
        virtqueue_notify(vq);
}

bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    u16 new, old;
    bool needs_kick;

    START_USE(vq);
    /* Descriptors and available array need to be set before we expose the
     * new available array entries. */
    virtio_wmb();

    old = vq->vring.avail->idx;
    new = vq->vring.avail->idx = old + vq->num_added;
    vq->num_added = 0;

    /* Need to update avail index before checking if we should notify */
    virtio_mb();

    if (vq->event) {
        needs_kick = vring_need_event(vring_avail_event(&vq->vring),
                          new, old);
    } else {
        needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
    }
    END_USE(vq);
    return needs_kick;
}
virtqueue_kick notifies the qemu/vhost side that the avail ring has been updated: virtqueue_kick_prepare works out whether a kick is actually needed, and virtqueue_notify writes the queue index into the QUEUE_NOTIFY field of the virtio device's BAR0 configuration space, triggering a VMEXIT that qemu/vhost traps.
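
For the PCI transport that notify hook is vp_notify, which does exactly that write (consistent with the legacy virtio_pci driver):

static void vp_notify(struct virtqueue *vq)
{
    struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
    struct virtio_pci_vq_info *info = vq->priv;

    /* writing the queue selector into QUEUE_NOTIFY traps into qemu/vhost */
    iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
}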

If the VIRTIO_RING_F_EVENT_IDX feature is negotiated, virtqueue_kick_prepare has to look at how vq->vring.avail->idx changed and compare it against the event index published by the other side. So what is this event idx for? Put plainly, each kick decision checks whether the avail entries the guest added during this sync have moved past the point the host reports having processed. E.g. suppose the guest adds 5 avail entries and syncs once, then adds another 5 and syncs again. If after the first sync the host has consumed only 4 of those avail entries, the condition fails on the second sync and the virtqueue is not kicked; only once the host has consumed all 5 entries from the first sync will the next sync kick it. The mechanism is somewhat like edge triggering.
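
The comparison itself is vring_need_event, together with the macro that reads the avail event index the host publishes in the slot right after used->ring[num] (both from the virtio_ring headers):

/* the host's avail event idx lives at the very end of the used ring */
#define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num])

/* kick only if new_idx has moved past event_idx since old; the casts make
 * the comparison robust against u16 wrap-around */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
    return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}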

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
    struct vring_virtqueue *vq = to_vvq(_vq);
    void *ret;
    unsigned int i;

    START_USE(vq);

    if (unlikely(vq->broken)) {
        END_USE(vq);
        return NULL;
    }

    if (!more_used(vq)) {
        pr_debug("No more buffers in queue\n");
        END_USE(vq);
        return NULL;
    }

    /* Only get used array entries after they have been exposed by host. */
    virtio_rmb();

    i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;  /* fetch the used_elem that last_used_idx points at */
    *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;

    if (unlikely(i >= vq->vring.num)) {
        BAD_RING(vq, "id %u out of range\n", i);
        return NULL;
    }
    if (unlikely(!vq->data[i])) {
        BAD_RING(vq, "id %u is not a head!\n", i);
        return NULL;
    }

    /* detach_buf clears data, so grab it now. */
    ret = vq->data[i];
    detach_buf(vq, i); /* detach_buf puts the descriptor chain of this used entry back at the head of the free list */
    vq->last_used_idx++;  /* advance last_used_idx */
    /* If we expect an interrupt for the next entry, tell host
     * by writing event index and flush out the write before
     * the read in the next get_buf call. */
    if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
        vring_used_event(&vq->vring) = vq->last_used_idx; /* publish last_used_idx to the host as used_event */
        virtio_mb();
    }

    END_USE(vq);
    return ret;
}
virtqueue_get_buf reclaims the used-ring entry that last_used_idx points at, returning the token that was stored when the buffer was added.
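
The more_used check used above (and again later in virtqueue_enable_cb) just compares our last_used_idx against the idx the host publishes in the used ring:

static inline bool more_used(const struct vring_virtqueue *vq)
{
    return vq->last_used_idx != vq->vring.used->idx;
}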

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
    unsigned int i;

    /* Clear data ptr. */
    vq->data[head] = NULL;

    /* Put back on free list: find end */
    i = head;

    /* Free the indirect table */
    if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
        kfree(phys_to_virt(vq->vring.desc[i].addr));

    while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
        i = vq->vring.desc[i].next;
        vq->num_free++;
    }

    vq->vring.desc[i].next = vq->free_head;
    vq->free_head = head;
    /* Plus final descriptor */
    vq->num_free++;
}
The actual recycling is done by detach_buf. Note that it never touches the addr fields inside the vring_desc entries, which implies those buffers have already been reclaimed elsewhere.

virtqueue_disable_cb masks the virtqueue's interrupt and virtqueue_enable_cb unmasks it. Both are simple, so we won't analyze them in depth; a typical usage pattern is sketched after the code.

void virtqueue_disable_cb(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);

    vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}


bool virtqueue_enable_cb(struct virtqueue *_vq)
{
    struct vring_virtqueue *vq = to_vvq(_vq);

    START_USE(vq);

    /* We optimistically turn back on interrupts, then check if there was
     * more to do. */
    /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
     * either clear the flags bit or point the event index at the next
     * entry. Always do both to keep code simple. */
    vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
    vring_used_event(&vq->vring) = vq->last_used_idx;
    virtio_mb();
    if (unlikely(more_used(vq))) {
        END_USE(vq);
        return false;
    }

    END_USE(vq);
    return true;
}
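
For context, here is a minimal sketch of how a driver's virtqueue callback typically combines these primitives: disable interrupts, drain all completed buffers, then re-enable and re-check to close the race with the host (my_callback and consume are hypothetical names; the loop structure mirrors what in-tree drivers do):

static void my_callback(struct virtqueue *vq)
{
    unsigned int len;
    void *token;

    do {
        virtqueue_disable_cb(vq); /* suppress interrupts while polling */
        while ((token = virtqueue_get_buf(vq, &len)) != NULL)
            consume(token, len);  /* hypothetical: handle completed buffer */
        /* enable_cb returns false if the host added more in the meantime */
    } while (!virtqueue_enable_cb(vq));
}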
