The VIRTIO vring TX/RX queue creation flow

Following up on the question raised during the document review, I went through the code again: the VIRTIO TX/RX queues are created in the guest-side front-end driver.

 

Guest front-end driver, taking the network device as an example. virtio-net: after PCI discovery, the TX/RX queues are allocated through the PCI bus.
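For context, each such queue is a vring: a block of guest memory shared with the back-end. Below is a minimal reference sketch of the legacy (split) virtqueue layout; the field names follow include/uapi/linux/virtio_ring.h and the virtio spec, but the definitions are simplified here, so treat this as an illustration rather than the exact kernel headers.

#include <stdint.h>

/* One buffer descriptor; addr is a guest-physical address (GPA). */
struct vring_desc {
    uint64_t addr;      /* GPA of the data buffer */
    uint32_t len;       /* buffer length in bytes */
    uint16_t flags;     /* VRING_DESC_F_NEXT / _WRITE / _INDIRECT */
    uint16_t next;      /* next descriptor index when chained */
};

/* Driver -> device ring: heads of descriptor chains that are ready. */
struct vring_avail {
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
};

/* Device -> driver ring: chains the device has finished with. */
struct vring_used_elem {
    uint32_t id;        /* head index of the consumed chain */
    uint32_t len;       /* bytes the device wrote into it */
};

struct vring_used {
    uint16_t flags;
    uint16_t idx;
    struct vring_used_elem ring[];
};

In the legacy interface the descriptor table, available ring and used ring sit back-to-back in one physically contiguous, page-aligned buffer; it is the GPA of that buffer which the driver later reports to QEMU through VIRTIO_PCI_QUEUE_PFN in setup_vq() below.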

static int virtnet_probe(struct virtio_device *vdev)
{
……
        /*
         * Initialize the virtqueues:
         * create and initialize the TX/RX queues.
         */
        err = init_vqs(vi);
……
}

 

/* Create and initialize the TX/RX queues */

static int init_vqs(struct virtnet_info *vi)
{
        /* Allocate the queue structures */
        ret = virtnet_alloc_queues(vi);
        if (ret)
                goto err;

        /* Create the vrings via find_vqs */
        ret = virtnet_find_vqs(vi);
        if (ret)
                goto err_free;
……
}

 

/* Create the vrings via find_vqs */

static int virtnet_find_vqs(struct virtnet_info *vi)
{
……
        /*
         * This ends up in vp_find_vqs(), which actually creates the
         * virtqueue internals, allocates the ring memory, and tells the
         * QEMU back-end driver its address.
         */
        ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, names);
……
}

 

 

The VIRTIO PCI bus layer

static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                       struct virtqueue *vqs[],
                       vq_callback_t *callbacks[],
                       const char *names[])
{
        int err;

        /* Try MSI-X with one vector per queue. */
        err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true);
        if (!err)
                return 0;

        /* Fallback: MSI-X with one shared vector for all queues. */
        err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, false);
        if (!err)
                return 0;

        /* Finally fall back to regular (INTx) interrupts. */
        return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, false, false);
}

 

 

 

static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                              struct virtqueue *vqs[],
                              vq_callback_t *callbacks[],
                              const char *names[],
                              bool use_msix,
                              bool per_vq_vectors)
{
……
                /* The core of the work happens in setup_vq() */
                vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec);
……
}

 

static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
                                  u16 msix_vec)
{
……
        /* Report the (guest-physical) address of the vring to the QEMU back-end */
        iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
                  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
……
}
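On the legacy PCI interface this register is only 32 bits wide, so the driver writes the page frame number rather than the full address. Here is a small self-contained example of the arithmetic on both sides, assuming the usual legacy value VIRTIO_PCI_QUEUE_ADDR_SHIFT == 12 (4 KiB pages); the GPA used is made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12   /* legacy interface: 4 KiB pages */

int main(void)
{
    /* Hypothetical GPA of the page-aligned vring buffer inside the guest. */
    uint64_t gpa = 0x7f0ba000ULL;

    /* Guest side: the value setup_vq() writes to VIRTIO_PCI_QUEUE_PFN. */
    uint32_t pfn = (uint32_t)(gpa >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);

    /* Host side: virtio_ioport_write() recovers the GPA from that value. */
    uint64_t pa = (uint64_t)pfn << VIRTIO_PCI_QUEUE_ADDR_SHIFT;

    printf("pfn=0x%x, reconstructed pa=0x%llx\n",
           (unsigned)pfn, (unsigned long long)pa);
    return pa == gpa ? 0 : 1;
}

This also shows two constraints of the legacy interface: the ring must be page-aligned, and its GPA must fit in 44 bits (a 32-bit PFN shifted left by 12).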

 

The I/O read/write is trapped by the QEMU back-end, which emulates it.

 

QEMU back-end driver

static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
……
    switch (addr) {
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
……
        virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
……
}


There is one more detail: what the front-end driver writes is the GPA (guest-physical address) of the queue: “iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);”

After the QEMU back-end traps this write, how can QEMU use the GPA directly? Where does the GPA->HVA translation happen?

 

The translation is done when QEMU takes messages out of the virtqueue.
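Conceptually the translation is cheap because QEMU itself allocated the guest's RAM (and registered it with KVM), so every guest-physical range of RAM already has a known base pointer in QEMU's own virtual address space. The sketch below is purely illustrative, with made-up struct and function names; in real QEMU this lookup is done by the MemoryRegion/RAMBlock machinery behind address_space_map().

#include <stdint.h>
#include <stddef.h>

/* Illustrative only: one contiguous chunk of guest RAM that QEMU has
 * mmap'ed into its own process.  Real QEMU keeps this information in
 * MemoryRegion/RAMBlock structures. */
struct guest_ram_block {
    uint64_t gpa_base;   /* where the block sits in guest-physical space */
    uint64_t size;
    uint8_t *hva_base;   /* where the same bytes live in QEMU's address space */
};

/* Translate a GPA into QEMU's own pointer, or NULL if it is not RAM. */
static void *gpa_to_hva(struct guest_ram_block *blocks, size_t nblocks,
                        uint64_t gpa)
{
    for (size_t i = 0; i < nblocks; i++) {
        if (gpa >= blocks[i].gpa_base &&
            gpa < blocks[i].gpa_base + blocks[i].size) {
            return blocks[i].hva_base + (gpa - blocks[i].gpa_base);
        }
    }
    return NULL;   /* MMIO or a hole: cannot be mapped as plain memory */
}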

 

 

In the QEMU code, after a VIRTIO notification is received, messages are taken out of the shared queue via virtqueue_pop.

 

int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
……
    /* Now map what we have collected */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);
……
}
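Before this mapping step, virtqueue_pop() has already walked one descriptor chain in the shared ring and copied each descriptor's addr/len into elem->in_addr/out_addr and the iovec lengths; those addresses are still GPAs, which is exactly why the map step is needed. A simplified, illustrative sketch of that collection phase follows (not QEMU's actual code; the descriptor layout matches the split-ring sketch earlier, and lengths are omitted for brevity).

#include <stdint.h>

#define VRING_DESC_F_NEXT   1   /* descriptor chains on to 'next' */
#define VRING_DESC_F_WRITE  2   /* device writes into this buffer */

struct vring_desc {             /* as in the split-ring layout sketch */
    uint64_t addr;              /* GPA of the buffer */
    uint32_t len;
    uint16_t flags;
    uint16_t next;
};

/* Walk one descriptor chain starting at 'head' and record the GPAs of the
 * device-readable (out) and device-writable (in) buffers. */
static void collect_desc_chain(const struct vring_desc *desc, uint16_t head,
                               uint64_t in_addr[], unsigned *in_num,
                               uint64_t out_addr[], unsigned *out_num)
{
    uint16_t i = head;

    for (;;) {
        if (desc[i].flags & VRING_DESC_F_WRITE) {
            in_addr[(*in_num)++] = desc[i].addr;    /* still a GPA here */
        } else {
            out_addr[(*out_num)++] = desc[i].addr;  /* still a GPA here */
        }
        if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
            break;                                  /* end of the chain */
        }
        i = desc[i].next;                           /* follow the chain */
    }
}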

 

void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
                      size_t num_sg, int is_write)
{
    int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            error_report("virtio: trying to map MMIO memory");
            exit(1);
        }
    }
}

 

/* Translate a guest physical address (GPA) into a host virtual address (HVA) */

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}
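One related detail: the mapping is temporary, and QEMU releases it again with cpu_physical_memory_unmap() once it is done with the buffer (for the virtqueue buffers this happens when the element is returned via virtqueue_fill()/virtqueue_push()). Below is a hedged usage sketch of the map/unmap pair; copy_from_guest is an illustrative helper of my own, it assumes QEMU-internal context where hwaddr, memcpy and these two helpers are available, and the exact parameter types have varied slightly across QEMU versions.

/* Illustrative helper (not a QEMU API): map a guest buffer, copy it out,
 * and release the mapping. */
static int copy_from_guest(hwaddr gpa, void *dst, hwaddr len)
{
    hwaddr maplen = len;
    void *hva = cpu_physical_memory_map(gpa, &maplen, 0 /* !is_write */);

    if (!hva || maplen != len) {
        /* MMIO, a hole, or a truncated mapping: give up. */
        if (hva) {
            cpu_physical_memory_unmap(hva, maplen, 0, 0);
        }
        return -1;
    }

    memcpy(dst, hva, len);                          /* use the host pointer */
    cpu_physical_memory_unmap(hva, maplen, 0, 0);   /* nothing written back */
    return 0;
}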

 

 

Thanks

Feng


The official VIRTIO spec is at:

http://docs.oasis-open.org/virtio/virtio/v1.0/virtio-v1.0.html

 

 

