linux 核间通讯rpmsg架构分析

以imx8为例

在最底层硬件上,A核和M核通讯是靠硬件来进行的,称为MU(Messaging Unit,消息单元),如图

linux 核间通讯rpmsg架构分析_第1张图片

Linux RPMsg 是在virtio framework上实现的一个消息传递机制

VirtIO 是一个用来实现“虚拟IO”的通用框架,典型应用如虚拟PCI设备、虚拟网卡、虚拟磁盘等,KVM等虚拟化方案都使用了这个技术

与virtio对应的还有一个virtio-ring,其实现了 virtio 的具体通信机制和数据流程。

virtio 层属于控制层,负责前后端之间的通知机制(kick,notify)和控制流程,而 virtio-ring 则负责具体数据流转发

从整体架构上看,关系如下:

linux 核间通讯rpmsg架构分析_第2张图片

最底层有platform_bus,负责从dts获取配置来初始化相关对象,如virtio_device,初始化其config操作函数列表以及devID等,同时注册到virtio_bus

dts相关配置:

&rpmsg{
	/*
	 * 64K for one rpmsg instance:
	 * reg maps 0x20000 (128K) of shared memory at 0x90000000;
	 * with vdev-nums = <2>, each vdev instance owns a 64K slice
	 * holding its pair of vrings.
	 */
	vdev-nums = <2>;
	reg = <0x0 0x90000000 0x0 0x20000>;
	status = "okay";
};

主要初始化过程在imx_rpmsg_probe中,关键操作有:

注册MU相关的硬件中断

ret = request_irq(irq, imx_mu_rpmsg_isr, IRQF_EARLY_RESUME | IRQF_SHARED,

"imx-mu-rpmsg", rpdev);

初始化MU硬件

ret = imx_rpmsg_mu_init(rpdev);

创建工作队列用于处理MU中断数据

INIT_DELAYED_WORK(&(rpdev->rpmsg_work), rpmsg_work_handler);

创建通知链用于对接virtio queue

BLOCKING_INIT_NOTIFIER_HEAD(&(rpdev->notifier));

初始化virtio_device并注册

/*
 * Excerpt from imx_rpmsg_probe: create and register one virtio_device
 * per vdev instance configured in the device tree (vdev-nums).
 *
 * NOTE(review): the pr_debug below prints rpdev->vdev_nums in the
 * "vdev%d" slot where the loop index j looks intended — confirm
 * against the upstream/NXP kernel source.
 */
for (j = 0; j < rpdev->vdev_nums; j++) {
		pr_debug("%s rpdev%d vdev%d: vring0 0x%x, vring1 0x%x\n",
			 __func__, rpdev->core_id, rpdev->vdev_nums,
			 rpdev->ivdev[j].vring[0],
			 rpdev->ivdev[j].vring[1]);
		/* Mark the device as an rpmsg-class virtio device and hook
		 * up the platform-specific config ops (features, vq setup,
		 * status handling). */
		rpdev->ivdev[j].vdev.id.device = VIRTIO_ID_RPMSG;
		rpdev->ivdev[j].vdev.config = &imx_rpmsg_config_ops;
		rpdev->ivdev[j].vdev.dev.parent = &pdev->dev;
		rpdev->ivdev[j].vdev.dev.release = imx_rpmsg_vproc_release;
		/* Each vdev owns two virtqueues (rx/tx), so its first vq id
		 * is simply j * 2. */
		rpdev->ivdev[j].base_vq_id = j * 2;

		/* Hand the device to the virtio core; this is what later
		 * triggers rpmsg_probe() on the virtio bus. */
		ret = register_virtio_device(&rpdev->ivdev[j].vdev);
		if (ret) {
			pr_err("%s failed to register rpdev: %d\n",
					__func__, ret);
			return ret;
		}

	}

值得注意的是virtio_device的config结构 rpdev->ivdev[j].vdev.config = &imx_rpmsg_config_ops;

/*
 * virtio_config_ops backing every i.MX rpmsg virtio_device.  The virtio
 * core calls through this table for feature negotiation, virtqueue
 * creation/teardown and device status transitions.
 */
static struct virtio_config_ops imx_rpmsg_config_ops = {
	.get_features      = imx_rpmsg_get_features,      /* advertise host features */
	.finalize_features = imx_rpmsg_finalize_features, /* commit negotiated set */
	.find_vqs          = imx_rpmsg_find_vqs,          /* map vrings, create vqs */
	.del_vqs           = imx_rpmsg_del_vqs,           /* tear down vqs */
	.reset             = imx_rpmsg_reset,             /* reset the device */
	.set_status        = imx_rpmsg_set_status,        /* driver -> device status */
	.get_status        = imx_rpmsg_get_status,        /* device -> driver status */
};

imx_rpmsg_find_vqs

        --> rp_find_vq

                -->ioremap_nocache

                -->vring_new_virtqueue(...imx_rpmsg_notify, callback...)

需要注意的是callback的注册过程,在rpmsg_bus中

rpmsg_probe

        -->vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };

        -->virtio_find_vqs(vdev, 2, vqs, vq_cbs, names, NULL);

在此处注册的imx_rpmsg_notify 和 callback 将被virtio_bus框架所调用

中间virtio_bus承上启下,并负责提供统一标准的virtio queue操作接口,如virtqueue_add,virtqueue_kick等

针对struct virtqueue,对外只有一个callback函数,用于表示queue的数据变化

/*
 * Public virtqueue handle exposed by the virtio core.  Only one callback
 * is visible to users: it signals that the queue's data has changed.
 * Field order is layout-sensitive kernel ABI — do not reorder.
 */
struct virtqueue {
	struct list_head list;			/* link in vdev's list of vqs */
	void (*callback)(struct virtqueue *vq);	/* invoked when the queue has work */
	const char *name;			/* human-readable vq name */
	struct virtio_device *vdev;		/* owning virtio device */
	unsigned int index;			/* vq index within the device */
	unsigned int num_free;			/* free descriptors remaining */
	void *priv;				/* transport-private data */
};

其实virtqueue只是提供一层标准queue的操作接口,其具体实现依靠vring_virtqueue

/*
 * Internal wrapper that implements a virtqueue on top of a vring.
 * NOTE: for illustration, the struct vring members are shown expanded
 * inline here; in the real kernel header, struct vring is a separate
 * definition and this is simply an embedded member.
 */
struct vring_virtqueue {
	struct virtqueue vq;	/* public interface, embedded first */

	/* Actual memory layout for this queue */
	struct vring vring
        {
            /* concrete implementation of the queue */
	    unsigned int num;	/* number of descriptors */

	    struct vring_desc *desc;	/* descriptor table (buffers) */

	    struct vring_avail *avail;	/* driver -> device: available ring */

	    struct vring_used *used;	/* device -> driver: used ring */
        };


	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

...

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

其触发过程在vring_interrupt

/*
 * vring_interrupt() - interrupt-side entry for a virtqueue.
 * Checks whether the used ring actually advanced and, if so, invokes
 * the virtqueue's registered callback (e.g. rpmsg_recv_done).
 * Returns IRQ_NONE when there was no work, IRQ_HANDLED otherwise.
 */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
        /* Callers only hold the public struct virtqueue; recover the
         * enclosing vring_virtqueue wrapper. */
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
        /* Dispatch to the virtqueue's callback, if one was registered. */
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}

结合中断,整体流程如下:

imx_mu_rpmsg_isr

        -->rpmsg_work_handler

                -->vring_interrupt

                        -->virtqueue.callback

关于vring_virtqueue,包含一个notify,用于通知queue有变化

virtqueue_add 和 virtqueue_kick 以及 virtqueue_notify 都能够触发notify

最终notify的实现在imx_rpmsg_notify,其内容为设置MU寄存器,发送数据

/* kick the remote processor, and let it know which virtqueue to poke at */
static bool imx_rpmsg_notify(struct virtqueue *vq)
{
	unsigned int mu_rpmsg = 0;
	struct imx_rpmsg_vq_info *rpvq = vq->priv;

	/* Encode the triggered virtqueue's id in the upper 16 bits of
	 * the MU payload word. */
	mu_rpmsg = rpvq->vq_id << 16;
	/* Serialize MU register access across virtqueues of this rproc. */
	mutex_lock(&rpvq->rpdev->lock);
	/*
	 * Send the index of the triggered virtqueue as the mu payload.
	 * Use the timeout MU send message here.
	 * Since that M4 core may not be loaded, and the first MSG may
	 * not be handled by M4 when multi-vdev is enabled.
	 * To make sure that the message wouldn't be discarded when M4
	 * is running normally or in the suspend mode, only use
	 * the timeout mechanism for the first notify after the vdev is
	 * registered.
	 */
	if (unlikely(rpvq->rpdev->first_notify > 0)) {
		rpvq->rpdev->first_notify--;
		MU_SendMessageTimeout(rpvq->rpdev->mu_base, 1, mu_rpmsg, 200);
	} else {
		MU_SendMessage(rpvq->rpdev->mu_base, 1, mu_rpmsg);
	}
	mutex_unlock(&rpvq->rpdev->lock);

	return true;
}

最上面可看成基于rpmsg的应用,挂载到rpmsg_bus总线,针对rpmsg也有对应的标准操作接口,如rpmsg_send,rpmsg_sendto,rpmsg_poll等等

在rpmsg_bus这一层,还有一个rpmsg_endpoint概念,其对应有一个rpmsg_endpoint_ops,包含send,sendto等接口,目前还未对其深入研究

/*
 * rpmsg_endpoint_ops for virtio-backed endpoints.  rpmsg_send() and
 * friends dispatch through this table to the virtio transport.
 */
static const struct rpmsg_endpoint_ops virtio_endpoint_ops = {
	.destroy_ept        = virtio_rpmsg_destroy_ept,        /* tear down endpoint */
	.send               = virtio_rpmsg_send,               /* blocking send */
	.sendto             = virtio_rpmsg_sendto,             /* blocking, explicit dst */
	.send_offchannel    = virtio_rpmsg_send_offchannel,    /* blocking, explicit src+dst */
	.trysend            = virtio_rpmsg_trysend,            /* non-blocking send */
	.trysendto          = virtio_rpmsg_trysendto,          /* non-blocking, explicit dst */
	.trysend_offchannel = virtio_rpmsg_trysend_offchannel, /* non-blocking, src+dst */
};

发送流程为

rpmsg_send

        -->rpmsg_endpoint.ops->send

                -->virtio_rpmsg_send

                        -->virtqueue_add_outbuf   往queue填充数据

                        -->virtqueue_kick    通知对端

                                -->virtqueue_notify

                                        -->imx_rpmsg_notify

                                                -->MU_REG_WRITE

rpmsg_bus总线默认提供两个回调rpmsg_recv_done和rpmsg_xmit_done以便通知给上层rpmsg应用,分别表示收到数据及发送完成

接收处理流程:

imx_mu_rpmsg_isr

        -->rpmsg_work_handler

                -->vring_interrupt

                        -->virtqueue.callback

                                -->rpmsg_recv_done or rpmsg_xmit_done

/* called when an rx buffer is used, and it's time to digest a message */
static void rpmsg_recv_done(struct virtqueue *rvq)
{
	struct virtproc_info *vrp = rvq->vdev->priv;
	struct device *dev = &rvq->vdev->dev;
	struct rpmsg_hdr *msg;
	unsigned int len, msgs_received = 0;
	int err;

	/* The callback fired, so at least one used buffer is expected. */
	msg = virtqueue_get_buf(rvq, &len);
	if (!msg) {
		dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
		return;
	}

	/* Drain the used ring: dispatch each message to its endpoint. */
	while (msg) {
		err = rpmsg_recv_single(vrp, dev, msg, len);
		if (err)
			break;

		msgs_received++;

		msg = virtqueue_get_buf(rvq, &len);
	}

	dev_dbg(dev, "Received %u messages\n", msgs_received);

	/* tell the remote processor we added another available rx buffer */
	if (msgs_received)
                /* kick the receive queue */
		virtqueue_kick(vrp->rvq);
}
/*
 * Tx-done callback: the remote side consumed a send buffer, so any
 * sender blocked waiting for tx-buffer space may now retry.
 */
static void rpmsg_xmit_done(struct virtqueue *svq)
{
	struct virtproc_info *vproc = svq->vdev->priv;

	dev_dbg(&svq->vdev->dev, "%s\n", __func__);

	/* wake up potential senders that are waiting for a tx buffer */
	wake_up_interruptible(&vproc->sendq);
}

整体过程如下:

linux 核间通讯rpmsg架构分析_第3张图片

你可能感兴趣的:(Linux,virtio,rpmsg,核间通讯)