函数调用路径:
• 初始化
__start_xen()
  domain_create()   // 这里创建 dom0
    evtchn_init()   // 初始化事件通道
      get_free_port()
• 操作
相关操作都通过 hypercall HYPERVISOR_event_channel_op(int cmd, void *arg) 来进行。
arg 的类型根据 cmd 的不同而不同。例如:
#define EVTCHNOP_send 4
/* Argument structure for EVTCHNOP_send: names the local port to notify. */
struct evtchn_send {
    /* IN parameters. */
    evtchn_port_t port;
};
typedef struct evtchn_send evtchn_send_t;
arg 所指的参数结构被保存在 struct evtchn_op 中。
所有操作共用同一个服务例程:
/*
 * Service routine for all event-channel hypercalls
 * (HYPERVISOR_event_channel_op).  @cmd selects the sub-operation and
 * @arg is a guest pointer to the cmd-specific argument structure, which
 * is copied in, acted upon, and (for operations with OUT fields) copied
 * back out.  Returns 0 on success or a negative errno-style value.
 */
long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        /*
         * Inter-domain binding is a two-step process:
         * EVTCHNOP_alloc_unbound followed by EVTCHNOP_bind_interdomain.
         * This step allocates a port in domain <dom> and marks it as
         * accepting interdomain bindings from domain <remote_dom>.
         */
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 ) /* caller has already */
            return -EFAULT;                                 /* filled in dom and remote_dom */
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        /*
         * Connect to the given remote <dom, port> and return the local
         * port of the connection.  <remote_dom, remote_port> must
         * identify a port that is unbound and marked as accepting
         * bindings from the calling domain.
         */
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain); /* remote dom/port set by caller */
        if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        /* Bind the given VIRQ to the given VCPU. */
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        /* IPI channel: communication between VCPUs of the current domain. */
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        /*
         * Only dom0 and isolated driver domains (IDDs) may claim PIRQs.
         * These domains cannot take the physical IRQ directly: Xen
         * receives the PIRQ and forwards it to the domain for handling.
         */
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        /* Close the given port of the current domain. */
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_close(&close);
        break;
    }

    case EVTCHNOP_send: {
        /*
         * Used for interdomain channels and virtual IPIs.  VIRQ and PIRQ
         * channels never need it: for those the sender of the
         * notification is Xen itself, so no hypercall is involved.
         */
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(current->domain, send.port);
        break;
    }

    case EVTCHNOP_status: {
        /*
         * Query the state of <dom, port>.  The information returned
         * depends on the binding type (six kinds of channel state).
         */
        struct evtchn_status status; /* IN: the <dom, port> pair to query */
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        /* Re-bind the given event channel so the given VCPU handles it. */
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        /* See mask_evtchn()/unmask_evtchn() for how a domain sets/clears masks. */
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(unmask.port);
        break;
    }

    case EVTCHNOP_reset: {
        /* Close all event channels of the given domain. */
        struct evtchn_reset reset;
        if ( copy_from_guest(&reset, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_reset(&reset);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}
/*
 * EVTCHNOP_status handler: report the binding state of the channel
 * identified by <status->dom, status->port>.  Fills in status->status
 * (and the state-specific union member) plus status->vcpu on success.
 */
static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t dom = status->dom;
    int port = status->port; /* the <dom, port> pair being queried */
    struct evtchn *chn;
    long rc = 0;

    rc = rcu_lock_target_domain_by_id(dom, &d);
    if ( rc )
        return rc;

    spin_lock(&d->event_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port); /* look up the struct evtchn for this port */
    rc = xsm_evtchn_status(d, chn);
    if ( rc )
        goto out;

    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid; /* which remote domain this port is offered to */
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port; /* peer's <dom, port> */
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->event_lock);
    rcu_unlock_domain(d);
    return rc;
}
/*
 * EVTCHNOP_alloc_unbound handler: obtain a free event channel (port) in
 * the domain named by alloc->dom, to be bound later by alloc->remote_dom.
 * DOMID_SELF may be used for either field to mean the calling domain.
 * On success:
 *   1. the channel's state is set to ECS_UNBOUND and its remote_domid
 *      is recorded;
 *   2. the allocated port is returned via alloc->port.
 * NOTE(review): the original notes claim the port is then published in
 * xenstore for later lookup by the peer — that mechanism is not visible
 * here; confirm against the toolstack code.
 */
static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int port;
    domid_t dom = alloc->dom;
    long rc;

    rc = rcu_lock_target_domain_by_id(dom, &d);
    if ( rc )
        return rc;

    spin_lock(&d->event_lock);

    if ( (port = get_free_port(d)) < 0 ) /* grab a free port in the target domain */
        ERROR_EXIT_DOM(port, d);
    chn = evtchn_from_port(d, port); /* its corresponding struct evtchn */

    rc = xsm_evtchn_unbound(d, chn, alloc->remote_dom);
    if ( rc )
        goto out;

    chn->state = ECS_UNBOUND; /* (1) mark the channel as awaiting a bind */
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id; /* (2) resolve DOMID_SELF */

    alloc->port = port; /* (3) report the port back through the OUT field */

 out:
    spin_unlock(&d->event_lock);
    rcu_unlock_domain(d);
    return rc;
}
/*
 * EVTCHNOP_bind_interdomain handler.  The guest fills in
 * bind->remote_dom and bind->remote_port before making the hypercall;
 * on success the newly connected local port is returned in
 * bind->local_port.
 */
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int lport, rport = bind->remote_port;
    domid_t rdom = bind->remote_dom;
    long rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by first acquiring lock of domain with smaller id. */
    if ( ld < rd )
    {
        spin_lock(&ld->event_lock);
        spin_lock(&rd->event_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->event_lock);
        spin_lock(&ld->event_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 ) /* grab a free local port */
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport); /* its corresponding local evtchn */

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT_DOM(-EINVAL, rd);
    rchn = evtchn_from_port(rd, rport); /* remote evtchn from <rd, rport> */
    /*
     * The remote channel must be ECS_UNBOUND and its remote_domid must
     * name the calling domain, i.e. the remote port must have been
     * offered to us — that offer was made in EVTCHNOP_alloc_unbound.
     */
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT_DOM(-EINVAL, rd);

    rc = xsm_evtchn_interdomain(ld, lchn, rd, rchn);
    if ( rc )
        goto out;

    lchn->u.interdomain.remote_dom = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state = ECS_INTERDOMAIN; /* local end now fully connected */

    rchn->u.interdomain.remote_dom = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state = ECS_INTERDOMAIN; /* remote end now fully connected */

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport); /* mark pending on the bound VCPU */
    /* NOTE(review): where is lchn->notify_vcpu_id initialised?  Presumably
     * defaults to VCPU 0 when the channel is created — confirm. */

    bind->local_port = lport; /* OUT: the local end of the connection */

 out:
    spin_unlock(&ld->event_lock);
    if ( ld != rd )
        spin_unlock(&rd->event_lock);

    rcu_unlock_domain(rd);

    return rc;
}
/*
 * EVTCHNOP_bind_virq handler: bind VIRQ bind->virq to VCPU bind->vcpu of
 * the calling domain.  On success the allocated port is returned in
 * bind->port and also recorded in the VCPU's virq_to_evtchn[] table.
 */
static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
    struct evtchn *chn;
    struct vcpu *v;
    struct domain *d = current->domain;
    int port, virq = bind->virq, vcpu = bind->vcpu;
    long rc = 0;

    if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
        return -EINVAL;

    if ( virq_is_global(virq) && (vcpu != 0) ) /* global VIRQs may only bind to VCPU 0 */
        return -EINVAL;

    if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
         ((v = d->vcpu[vcpu]) == NULL) ) /* resolve the VCPU id to a struct vcpu */
        return -ENOENT;

    spin_lock(&d->event_lock);

    if ( v->virq_to_evtchn[virq] != 0 ) /* nonzero means this VIRQ is already bound */
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 ) /* grab a free port */
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port); /* its corresponding struct evtchn */
    chn->state = ECS_VIRQ;           /* mark the channel as a VIRQ binding */
    chn->notify_vcpu_id = vcpu;      /* record which VCPU it notifies */
    chn->u.virq = virq;              /* record the bound VIRQ number */

    /*
     * Record the port in the VCPU's virq_to_evtchn[] and in bind->port.
     * (PIRQ bindings, by contrast, are recorded in the domain-wide
     * pirq_to_evtchn[] table.)
     */
    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->event_lock);

    return rc;
}