Xen Event Channel (2)

static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)

{

    struct evtchn *chn;

    struct domain *d = current->domain;                            // the current domain

    int            port, vcpu = bind->vcpu;

    long           rc = 0;

 

    if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||

         (d->vcpu[vcpu] == NULL) )

        return -ENOENT;

 

    spin_lock(&d->event_lock);

 

    if ( (port = get_free_port(d)) < 0 )                                     // allocate a free port

        ERROR_EXIT(port);

 

    chn = evtchn_from_port(d, port);                                     // get the evtchn for this port

    chn->state          = ECS_IPI;

    chn->notify_vcpu_id = vcpu;                                              // bind the evtchn to the IPI, i.e. set this evtchn's notify_vcpu_id

 

    bind->port = port;                                                               // report the allocated port back to the caller

 

 out:

    spin_unlock(&d->event_lock);

 

    return rc;

}
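
For context, a guest reaches this handler through the event-channel hypercall. A minimal guest-side sketch, assuming the Linux pvops wrapper HYPERVISOR_event_channel_op and struct evtchn_bind_ipi from Xen's public headers:

    /* Guest-side sketch: allocate an IPI event channel for a VCPU. */
    struct evtchn_bind_ipi bind = { .vcpu = vcpu };
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind);

    if ( rc == 0 )
        port = bind.port;    /* the port written back by evtchn_bind_ipi() */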


static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)

{

    struct evtchn *chn;

    struct domain *d = current->domain;

    int            port, pirq = bind->pirq;

    long           rc;

 

    if ( (pirq < 0) || (pirq >= d->nr_pirqs) )

        return -EINVAL;

 

    if ( !irq_access_permitted(d, pirq) )

        return -EPERM;

 

    spin_lock(&d->event_lock);

 

    if ( d->pirq_to_evtchn[pirq] != 0 )

        ERROR_EXIT(-EEXIST);

 

    if ( (port = get_free_port(d)) < 0 )                                     // allocate a free port

        ERROR_EXIT(port);

 

    chn = evtchn_from_port(d, port);                                     // get the corresponding evtchn

 

    d->pirq_to_evtchn[pirq] = port;                                       // record the port bound to this PIRQ in the domain's pirq_to_evtchn[]

    rc = pirq_guest_bind(d->vcpu[0], pirq,

                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));

    if ( rc != 0 )

    {

        d->pirq_to_evtchn[pirq] = 0;

        goto out;

    }

 

    chn->state  = ECS_PIRQ;

    chn->u.pirq = pirq;                                                              // complete the PIRQ binding

 

    bind->port = port;                                                               // report the allocated port back to the caller

 

 out:

    spin_unlock(&d->event_lock);

 

    return rc;

}
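
The guest-side call is symmetric; a minimal sketch, again assuming HYPERVISOR_event_channel_op and struct evtchn_bind_pirq from the public headers:

    /* Guest-side sketch: bind a physical IRQ to a fresh event channel. */
    struct evtchn_bind_pirq bind = {
        .pirq  = pirq,
        .flags = BIND_PIRQ__WILL_SHARE,   /* the guest tolerates a shared IRQ line */
    };

    if ( HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind) == 0 )
        port = bind.port;                 /* written back by evtchn_bind_pirq() */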


static long __evtchn_close(struct domain *d1, int port1)

{

    struct domain *d2 = NULL;

    struct vcpu   *v;

    struct evtchn *chn1, *chn2;

    int            port2;

    long           rc = 0;

 

 again:

    spin_lock(&d1->event_lock);

 

    if ( !port_is_valid(d1, port1) )

    {

        rc = -EINVAL;

        goto out;

    }

 

    chn1 = evtchn_from_port(d1, port1);                                                 // get the evtchn for this port

 

    /* Guest cannot close a Xen-attached event channel. */

    if ( unlikely(chn1->consumer_is_xen) )

    {

        rc = -EINVAL;

        goto out;

    }

 

    switch ( chn1->state )

    {

    case ECS_FREE:

    case ECS_RESERVED:

        rc = -EINVAL;

        goto out;

 

    case ECS_UNBOUND:

        break;

 

    case ECS_PIRQ:

        pirq_guest_unbind(d1, chn1->u.pirq);

        d1->pirq_to_evtchn[chn1->u.pirq] = 0;

        break;

 

    case ECS_VIRQ:

        for_each_vcpu ( d1, v )

        {

            if ( v->virq_to_evtchn[chn1->u.virq] != port1 )

                continue;

            v->virq_to_evtchn[chn1->u.virq] = 0;

            spin_barrier_irq(&v->virq_lock);

        }

        break;

 

    case ECS_IPI:

        break;

 

    case ECS_INTERDOMAIN:

        if ( d2 == NULL )

        {

            d2 = chn1->u.interdomain.remote_dom;

 

            /* If we unlock d1 then we could lose d2. Must get a reference. */

            if ( unlikely(!get_domain(d2)) )

                BUG();

 

            if ( d1 < d2 )

            {

                spin_lock(&d2->event_lock);

            }

            else if ( d1 != d2 )

            {

                spin_unlock(&d1->event_lock);

                spin_lock(&d2->event_lock);

                goto again;

            }

        }

        else if ( d2 != chn1->u.interdomain.remote_dom )

        {

            /*

             * We can only get here if the port was closed and re-bound after

             * unlocking d1 but before locking d2 above. We could retry but

             * it is easier to return the same error as if we had seen the

             * port in ECS_CLOSED. It must have passed through that state for

             * us to end up here, so it's a valid error to return.

             */

            rc = -EINVAL;

            goto out;

        }

 

        port2 = chn1->u.interdomain.remote_port;                             // get the remote port

        BUG_ON(!port_is_valid(d2, port2));

 

        chn2 = evtchn_from_port(d2, port2);                                         // get the corresponding remote evtchn

        BUG_ON(chn2->state != ECS_INTERDOMAIN);

        BUG_ON(chn2->u.interdomain.remote_dom != d1);

 

        chn2->state = ECS_UNBOUND;                                               // return the remote end to the allocated-but-not-yet-bound state

        chn2->u.unbound.remote_domid = d1->domain_id;              // ECS_UNBOUND

        break;

 

    default:

        BUG();

    }

 

    /* Clear pending event to avoid unexpected behavior on re-bind. */

    clear_bit(port1, &shared_info(d1, evtchn_pending));

 

    /* Reset binding to vcpu0 when the channel is freed. */

    chn1->state          = ECS_FREE;                                               // mark the local end ECS_FREE

    chn1->notify_vcpu_id = 0;                                                                    // reset the notify-VCPU binding to VCPU0, the initial state

 

    xsm_evtchn_close_post(chn1);

 

 out:

    if ( d2 != NULL )

    {

        if ( d1 != d2 )

            spin_unlock(&d2->event_lock);

        put_domain(d2);

    }

 

    spin_unlock(&d1->event_lock);

 

    return rc;

}
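
The ECS_INTERDOMAIN case is the subtle one: __evtchn_close must end up holding both domains' event_locks while a concurrent close may race in from the other side. It uses address ordering: if d1 < d2 the locks simply nest; otherwise it drops d1's lock, takes d2's, and retries from the `again` label so the locks are always taken in ascending address order (a domain reference keeps d2 alive across the window where no lock is held). A distilled sketch of the pattern, with hypothetical type and field names, not actual Xen code:

    /* Hypothetical sketch: deadlock-free acquisition of two spinlocks. */
    void lock_pair(struct obj *a, struct obj *b)
    {
        if ( a == b )
            spin_lock(&a->lock);      /* same object: one lock suffices */
        else if ( a < b )
        {
            spin_lock(&a->lock);      /* lower address first ...          */
            spin_lock(&b->lock);
        }
        else
        {
            spin_lock(&b->lock);      /* ... so every path agrees on the  */
            spin_lock(&a->lock);      /* order and deadlock cannot occur  */
        }
    }

Unlike this sketch, __evtchn_close must also revalidate the channel state after the `goto again`, since the port may have been closed and rebound while no lock was held.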

 

 

static long evtchn_close(evtchn_close_t *close)

{

    return __evtchn_close(current->domain, close->port);

}
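
On the guest side, closing is a single small hypercall; a minimal sketch assuming struct evtchn_close from the public headers:

    /* Guest-side sketch: release a previously bound port. */
    struct evtchn_close close = { .port = port };

    (void)HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);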


d is the local domain (current->domain); lport is the local port the event is sent on.

int evtchn_send(struct domain *d, unsigned int lport)

{

    struct evtchn *lchn, *rchn;

    struct domain *ld = d, *rd;

    struct vcpu   *rvcpu;

    int            rport, ret = 0;

 

    spin_lock(&ld->event_lock);

 

    if ( unlikely(!port_is_valid(ld, lport)) )

    {

        spin_unlock(&ld->event_lock);

        return -EINVAL;

    }

 

    lchn = evtchn_from_port(ld, lport);                // first get the local evtchn for lport

 

    /* Guest cannot send via a Xen-attached event channel. */

    if ( unlikely(lchn->consumer_is_xen) )

    {

        spin_unlock(&ld->event_lock);

        return -EINVAL;

    }

 

    ret = xsm_evtchn_send(ld, lchn);

    if ( ret )

        goto out;

 

    switch ( lchn->state )

    {

    case ECS_INTERDOMAIN:                                             // inter-domain communication

        rd    = lchn->u.interdomain.remote_dom;          // get the remote/peer domain

        rport = lchn->u.interdomain.remote_port;            // get the remote/peer port

        rchn  = evtchn_from_port(rd, rport);                     // get the evtchn for rport

        rvcpu = rd->vcpu[rchn->notify_vcpu_id];                // get the VCPU the peer evtchn is bound to

        if ( rchn->consumer_is_xen )

        {

            /* Xen consumers need notification only if they are blocked. */

            if ( test_and_clear_bit(_VPF_blocked_in_xen,

                                    &rvcpu->pause_flags) )

                vcpu_wake(rvcpu);

        }

        else

        {

            evtchn_set_pending(rvcpu, rport);      // mark the event pending on the peer VCPU's port;

        }                                                                                       // the event handler then runs asynchronously

        break;

    case ECS_IPI:

        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);                  // for an IPI, mark the event pending on the bound local VCPU

        break;

    case ECS_UNBOUND:

        /* silently drop the notification */

        break;

    default:                                                               // ECS_VIRQ & ECS_PIRQ never reach here:

        ret = -EINVAL;                                         // those notifications are raised by Xen itself, so no hypercall is involved

    }

 

out:

    spin_unlock(&ld->event_lock);

 

    return ret;

}
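
Sending is likewise a small hypercall from the guest; a minimal sketch assuming struct evtchn_send from the public headers:

    /* Guest-side sketch: notify the peer over an already-bound channel. */
    struct evtchn_send send = { .port = local_port };

    (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);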

After binding to a VCPU, events arriving on this channel are handled by that VCPU.

Masking an evtchn works at two levels (see the sketch after this function):

1)   Mask one evtchn for all VCPUs: set the channel's MASK bit, which lives in the evtchn_mask bitmap in struct shared_info.

2)   Mask all evtchns for one VCPU: done through a member of struct vcpu_info; the method is described at that structure's definition.

long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)

{

    struct domain *d = current->domain;

    struct evtchn *chn;

    long           rc = 0;

 

    if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) )

        return -ENOENT;

 

    spin_lock(&d->event_lock);

 

    if ( !port_is_valid(d, port) )

    {

        rc = -EINVAL;

        goto out;

    }

 

    chn = evtchn_from_port(d, port);                                                        // get the evtchn for this port

 

    /* Guest cannot re-bind a Xen-attached event channel. */

    if ( unlikely(chn->consumer_is_xen) )

    {

        rc = -EINVAL;

        goto out;

    }

 

    switch ( chn->state )

    {

    case ECS_VIRQ:

        if ( virq_is_global(chn->u.virq) )                              // only global virtual IRQs may be rebound to another VCPU

            chn->notify_vcpu_id = vcpu_id;     // "binding" just means setting the evtchn's notify_vcpu_id

        else

            rc = -EINVAL;

        break;

    case ECS_UNBOUND:

    case ECS_INTERDOMAIN:        // after an inter-domain bind the state becomes ECS_INTERDOMAIN

    case ECS_PIRQ:

        chn->notify_vcpu_id = vcpu_id;

        break;

    default:

        rc = -EINVAL;

        break;

    }

 

 out:

    spin_unlock(&d->event_lock);

 

    return rc;

}
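
The masking sketch promised above, assuming the evtchn_mask bitmap in struct shared_info and the evtchn_upcall_mask byte in struct vcpu_info from the Xen public ABI (set_bit stands in for an atomic bitmap helper):

    /* Guest-side sketch of the two masking levels. */

    /* 1) Mask one event channel for every VCPU: set its bit in shared_info. */
    set_bit(port, (unsigned long *)shared_info->evtchn_mask);

    /* 2) Mask all event channels for the current VCPU. */
    vcpu_info->evtchn_upcall_mask = 1;    /* write 0 again to re-enable delivery */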


int evtchn_unmask(unsigned int port)

{

    struct domain *d = current->domain;

    struct vcpu   *v;

 

    spin_lock(&d->event_lock);

 

    if ( unlikely(!port_is_valid(d, port)) )

    {

        spin_unlock(&d->event_lock);

        return -EINVAL;

    }

 

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];                     // get the notify VCPU for this port

 

    /*

     * These operations must happen in strict order. Based on

     * include/xen/event.h:evtchn_set_pending().

     */

    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) && // if the mask bit was set, clear it (unmask)

         test_bit          (port, &shared_info(d, evtchn_pending)) && // and the port has an event pending

         !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),      // then set this VCPU's evtchn_pending_sel bit

                            &vcpu_info(v, evtchn_pending_sel)) )

    {

        vcpu_mark_events_pending(v);                                       // and raise this VCPU's evtchn_upcall_pending flag

    }

 

    spin_unlock(&d->event_lock);

 

    return 0;

}
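
A guest typically reaches this through EVTCHNOP_unmask when it cannot safely manipulate the shared bitmaps itself; a minimal sketch assuming struct evtchn_unmask from the public headers:

    /* Guest-side sketch: ask Xen to unmask a port and re-deliver any pending event. */
    struct evtchn_unmask unmask = { .port = port };

    (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);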


static long evtchn_reset(evtchn_reset_t *r)

{

    domid_t dom = r->dom;

    struct domain *d;

    int i, rc;

 

    rc = rcu_lock_target_domain_by_id(dom, &d);

    if ( rc )

        return rc;

 

    rc = xsm_evtchn_reset(current->domain, d);

    if ( rc )

        goto out;

 

    for ( i = 0; port_is_valid(d, i); i++ )

        (void)__evtchn_close(d, i);            // this works because ports are allocated strictly in order, so the valid ports are contiguous from 0

 

    rc = 0;

 

out:

    rcu_unlock_domain(d);

 

    return rc;

}
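
A reset is usually driven by the toolstack on the target domain's behalf; a minimal sketch assuming struct evtchn_reset from the public headers:

    /* Caller-side sketch: close every event channel of domain dom. */
    struct evtchn_reset reset = { .dom = dom };

    (void)HYPERVISOR_event_channel_op(EVTCHNOP_reset, &reset);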
