An Introduction to hwbinder Priorities

1. Linux Process Priorities

In Linux, kernel priorities range from 0 to 139: 0 to 99 are reserved for real-time tasks and 100 to 139 for ordinary user tasks. What a program actually sets is the nice value, which ranges from -20 to +19; the lower the value, the higher the priority, with -20 the highest, 0 the default, and +19 the lowest.
The PR value is what the kernel works with, and it is related to the nice value by PR = 20 + NI.
PR therefore ranges from 0 to 39, which maps onto kernel priorities 100 to 139.
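To make the mapping concrete, here is a minimal sketch for a Linux host (the program and its output format are my own; the arithmetic simply restates PR = 20 + NI and the 100-139 range above):

/* Sketch: read back the nice value and compute the corresponding PR /
 * kernel priority. Needs no privileges for nice values >= 0. */
#include <errno.h>
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    /* Lower ourselves to nice 5 (going below 0 would require CAP_SYS_NICE). */
    if (setpriority(PRIO_PROCESS, 0, 5) != 0)
        perror("setpriority");

    errno = 0;
    int ni = getpriority(PRIO_PROCESS, 0);   /* nice value, -20..19 */
    if (ni == -1 && errno != 0)
        perror("getpriority");

    int pr = 20 + ni;                        /* PR as shown by top, 0..39 */
    printf("nice=%d PR=%d kernel prio=%d\n", ni, pr, 100 + pr);
    return 0;
}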

2. Setting the hwbinder Priority

The default priority of a binder object is set in the BHwBinder constructor, in system/libhwbinder/Binder.cpp:

// Defaults: SCHED_NORMAL policy with priority (nice) 0
BHwBinder::BHwBinder() : mSchedPolicy(SCHED_NORMAL), mSchedPriority(0), mExtras(nullptr)
{
}

int BHwBinder::getMinSchedulingPolicy() {
    return mSchedPolicy;
}

int BHwBinder::getMinSchedulingPriority() {
    return mSchedPriority;
}

The priority is set up in a couple of places. The first is when the binder driver is opened:

static int binder_open(struct inode *nodp, struct file *filp)
{
    if (binder_supported_policy(current->policy)) {
        proc->default_priority.sched_policy = current->policy;
        proc->default_priority.prio = current->normal_prio;
    } else {
        proc->default_priority.sched_policy = SCHED_NORMAL;
        proc->default_priority.prio = NICE_TO_PRIO(0);
    }
}
static bool binder_supported_policy(int policy)
{
    return is_fair_policy(policy) || is_rt_policy(policy);
}
static bool is_rt_policy(int policy)
{
    return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
    return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}
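Put differently, whatever scheduling policy a process is running with at the moment it opens the binder device is recorded as its default. The sketch below only illustrates that timing; in a real HAL process the open happens inside libhwbinder's ProcessState, and switching to SCHED_FIFO requires root or CAP_SYS_NICE, so treat this as an illustration rather than production code:

/* Sketch: become SCHED_FIFO before the binder device is opened, so that
 * binder_open() records an RT default_priority for this process. */
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    struct sched_param sp = { .sched_priority = 10 };

    /* Without privileges this fails and the process stays SCHED_NORMAL,
     * in which case binder_open() falls back to SCHED_NORMAL / nice 0. */
    if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0)
        perror("sched_setscheduler");

    /* binder_open() runs here and samples current->policy / normal_prio. */
    int fd = open("/dev/hwbinder", O_RDWR | O_CLOEXEC);
    if (fd < 0)
        perror("open /dev/hwbinder");
    else
        close(fd);
    return 0;
}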

As for the priority scheme, there are several priorities involved. The first belongs to the binder process itself and lives in binder_proc; its default value is captured when the binder driver is opened, as shown above:

struct binder_proc {
    struct binder_priority default_priority;
}
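binder_priority itself is just a (policy, priority) pair. The definition below is reconstructed from the AOSP common-kernel binder driver and may differ slightly between kernel versions:

struct binder_priority {
    unsigned int sched_policy;
    int prio;   /* roughly 100..139 for SCHED_NORMAL/BATCH, 0..99 for RT */
};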

Another is in binder_node; its values ultimately come from the scheduling policy and priority set in the BHwBinder constructor, which flatten_binder packs into the flat_binder_object flags:

struct binder_node {
    struct {
        /*
         * invariant after initialization
         */
        u8 sched_policy:2;
        u8 inherit_rt:1;
        u8 accept_fds:1;
        u8 min_priority;
    };
}

status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    if (binder != NULL) {
        BHwBinder *local = binder->localBinder();
        if (!local) {
            BpHwBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.hdr.type = BINDER_TYPE_HANDLE;
            obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            // Get policy and convert it
            int policy = local->getMinSchedulingPolicy();
            int priority = local->getMinSchedulingPriority();

            obj.flags = priority & FLAT_BINDER_FLAG_PRIORITY_MASK;
            obj.flags |= FLAT_BINDER_FLAG_ACCEPTS_FDS | FLAT_BINDER_FLAG_INHERIT_RT;
            obj.flags |= (policy & 3) << FLAT_BINDER_FLAG_SCHEDPOLICY_SHIFT;
            obj.hdr.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);

The corresponding code on the driver side is:

static struct binder_node *binder_init_node_ilocked(
                        struct binder_proc *proc,
                        struct binder_node *new_node,
                        struct flat_binder_object *fp)
{
    priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
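From this flags word the driver also derives the node's sched_policy, inherit_rt and accept_fds bits. The standalone sketch below shows that unpacking; the mask and shift constants are assumptions modeled on the uapi binder header, so verify them against your own kernel headers:

/* Sketch: unpack the scheduling-related fields from flat_binder_object.flags.
 * The constant values below are assumptions -- check binder.h on your tree. */
#include <stdio.h>
#include <stdint.h>

#define FLAT_BINDER_FLAG_PRIORITY_MASK     0xffu   /* assumed */
#define FLAT_BINDER_FLAG_ACCEPTS_FDS       0x100u  /* assumed */
#define FLAT_BINDER_FLAG_SCHEDPOLICY_SHIFT 9       /* assumed */
#define FLAT_BINDER_FLAG_SCHEDPOLICY_MASK  (3u << FLAT_BINDER_FLAG_SCHEDPOLICY_SHIFT)
#define FLAT_BINDER_FLAG_INHERIT_RT        0x800u  /* assumed */

static void decode_flags(uint32_t flags)
{
    unsigned min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
    unsigned sched_policy = (flags & FLAT_BINDER_FLAG_SCHEDPOLICY_MASK)
                                >> FLAT_BINDER_FLAG_SCHEDPOLICY_SHIFT;
    int inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
    int accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);

    printf("policy=%u min_priority=%u inherit_rt=%d accept_fds=%d\n",
           sched_policy, min_priority, inherit_rt, accept_fds);
}

int main(void)
{
    /* The flags flatten_binder builds for the default SCHED_NORMAL / prio 0 node. */
    decode_flags(FLAT_BINDER_FLAG_ACCEPTS_FDS | FLAT_BINDER_FLAG_INHERIT_RT);
    return 0;
}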

There is also a priority in struct binder_transaction:

struct binder_transaction {
    struct binder_priority  priority;
    struct binder_priority  saved_priority;
}

The policy for choosing a binder_transaction's priority is:

    if (!(t->flags & TF_ONE_WAY) &&
        binder_supported_policy(current->policy)) {
        // Synchronous (non-one-way) call: inherit the caller's policy and priority
        /* Inherit supported policies for synchronous transactions */
        t->priority.sched_policy = current->policy;
        t->priority.prio = current->normal_prio;
    } else {
        // One-way call (or unsupported policy): use the target's proc->default_priority
        /* Otherwise, fall back to the default priority */
        t->priority = target_proc->default_priority;
    }
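As a self-contained restatement of that selection (the structures are simplified stand-ins, for illustration only): a SCHED_NORMAL caller at nice -10 keeps kernel prio 110 on a synchronous call, while the same call made one-way runs at the target's default.

/* Sketch: the transaction-priority selection above, rewritten as a plain
 * function so the two cases can be exercised directly. */
#include <stdbool.h>
#include <stdio.h>

#define TF_ONE_WAY 0x01

struct prio { int sched_policy; int prio; };

static struct prio choose_transaction_priority(unsigned int flags,
                                               bool caller_policy_supported,
                                               struct prio caller,
                                               struct prio target_default)
{
    /* Synchronous call with a supported caller policy: inherit the caller. */
    if (!(flags & TF_ONE_WAY) && caller_policy_supported)
        return caller;
    /* One-way call (or unsupported policy): use the target's default. */
    return target_default;
}

int main(void)
{
    struct prio caller = { 0 /* SCHED_NORMAL */, 110 /* nice -10 */ };
    struct prio target = { 0 /* SCHED_NORMAL */, 120 /* nice 0   */ };

    struct prio sync  = choose_transaction_priority(0, true, caller, target);
    struct prio async = choose_transaction_priority(TF_ONE_WAY, true, caller, target);

    printf("sync prio=%d, one-way prio=%d\n", sync.prio, async.prio);
    return 0;
}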

After the call reaches the remote end:


static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
        ...
        BUG_ON(t->buffer == NULL);
        if (t->buffer->target_node) {
            struct binder_node *target_node = t->buffer->target_node;
            struct binder_priority node_prio;

            tr.target.ptr = target_node->ptr;
            tr.cookie =  target_node->cookie;
            node_prio.sched_policy = target_node->sched_policy;
            node_prio.prio = target_node->min_priority;
            binder_transaction_priority(current, t, node_prio,
                            target_node->inherit_rt);
            cmd = BR_TRANSACTION;

static void binder_transaction_priority(struct task_struct *task,
                    struct binder_transaction *t,
                    struct binder_priority node_prio,
                    bool inherit_rt)
{
    struct binder_priority desired_prio = t->priority;

    if (t->set_priority_called)
        return;

    t->set_priority_called = true;
    t->saved_priority.sched_policy = task->policy;  // save the target task's policy/priority before boosting, so it can be restored afterwards
    t->saved_priority.prio = task->normal_prio;

    if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
        desired_prio.prio = NICE_TO_PRIO(0);
        desired_prio.sched_policy = SCHED_NORMAL;
    }
    // If the node's minimum priority is higher than the transaction's (a lower
    // prio value), use the node's priority: on arrival the target thread is
    // boosted to at least the node's minimum.
    if (node_prio.prio < t->priority.prio ||
        (node_prio.prio == t->priority.prio &&
         node_prio.sched_policy == SCHED_FIFO)) {
        /*
         * In case the minimum priority on the node is
         * higher (lower value), use that priority. If
         * the priority is the same, but the node uses
         * SCHED_FIFO, prefer SCHED_FIFO, since it can
         * run unbounded, unlike SCHED_RR.
         */
        desired_prio = node_prio;
    }

    binder_set_priority(task, desired_prio);
}
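binder_set_priority() is where the chosen binder_priority is finally applied to the target task; it uses in-kernel scheduler interfaces that are not shown here. As a rough user-space analogue of what applying a (policy, prio) pair means (my own sketch with an assumed prio-to-nice / RT mapping, not the driver code):

/* Sketch: apply a binder_priority-like pair to the current thread from
 * user space, to illustrate the RT vs. CFS split. */
#include <sched.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

struct prio { int sched_policy; int prio; };   /* stand-in for binder_priority */

static void apply_priority(struct prio p)
{
    if (p.sched_policy == SCHED_FIFO || p.sched_policy == SCHED_RR) {
        /* RT: kernel prio 0..99 maps to an RT priority (valid range 1..99). */
        struct sched_param sp = { .sched_priority = 99 - p.prio };
        if (sched_setscheduler(0, p.sched_policy, &sp) != 0)
            perror("sched_setscheduler (RT)");
    } else {
        /* CFS: kernel prio 100..139 maps back to nice -20..19. */
        struct sched_param sp = { .sched_priority = 0 };
        if (sched_setscheduler(0, SCHED_OTHER, &sp) != 0)
            perror("sched_setscheduler (CFS)");
        if (setpriority(PRIO_PROCESS, 0, p.prio - 120) != 0)
            perror("setpriority");
    }
}

int main(void)
{
    struct prio normal = { SCHED_OTHER, 115 };  /* kernel prio 115 == nice -5 */
    apply_priority(normal);                     /* nice < 0 needs CAP_SYS_NICE */
    printf("now at nice %d\n", getpriority(PRIO_PROCESS, 0));
    return 0;
}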
            
