binder (Part 3): servicemanager startup

service_manager's main() shows the overall interaction flow:


int main(int argc, char **argv)
{
    struct binder_state *bs;
    // 1. binder_open
    bs = binder_open(128*1024);
    if (!bs) {
        ALOGE("failed to open binder driver\n");
        return -1;
    }
    //2. register as the context manager
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    //3. loop: receive data and handle events
    binder_loop(bs, svcmgr_handler);

    return 0;
}

  1. Open the driver and map it with mmap: binder_open
struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;
    
    
    bs = malloc(sizeof(*bs));
    //open the driver -> 1.1
    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open device (%s)\n",
                strerror(errno));
        goto fail_open;
    }
    //check the protocol version -> 1.2
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr, "binder: driver version differs from user space\n");
        goto fail_open;
    }
    
    bs->mapsize = mapsize;
    //mmap the shared buffer -> 1.3
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map;
    }
 
    return bs;
    
fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}
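For reference, binder_state is the small bookkeeping struct that binder_open() fills in (as defined in servicemanager's binder.c):

struct binder_state
{
    int fd;         //fd of /dev/binder
    void *mapped;   //start of the mmap'ed area
    size_t mapsize; //size passed to binder_open()
};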


1.1 open() corresponds to binder_open() in the driver; its main job is to create the binder_proc

static int binder_open(struct inode *nodp, struct file *filp){
    struct binder_proc *proc;
    //allocate and initialize the binder_proc
    
    proc = kzalloc(sizeof(*proc), GFP_KERNEL);
    get_task_struct(current);
    proc->tsk = current;
    //initialize the todo list
    INIT_LIST_HEAD(&proc->todo);
    //initialize the wait queue
    init_waitqueue_head(&proc->wait);
    proc->default_priority = task_nice(current);
    hlist_add_head(&proc->proc_node, &binder_procs);
    proc->pid = current->group_leader->pid;
    INIT_LIST_HEAD(&proc->delivered_death);
    //stash the binder_proc in filp->private_data
    filp->private_data = proc;
    return 0;
}
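For orientation, these are the binder_proc fields this article relies on (a heavily abridged sketch; the real struct in the kernel's binder.c has many more members):

struct binder_proc {
    struct hlist_node proc_node;  //link in the global binder_procs list
    struct rb_root threads;       //binder_threads of this process
    struct rb_root nodes;         //binder_nodes owned by this process
    int pid;
    struct task_struct *tsk;
    void *buffer;                 //kernel start address of the mmap'ed area
    ptrdiff_t user_buffer_offset; //user address minus kernel address
    size_t buffer_size;
    struct list_head todo;        //pending work for the whole process
    wait_queue_head_t wait;       //threads sleep here waiting for work
};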

1.2 ioctl(bs->fd, BINDER_VERSION, &vers) checks the protocol version; it is handled by binder_ioctl() in the driver

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg){
    //...
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    //...
    void __user *ubuf = (void __user *)arg;
    //..
    thread = binder_get_thread(proc);
    //...
    switch (cmd) {
        //...
        case BINDER_VERSION: {
            struct binder_version __user *ver = ubuf;
            //..
            //put the version number into ver->protocol_version
            if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
                     &ver->protocol_version)) {
                ret = -EINVAL;
                goto err;
            }
            break;
        }
    }
}
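The ioctl commands seen in this article are defined in the binder UAPI header; their definitions look like this:

#define BINDER_WRITE_READ       _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_CONTEXT_MGR  _IOW('b', 7, __s32)
#define BINDER_VERSION          _IOWR('b', 9, struct binder_version)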

1.3 mmap() corresponds to binder_mmap() in the driver, which reserves a kernel buffer of the same size and records the fixed offset between its kernel and user-space addresses


static int binder_mmap(struct file *filp, struct vm_area_struct *vma){
    int ret;
    struct vm_struct *area;
    struct binder_proc *proc = filp->private_data;
    const char *failure_string;
    struct binder_buffer *buffer;
    //...
    //cap the mapping at 4MB
    if ((vma->vm_end - vma->vm_start) > SZ_4M)
        vma->vm_end = vma->vm_start + SZ_4M;
    //...
    area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
    
    proc->buffer = area->addr;
    proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
    proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
    proc->buffer_size = vma->vm_end - vma->vm_start;
    //...
    vma->vm_private_data = proc;
    //the first buffer spans the whole mapping and starts out free
    buffer = proc->buffer;
    INIT_LIST_HEAD(&proc->buffers);
    list_add(&buffer->entry, &proc->buffers);
    buffer->free = 1;
    //...
    proc->vma = vma;
    proc->vma_vm_mm = vma->vm_mm;
    return 0;
}
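The point of user_buffer_offset recorded above: the same physical pages end up mapped at a kernel address (proc->buffer) and a user address (vma->vm_start), so the driver can translate between the two views with plain pointer arithmetic. A minimal illustrative sketch; kernel_to_user_addr is not a real driver function, the same arithmetic appears inline in binder_transaction():

//illustrative only: turn a kernel buffer address into the address the
//user-space process sees for the same memory
static uintptr_t kernel_to_user_addr(struct binder_proc *proc, void *kaddr)
{
    return (uintptr_t)kaddr + proc->user_buffer_offset;
}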

  2. Register as the context manager
int binder_become_context_manager(struct binder_state *bs)
{   
    // talk to the driver; the cmd is BINDER_SET_CONTEXT_MGR -> 2.1
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}

2.1 The driver dispatches on BINDER_SET_CONTEXT_MGR to the matching code block, which creates the binder_node stored in the global binder_context_mgr_node. That node is globally unique and must not be created twice.

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    //...
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    //...
    thread = binder_get_thread(proc);
    switch (cmd) {
        //...
        case BINDER_SET_CONTEXT_MGR:
            //the SELinux hook security_binder_set_context_mgr() is
            //called inside binder_ioctl_set_ctx_mgr()
            ret = binder_ioctl_set_ctx_mgr(filp);
            if (ret)
                goto err;
            break;
            
        //...
    }
}


static struct binder_node *binder_context_mgr_node;

static int binder_ioctl_set_ctx_mgr(struct file *filp){
    int ret = 0;
    //...
    struct binder_proc *proc = filp->private_data;
    //...
    //bail out if binder_context_mgr_node has already been created
    if (binder_context_mgr_node != NULL) {
        pr_err("BINDER_SET_CONTEXT_MGR already set\n");
        ret = -EBUSY;
        goto out;
    }
    
    //create service_manager's binder_node -> 2.1.1
    binder_context_mgr_node = binder_new_node(proc, 0, 0);
    if (binder_context_mgr_node == NULL) {
        ret = -ENOMEM;
        goto out;
    }
    //bump the reference counts
    binder_context_mgr_node->local_weak_refs++;
    binder_context_mgr_node->local_strong_refs++;
    binder_context_mgr_node->has_strong_ref = 1;
    binder_context_mgr_node->has_weak_ref = 1;
out:
    return ret;
}


2.1.1 binder_new_node() creates a new binder_node and points its proc field at the current process's binder_proc

static struct binder_node *binder_new_node(struct binder_proc *proc,
                       binder_uintptr_t ptr,
                       binder_uintptr_t cookie)
{
    struct rb_node **p = &proc->nodes.rb_node;
    struct rb_node *parent = NULL;
    struct binder_node *node;
    
    
    while (*p) {
        parent = *p;
        node = rb_entry(parent, struct binder_node, rb_node);

        if (ptr < node->ptr)
            p = &(*p)->rb_left;
        else if (ptr > node->ptr)
            p = &(*p)->rb_right;
        else
            return NULL;
    }
    //allocate the node
    node = kzalloc(sizeof(*node), GFP_KERNEL);
    if (node == NULL)
        return NULL;
    binder_stats_created(BINDER_STAT_NODE);
    rb_link_node(&node->rb_node, parent, p);
    rb_insert_color(&node->rb_node, &proc->nodes);
    node->debug_id = ++binder_last_id;
    //proc points at the owning process's binder_proc
    node->proc = proc;
    //ptr holds the user-space address of the binder object
    //(0 for the service manager's node)
    node->ptr = ptr;
    node->cookie = cookie;
    node->work.type = BINDER_WORK_NODE;
    INIT_LIST_HEAD(&node->work.entry);
    INIT_LIST_HEAD(&node->async_todo);
    binder_debug(BINDER_DEBUG_INTERNAL_REFS,
             "%d:%d node %d u%016llx c%016llx created\n",
             proc->pid, current->pid, node->debug_id,
             (u64)node->ptr, (u64)node->cookie);
    return node;
    
}
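The mirror image of the insertion walk above is binder_get_node(), which later resolves a user-space ptr back to its binder_node:

static struct binder_node *binder_get_node(struct binder_proc *proc,
                       binder_uintptr_t ptr)
{
    struct rb_node *n = proc->nodes.rb_node;
    struct binder_node *node;

    //same red-black tree walk as in binder_new_node(), keyed on ptr
    while (n) {
        node = rb_entry(n, struct binder_node, rb_node);

        if (ptr < node->ptr)
            n = n->rb_left;
        else if (ptr > node->ptr)
            n = n->rb_right;
        else
            return node;
    }
    return NULL;
}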

  3. binder_loop(): set the looper flag and start the read loop

//svcmgr_handler is a function pointer: the request handler
binder_loop(bs, svcmgr_handler);


void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];
    
    //build a binder_write_read and exchange data with the driver via ioctl
    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;
    //remember this command: BC_ENTER_LOOPER
    readbuf[0] = BC_ENTER_LOOPER;
    //tell the driver this thread has entered the loop -> 3.1
    binder_write(bs, readbuf, sizeof(uint32_t));

    //start reading data
    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;
        
        //开始读取数据 -->3.2
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }
        //parse the data; func is the svcmgr_handler function --> 3.3
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}
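binder_write_read is the envelope for both directions of the exchange: the driver consumes the write half and fills in the read half (from the binder UAPI header):

struct binder_write_read {
    binder_size_t write_size;      //bytes available in write_buffer
    binder_size_t write_consumed;  //bytes the driver actually consumed
    binder_uintptr_t write_buffer;
    binder_size_t read_size;       //bytes available in read_buffer
    binder_size_t read_consumed;   //bytes the driver actually filled in
    binder_uintptr_t read_buffer;
};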

3.1 binder_write() fills in only the write half of a binder_write_read, then calls into the driver to flip the looper flag

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    //data holds the BC_ENTER_LOOPER command
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    //talk to the driver -> 3.1.1
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}


3.1.1 ioctl(bs->fd, BINDER_WRITE_READ, &bwr) lands in binder_ioctl(), which dispatches to binder_ioctl_write_read()

//cmd = BINDER_WRITE_READ
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg){
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    //arg points at the user-space binder_write_read
    void __user *ubuf = (void __user *)arg;
    //...
    thread = binder_get_thread(proc);
    //...
    switch (cmd) {
        case BINDER_WRITE_READ:
            ret = binder_ioctl_write_read(filp, cmd, arg, thread);
            //...
            break;
            
        //...
    }
    //...
}


static int binder_ioctl_write_read(struct file *filp,unsigned int cmd, unsigned long arg,struct binder_thread *thread){
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;
    //...
    //copy the binder_write_read from user space into the kernel
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
    //...
    //at this point write_size > 0
    if (bwr.write_size > 0) {
        ret = binder_thread_write(proc, thread,
                      bwr.write_buffer,
                      bwr.write_size,
                      &bwr.write_consumed);
    
        //...
    }
    
    //copy the updated bwr from the kernel back to user space
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
    
    //...
out:
    return ret;
}

static int binder_thread_write(struct binder_proc *proc,struct binder_thread *thread,binder_uintptr_t binder_buffer, size_t size,binder_size_t *consumed){
    uint32_t cmd;
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;
    while (ptr < end && thread->return_error == BR_OK) {
        //read the command at ptr; here cmd == BC_ENTER_LOOPER
        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        //...
        switch (cmd) {
            //...
            case BC_ENTER_LOOPER:
                //...
                //set the flag: this thread has entered the loop
                thread->looper |= BINDER_LOOPER_STATE_ENTERED;
                break;
            //...
        }
    
    }
    return 0;
}
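The looper bits that BC_ENTER_LOOPER and friends toggle are defined in the driver as:

enum {
    BINDER_LOOPER_STATE_REGISTERED  = 0x01, //BC_REGISTER_LOOPER (spawned thread)
    BINDER_LOOPER_STATE_ENTERED     = 0x02, //BC_ENTER_LOOPER (main thread)
    BINDER_LOOPER_STATE_EXITED      = 0x04,
    BINDER_LOOPER_STATE_INVALID     = 0x08,
    BINDER_LOOPER_STATE_WAITING     = 0x10,
    BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};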



3.2 ioctl(bs->fd, BINDER_WRITE_READ, &bwr): this time read_size is non-zero, so the call ends up in binder_thread_read()

//cmd = BINDER_WRITE_READ
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg){
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    //arg points at the user-space binder_write_read
    void __user *ubuf = (void __user *)arg;
    //...
    thread = binder_get_thread(proc);
    //...
    switch (cmd) {
        case BINDER_WRITE_READ:
            ret = binder_ioctl_write_read(filp, cmd, arg, thread);
            //...
            break;
            
        //...
    }
    //...
}


static int binder_ioctl_write_read(struct file *filp,unsigned int cmd, unsigned long arg,struct binder_thread *thread){
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;
    //...
    //copy the binder_write_read from user space into the kernel
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
    //...
    //this time read_size > 0
    if (bwr.read_size > 0) {
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
                     bwr.read_size,
                     &bwr.read_consumed,
                     filp->f_flags & O_NONBLOCK);
    
        //...
        //if proc->todo still has pending work, wake up any threads
        //waiting on the process queue
        if (!list_empty(&proc->todo)){
            wake_up_interruptible(&proc->wait);
        }
        if (ret < 0) {
            //on error, copy the partially processed bwr back to user space before bailing out
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
        //...
    }
    //...
    //copy the final bwr from the kernel back to user space
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}



static int binder_thread_read(struct binder_proc *proc,struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size,binder_size_t *consumed, int non_block){
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    int ret = 0;
    //wait for process-wide work only if this thread has no transaction
    //stack and its own todo list is empty
    int wait_for_proc_work = thread->transaction_stack == NULL &&
                list_empty(&thread->todo);
    //nothing consumed yet: put a BR_NOOP at the head of the read buffer
    if (*consumed == 0) {
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
    }
    //...
    //loop, pulling work items off the todo lists
    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;
        //pick the next binder_work to execute
        if (!list_empty(&thread->todo)) {
            //take it from thread->todo first
            w = list_first_entry(&thread->todo, struct binder_work,
                         entry);
        } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
            //otherwise from proc->todo
            w = list_first_entry(&proc->todo, struct binder_work,
                         entry);
        } else {
            //no work queued: the thread sleeps until work arrives (the
            //blocking wait happens earlier in this function; see the
            //sketch after this listing)
            //...
        }
        
        
        //...
        //act on the work item, filling in a binder_transaction_data for user space
        switch (w->type) {
            //...
        }

    }
}
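The blocking itself happens earlier in binder_thread_read(), in the part elided by //... above: the thread marks itself waiting and sleeps until there is work. Condensed from the kernel source (the non-blocking O_NONBLOCK branch is omitted here):

    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    if (wait_for_proc_work) {
        //no thread-local work: sleep on the process-wide queue
        ret = wait_event_freezable_exclusive(proc->wait,
                binder_has_proc_work(proc, thread));
    } else {
        //sleep until work addressed to this specific thread arrives
        ret = wait_event_freezable(thread->wait,
                binder_has_thread_work(thread));
    }
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;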

3.3 binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func): the read in 3.2 filled readbuf via the bwr struct; now it is parsed command by command


// func: the svcmgr_handler function
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;
    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);

        switch(cmd) {
            //the first pass through, with no payload, lands here
            case BR_NOOP:
                break;
                
            //...
            
            //a real inbound request: the peer sent BC_TRANSACTION,
            //which arrives here as BR_TRANSACTION
            case BR_TRANSACTION: {
                struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
                //...
                if (func) {
                    unsigned rdata[256/4];
                    struct binder_io msg;
                    struct binder_io reply;
                    int res;
                    //wrap the binder_transaction_data in binder_io structs
                    //(see bio_init_from_txn after this listing)
                    bio_init(&reply, rdata, sizeof(rdata), 4);
                    bio_init_from_txn(&msg, txn);
                    //hand off to func, i.e. the svcmgr_handler function 3.3.1
                    res = func(bs, txn, &msg, &reply);
                    //send the reply back to the caller
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
                }
                ptr += sizeof(*txn);
                break;
            }
            //...
         }
    }
    return r;
}
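bio_init_from_txn() used above does no copying; it just points the binder_io cursors at the transaction's data and offsets buffers (from servicemanager's binder.c):

void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
{
    bio->data = bio->data0 = (char *)(intptr_t) txn->data.ptr.buffer;
    bio->offs = bio->offs0 = (binder_size_t *)(intptr_t) txn->data.ptr.offsets;
    bio->data_avail = txn->data_size;
    bio->offs_avail = txn->offsets_size / sizeof(size_t);
    bio->flags = BIO_F_SHARED;
}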

3.3.1 svcmgr_handler() executes service_manager's actual cmd operations: adding a service, finding a service, and listing all services


int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;

    //ALOGI("target=%x code=%d pid=%d uid=%d\n",
    //  txn->target.handle, txn->code, txn->sender_pid, txn->sender_euid);

    if (txn->target.handle != svcmgr_handle)
        return -1;

    if (txn->code == PING_TRANSACTION)
        return 0;

    // Equivalent to Parcel::enforceInterface(), reading the RPC
    // header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it
    // further (since we do no outbound RPCs anyway).
    strict_policy = bio_get_uint32(msg);
    s = bio_get_string16(msg, &len);
    if (s == NULL) {
        return -1;
    }

    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s, len));
        return -1;
    }


    switch(txn->code) {
        case SVC_MGR_GET_SERVICE:
        case SVC_MGR_CHECK_SERVICE:
            s = bio_get_string16(msg, &len);
            if (s == NULL) {
                return -1;
            }
            //look the service handle up by name in the linked list
            handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
            if (!handle)
                break;
            bio_put_ref(reply, handle);
            return 0;
    
        case SVC_MGR_ADD_SERVICE:
            s = bio_get_string16(msg, &len);
            if (s == NULL) {
                return -1;
            }
            handle = bio_get_ref(msg);
            allow_isolated = bio_get_uint32(msg) ? 1 : 0;
            //add the service to the list
            if (do_add_service(bs, s, len, handle, txn->sender_euid,
                allow_isolated, txn->sender_pid))
                return -1;
            break;
    
        case SVC_MGR_LIST_SERVICES: {
            uint32_t n = bio_get_uint32(msg);
    
            if (!svc_can_list(txn->sender_pid)) {
                ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
                        txn->sender_euid);
                return -1;
            }
            //walk the service list to the n-th entry
            si = svclist;
            while ((n-- > 0) && si)
                si = si->next;
            if (si) {
                bio_put_string16(reply, si->name);
                return 0;
            }
            return -1;
        }
        default:
            ALOGE("unknown code %d\n", txn->code);
            return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}
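The registry behind do_find_service()/do_add_service() is a singly linked list of svcinfo records, and lookup is a linear scan by name (from service_manager.c):

struct svcinfo
{
    struct svcinfo *next;
    uint32_t handle;            //binder handle of the service
    struct binder_death death;
    int allow_isolated;
    size_t len;
    uint16_t name[0];           //UTF-16 service name
};

struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t))) {
            return si;
        }
    }
    return NULL;
}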

Summary:

1. Open the driver; this creates the process's binder_proc.
2. mmap establishes the mapping between user space and kernel space.
3. Tell the driver to create a binder_node that becomes the Binder context manager node.
4. Tell the driver to set the looper flag, marking the start of the loop.
5. Loop reading the todo lists; based on each binder_work's type, the driver adjusts the binder_transaction_data and returns it to user space.
6. User space dispatches on the binder_transaction_data's code and arguments (e.g. add a service, find a service), writes the result into reply, and hands it back through the driver.
