Binder系列(1)——ServiceManager

1. 概述

在一台Android 8.1的手机中,可以看到三个servicemanager:

  1. servicemanager,管理系统服务,本文只讨论servicemanager;
  2. vndservicemanager,管理厂商服务,对应的文件节点是/dev/vndbinder,servicemanager和vndservicemanager使用的是同一份代码,都是由service_manager.c编译而来;
  3. hwservicemanager,用于管理hidl服务,因此其实现和servicemanager完全不同,使用的binder库也完全不同。
    (原文此处插有配图)

2. 启动

/frameworks/native/cmds/servicemanager/servicemanager.rc

service servicemanager /system/bin/servicemanager
...

3. 初始化

frameworks/native/cmds/servicemanager/service_manager.c

 /*
  * Entry point shared by servicemanager and vndservicemanager (same binary,
  * different driver node passed via argv[1]).
  */
 int main(int argc, char** argv)
 {
     struct binder_state *bs;
     union selinux_callback cb;
     char *driver;

     if (argc > 1) {
         driver = argv[1];             // vndservicemanager passes "/dev/vndbinder" at startup
     } else {
         driver = "/dev/binder";       // default node for the system servicemanager
     }

     /* Open the binder driver and mmap a 128KB receive buffer. */
     bs = binder_open(driver, 128*1024);

     /* Register this process as the binder context manager (handle 0). */
     if (binder_become_context_manager(bs)) {
...
     }

      /* Main loop: read requests from the driver and dispatch them to svcmgr_handler. */
      binder_loop(bs, svcmgr_handler);

      return 0;
 }

3.1 binder_open@servicemanager

frameworks/native/cmds/servicemanager/binder.c

/*
 * Userspace binder_open: opens the driver node, checks the protocol version,
 * and mmaps a receive buffer of `mapsize` bytes. The numbered markers (1..3)
 * are explained in the article text below the listing.
 */
struct binder_state *binder_open(const char* driver, size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));                           -------------1

    bs->fd = open(driver, O_RDWR | O_CLOEXEC);                  ------ 2

    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
...
    }

    bs->mapsize = mapsize;                  // 128K for servicemanager
    /* Read-only private mapping: userspace only reads; the driver copies data in. */
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);                    ---------3
....

    return bs;
}
  1. binder_state成员有:open /dev/binder的fd,mmap映射的大小,mmap后返回的buffer指针mapped;
  2. 调用kernel binder driver的binder_open;
  3. 调用kernel binder driver的binder_mmap。

3.1.1 binder_open@driver

drivers/android/binder.c

/*
 * Kernel-side binder_open (listing truncated by the article: the closing
 * brace and error paths are omitted). Allocates and initializes the
 * per-process binder_proc.
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
        struct binder_proc *proc;
        struct binder_device *binder_dev;

        proc = kzalloc(sizeof(*proc), GFP_KERNEL);           -----------1

        get_task_struct(current->group_leader);       /* thread-group leader of the current thread */
        proc->tsk = current->group_leader;
        binder_init_worklist(&proc->todo);
        if (binder_supported_policy(current->policy)) {     /* supported policies: SCHED_NORMAL, SCHED_BATCH, SCHED_FIFO, SCHED_RR; the priority is recorded as well */
                proc->default_priority.sched_policy = current->policy;
                proc->default_priority.prio = current->normal_prio;
        } else {
                /* Unsupported policy: fall back to SCHED_NORMAL at the default nice level. */
                proc->default_priority.sched_policy = SCHED_NORMAL;
                proc->default_priority.prio = NICE_TO_PRIO(0);
        }
        /* Recover the binder_device (binder/vndbinder/...) from the miscdevice. */
        binder_dev = container_of(filp->private_data, struct binder_device,
                                  miscdev);
        proc->context = &binder_dev->context;
        binder_alloc_init(&proc->alloc);

        mutex_lock(&binder_procs_lock);

        binder_stats_created(BINDER_STAT_PROC);
        hlist_add_head(&proc->proc_node, &binder_procs);    /* track in the global proc list */
        proc->pid = current->group_leader->pid;
        spin_lock_init(&proc->proc_lock);
        binder_init_worklist(&proc->delivered_death);
        atomic_set(&proc->ready_threads, 0);
        proc->max_threads = 0;
        proc->requested_threads = 0;
        proc->requested_threads_started = 0;
        INIT_LIST_HEAD(&proc->zombie_proc.list_node);
        INIT_HLIST_HEAD(&proc->zombie_refs);
        INIT_HLIST_HEAD(&proc->zombie_nodes);
        INIT_HLIST_HEAD(&proc->zombie_threads);
        INIT_LIST_HEAD(&proc->waiting_threads);
        filp->private_data = proc;     /* the binder_proc is stashed in the file's private_data */

驱动中的binder_open主要是创建了该进程的binder_proc并进行初始化,binder_proc具体字段的含义见TODO。

3.1.2 binder_mmap@driver

 /*
  * Kernel-side binder_mmap: validates the requested mapping and delegates
  * the real work to binder_alloc_mmap_handler. The numbered markers (1..2)
  * are explained in the article text below the listing.
  */
 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 {
         int ret;
         struct binder_proc *proc = filp->private_data;
         const char *failure_string;

         /* mmap must come from the same process that opened the driver. */
         if (proc->tsk != current->group_leader)
                 return -EINVAL;

         if ((vma->vm_end - vma->vm_start) > SZ_4M)                  -------------1
                 vma->vm_end = vma->vm_start + SZ_4M;

         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
                      "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
                      proc->pid, vma->vm_start, vma->vm_end,
                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
                      (unsigned long)pgprot_val(vma->vm_page_prot));

         if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
                 ret = -EPERM;
                 failure_string = "bad vm_flags";
                 goto err_bad_arg;
         }
         /* The mapping may never become writable and is not inherited across fork. */
         vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
         vma->vm_ops = &binder_vm_ops;
         vma->vm_private_data = proc;

         ret = binder_alloc_mmap_handler(&proc->alloc, vma);                 -------------2

         if (!ret) {
                 proc->files = get_files_struct(current);
                 return 0;
         }

 err_bad_arg:
         pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
                proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
         return ret;
 }
  1. 限制映射的大小,不得大于4M;
  2. 映射内存的工作在binder_alloc_mmap_handler中

3.1.2.1 binder_alloc_mmap_handler

/*
 * Sets up the per-process binder buffer area (listing abbreviated by the
 * article: error handling is omitted). The numbered markers (1..8) are
 * explained in the article text below the listing.
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                              struct vm_area_struct *vma)
{
        int ret;
        struct vm_struct *area;
        const char *failure_string;
        struct binder_buffer *buffer;


        /* Reserve a kernel virtual address range the same size as the user vma. */
        area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);            -----------1
        alloc->buffer = area->addr;           -------------2
        /* Fixed offset between user-space and kernel-space addresses of the buffer. */
        WRITE_ONCE(alloc->user_buffer_offset,
                         vma->vm_start - (uintptr_t)alloc->buffer);
        /* One struct page pointer slot per page of the mapping; no physical pages yet. */
        alloc->pages = kzalloc(sizeof(alloc->pages[0]) *                ------------3
                                   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
                               GFP_KERNEL);
        alloc->buffer_size = vma->vm_end - vma->vm_start;

         buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);            ---------4

         /* Map physical pages only for the first BINDER_MIN_ALLOC bytes up front. */
         if (__binder_update_page_range(alloc, 1, alloc->buffer,                   ---------5
                                        alloc->buffer + BINDER_MIN_ALLOC, vma)) {
         }
         buffer->data = alloc->buffer;                                         ------------6
         list_add(&buffer->entry, &alloc->buffers);                    -----------7
         buffer->free = 1;
         binder_insert_free_buffer(alloc, buffer);                      ---------8
         /* Half of the buffer space is reserved for async transactions. */
         alloc->free_async_space = alloc->buffer_size / 2;

         barrier();
         alloc->vma = vma;
         alloc->vma_vm_mm = vma->vm_mm;

         return 0;
 }

首先需要了解binder_alloc和binder_buffer两个数据结构,前者用来管理binder_proc的内存分配,后者是分配内存的具体实现
(原文此处插有 binder_alloc 与 binder_buffer 数据结构的示意图)

  1. 在内核空间reserve一块连续的虚拟地址空间;
  2. 把这个地址给到alloc->buffer。用户空间的vma;
  3. 这里只是分配struct page 数据结构所需的地址,并不是分配物理内存;
  4. 分配binder_buffer结构体;
  5. __binder_update_page_range将用户空间和内核空间的一段内存映射到同一块物理地址,初始化的时候只映射一个page;
  6. buffer->data 保存的也是内核空间reserve的一段连续虚拟地址;
  7. 把binder_buffer插入到binder_alloc的buffers链表中;
  8. 插入到free buffer中。

3.2 binder_become_context_manager@servicemanager

native/cmds/servicemanager/binder.c


/*
 * Asks the driver to make the calling process the binder context manager,
 * i.e. the owner of binder handle 0.
 */
int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}

3.2.1 binder_ioctl_set_ctx_mgr@driver

drivers/android/binder.c

binder_ioctl会调用binder_ioctl_set_ctx_mgr

		....
         /*
          * Fragment of binder_ioctl_set_ctx_mgr (surrounding code elided by
          * the article).
          */
         /* Only one context manager may register; reject a second, different euid. */
         if (uid_valid(context->binder_context_mgr_uid)) {
                 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
                         pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
                                from_kuid(&init_user_ns, curr_euid),
                                from_kuid(&init_user_ns,
                                          context->binder_context_mgr_uid));
                         ret = -EPERM;
                         goto out;
                 }
         } else {
                 context->binder_context_mgr_uid = curr_euid;    /* first caller wins */
         }
         /* Create the binder_node that represents the context manager itself. */
         temp = binder_new_node(proc, 0, 0);
         if (temp == NULL) {
                 context->binder_context_mgr_uid = INVALID_UID;
                 ret = -ENOMEM;
                 goto out;
         }
         /* Pin the node with strong/weak references so it stays alive. */
         temp->local_weak_refs++;
         temp->local_strong_refs++;
         temp->has_strong_ref = 1;
         temp->has_weak_ref = 1;
         context->binder_context_mgr_node = temp;
         binder_put_node(temp);
         ...

逻辑比较简单,首先设置UID,然后新建一个binder_node,对应userspace的SM的binder实体。

3.3 binder_loop@servicemanager

/*
 * Main service loop of servicemanager: announce this thread as a looper,
 * then block in BINDER_WRITE_READ and feed each batch of commands to
 * binder_parse. The numbered markers (1..2) are explained in the article
 * text below the listing.
 */
void binder_loop(struct binder_state *bs, binder_handler func)        // func is svcmgr_handler
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    /* Nothing to write inside the loop's ioctl; we only read below. */
    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));            -------------------1

    for (;;) {                                                     -------------------2
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        /* Blocks in the driver until there is work for this process. */
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        /* Decode the CMD + payload stream just read from the driver. */
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
     }
 }
  1. 向driver里面发送BC_ENTER_LOOPER命令:
    1.1 binder_ioctl中先调用binder_get_thread,获取当前线程对应的binder_thread;
    1.2 binder_ioctl_write_read再根据write_size大于0,调用binder_thread_write;
    1.3 根据BC_ENTER_LOOPER,设置binder_thread的looper值为:thread->looper |= BINDER_LOOPER_STATE_ENTERED
  2. 进入一个无限循环,不断从驱动中读取数据;
  3. binder_parse对读取的数据进行解析,读取的数据是CMD+binder_transaction_data的组合,这里面比较重要的是BR_TRANSACTION和BR_REPLY,后续都会有分析;

你可能感兴趣的:(binder,Android,Binder,ServiceManager,framwork)