The Android Binder driver consists mainly of three parts: initialization (binder_init), opening the device file (binder_open), and mapping memory (binder_mmap).
1. Initialization
static int __init binder_init(void)
{
	int ret;

	/* Create the binder directory under /proc on the device, i.e. /proc/binder */
	binder_proc_dir_entry_root = proc_mkdir("binder", NULL);
	if (binder_proc_dir_entry_root)
		/* Create the proc directory under /proc/binder, i.e. /proc/binder/proc;
		 * every process that uses Binder IPC gets a file named after its PID
		 * in this directory */
		binder_proc_dir_entry_proc = proc_mkdir("proc", binder_proc_dir_entry_root);
	/* Register the Binder device as a miscellaneous (misc) character device */
	ret = misc_register(&binder_miscdev);
	if (binder_proc_dir_entry_root) {
		/* Create the files state, stats, transactions, transaction_log and
		 * failed_transaction_log under /proc/binder; reading these files
		 * reports the Binder driver's runtime state */
		create_proc_read_entry("state", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_state, NULL);
		create_proc_read_entry("stats", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_stats, NULL);
		create_proc_read_entry("transactions", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_transactions, NULL);
		create_proc_read_entry("transaction_log", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_transaction_log, &binder_transaction_log);
		create_proc_read_entry("failed_transaction_log", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_transaction_log, &binder_transaction_log_failed);
	}
	return ret;
}
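The binder_miscdev structure passed to misc_register() is what wires binder_open and binder_mmap (discussed below) into the VFS. As a sketch based on the driver source of the same era (treat the exact member list as an assumption), the declarations look roughly like this:

static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.mmap = binder_mmap,        /* invoked when a process mmaps /dev/binder */
	.open = binder_open,        /* invoked when a process opens /dev/binder */
	.flush = binder_flush,
	.release = binder_release,
};

static struct miscdevice binder_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,  /* let the misc framework pick a minor number */
	.name = "binder",             /* device node appears as /dev/binder */
	.fops = &binder_fops,
};

Because it is a misc device, the driver does not need its own major number; the misc framework multiplexes it under major 10 and creates the /dev/binder node.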
2. Opening the device file
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
		printk(KERN_INFO "binder_open: %d:%d\n", current->group_leader->pid, current->pid);

	/* Allocate a binder_proc structure proc, which describes a process that
	 * is using the Binder IPC mechanism */
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	/* Take a reference on the current task and point proc->tsk at it */
	get_task_struct(current);
	proc->tsk = current;
	/* Initialize the todo list; when the process receives an IPC request,
	 * the Binder driver wraps the request in a work item and appends it to
	 * this pending-work list */
	INIT_LIST_HEAD(&proc->todo);
	/* Initialize the wait queue; idle Binder threads from the process's
	 * thread pool sleep on it, and the driver wakes one up whenever a new
	 * work item is added to the host process's todo list */
	init_waitqueue_head(&proc->wait);
	/* Record the process's priority as the default priority */
	proc->default_priority = task_nice(current);

	mutex_lock(&binder_lock);
	binder_stats.obj_created[BINDER_STAT_PROC]++;
	/* Add proc to the global hash list binder_procs; walking this list
	 * reveals every process currently using Binder IPC */
	hlist_add_head(&proc->proc_node, &binder_procs);
	/* Record the PID */
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	/* Save the initialized proc in the private_data field of filp (the file
	 * pointer). When a process opens /dev/binder, the kernel returns a file
	 * descriptor fd that is associated with the binder_proc through filp,
	 * so when the process later calls mmap or ioctl on fd, the driver can
	 * retrieve the binder_proc via filp->private_data */
	filp->private_data = proc;
	mutex_unlock(&binder_lock);

	if (binder_proc_dir_entry_proc) {
		char strbuf[11];
		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		remove_proc_entry(strbuf, binder_proc_dir_entry_proc);
		/* Create a read-only file named after the PID under /proc/binder/proc */
		create_proc_read_entry(strbuf, S_IRUGO, binder_proc_dir_entry_proc, binder_read_proc_proc, proc);
	}

	return 0;
}
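From user space, binder_open and binder_mmap are exercised simply by opening /dev/binder and mmap-ing the returned descriptor, which is what libbinder's ProcessState does at startup. A minimal standalone sketch follows; the 1 MB mapping size is illustrative (libbinder's actual BINDER_VM_SIZE is close to, but slightly smaller than, 1 MB):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define BINDER_VM_SIZE (1 * 1024 * 1024)  /* illustrative size, not libbinder's exact value */

int main(void)
{
	/* Triggers binder_open(): the driver allocates a binder_proc for us */
	int fd = open("/dev/binder", O_RDWR);
	if (fd < 0) {
		perror("open /dev/binder");
		return 1;
	}

	/* Triggers binder_mmap(): the driver reserves a matching kernel address
	 * range and records user_buffer_offset. The mapping must be read-only
	 * from user space, otherwise binder_mmap() rejects it with -EPERM. */
	void *base = mmap(NULL, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
	if (base == MAP_FAILED) {
		perror("mmap /dev/binder");
		close(fd);
		return 1;
	}
	printf("Binder buffer mapped at %p\n", base);

	munmap(base, BINDER_VM_SIZE);
	close(fd);
	return 0;
}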
3. Mapping memory
/* The parameter vma points to a vm_area_struct, which describes a range of
 * user virtual address space (within the 0~3G user range); its physical
 * pages need not be contiguous */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	/* area points to a vm_struct, which describes a range of kernel virtual
	 * address space within (3G+896M+8M)~4G: the first 896M of kernel space
	 * linearly maps the first 896M of physical memory, and the 8M gap is a
	 * guard region used to catch invalid pointers. Its physical pages need
	 * not be contiguous either */
	struct vm_struct *area;
	/* Retrieve the binder_proc structure created in binder_open */
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	/* The kernel buffer the Binder driver allocates for a process is at most 4M */
	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
		printk(KERN_INFO
			"binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
			proc->pid, vma->vm_start, vma->vm_end,
			(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
			(unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		/* The user-space mapping of the kernel buffer must be read-only;
		 * a writable mapping is rejected */
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	/* The mapping is not copied into child processes on fork (VM_DONTCOPY)
	 * and can never later be made writable (~VM_MAYWRITE) */
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	if (proc->buffer) {
		/* The kernel buffer has already been allocated; calling mmap a
		 * second time fails */
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	/* Reserve a range of kernel virtual address space of size
	 * (vma->vm_end - vma->vm_start) */
	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	/* Save the start address of the reserved kernel space in proc->buffer */
	proc->buffer = area->addr;
	/* Save the difference between the user-space start address to be mapped
	 * and the reserved kernel-space start address in proc->user_buffer_offset.
	 * The process accesses the kernel buffer through user-space addresses,
	 * while the Binder driver accesses it through kernel-space addresses;
	 * given either address, adding or subtracting this offset yields the other */
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n",
				proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	/* Allocate the array of page pointers that will track the physical
	 * pages backing vma and area, and save it in proc->pages; the pages
	 * themselves are allocated later by binder_update_page_range */
	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	/* Record the size of the reserved kernel space in proc->buffer_size */
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	/* Allocate a single physical page for the start of the buffer, covering
	 * the kernel range proc->buffer ~ (proc->buffer + PAGE_SIZE); the rest
	 * of the buffer is committed on demand */
	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	/* Add this first buffer to the process's kernel buffer list proc->buffers */
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	/* The newly allocated buffer is free, so insert it into the process's
	 * red-black tree of free kernel buffers, free_buffers */
	binder_insert_free_buffer(proc, buffer);
	/* Reserve half of the buffer space for asynchronous transactions */
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
	proc->vma = vma;

	/*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/

	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
err_bad_arg:
	printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
		proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
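The user_buffer_offset arithmetic recorded above is what makes Binder a one-copy IPC mechanism: the driver copies transaction data from the sender into the receiver's kernel buffer once, and the receiver then reads it directly through its user-space mapping of the same physical pages. The two views can be converted into each other with simple pointer arithmetic; the following helpers are purely illustrative and do not exist in the driver:

/* Illustrative helpers, not part of the driver: translate between the
 * kernel's and the process's views of the same Binder buffer memory. */
static void __user *binder_kaddr_to_uaddr(struct binder_proc *proc, void *kaddr)
{
	/* kernel address + offset = user address backed by the same page */
	return (void __user *)((uintptr_t)kaddr + proc->user_buffer_offset);
}

static void *binder_uaddr_to_kaddr(struct binder_proc *proc, void __user *uaddr)
{
	/* user address - offset = the driver's kernel address for that page */
	return (void *)((uintptr_t)uaddr - proc->user_buffer_offset);
}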
/* binder_update_page_range allocates or frees the physical pages behind a
 * specified range of kernel address space:
 *   proc     - the process being operated on
 *   allocate - 0 to free physical pages, non-zero to allocate them
 *   start    - start address of the kernel range to operate on
 *   end      - end address of the kernel range to operate on
 *   vma      - the user address space to map the pages into */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
	void *start, void *end, struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;

	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
		printk(KERN_INFO "binder: %d: %s pages %p-%p\n",
			proc->pid, allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
		       "map pages in userspace, no vma\n", proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		struct page **page_array_ptr;
		/* Locate the slot in the target process's page array proc->pages
		 * that corresponds to the kernel range page_addr ~ (page_addr + PAGE_SIZE) */
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		/* Allocate a physical page */
		*page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (*page == NULL) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "for page at %p\n", proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		tmp_area.addr = page_addr;
		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
		page_array_ptr = page;
		/* Map the physical page into kernel address space */
		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
		if (ret) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr = (uintptr_t)page_addr + proc->user_buffer_offset;
		/* Map the same physical page into user address space */
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		/* Locate the slot in proc->pages that corresponds to the kernel
		 * range page_addr ~ (page_addr + PAGE_SIZE) */
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			/* Remove the mapping between the physical page and user space */
			zap_page_range(vma, (uintptr_t)page_addr + proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		/* Remove the mapping between the physical page and kernel space */
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		/* Release the physical page */
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}
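As noted above, binder_mmap commits only the first physical page; the remaining pages are committed and released on demand by the driver's buffer allocator. The following pair of helpers is a hedged sketch of that pattern only: the function names are hypothetical, and buffer->data, end_page_addr and buffer_size stand in for values that the real binder_alloc_buf and binder_free_buf compute from the chosen buffer before making these calls.

/* Hypothetical helpers condensing the on-demand call sites in the driver */
static int binder_sketch_commit_pages(struct binder_proc *proc,
				      struct binder_buffer *buffer,
				      void *end_page_addr)
{
	/* commit physical pages for the page-aligned span of a buffer that is
	 * about to be handed out for a transaction */
	return binder_update_page_range(proc, 1,
			(void *)PAGE_ALIGN((uintptr_t)buffer->data),
			end_page_addr, NULL);
}

static void binder_sketch_release_pages(struct binder_proc *proc,
					struct binder_buffer *buffer,
					size_t buffer_size)
{
	/* release the same span again once the buffer is freed */
	binder_update_page_range(proc, 0,
			(void *)PAGE_ALIGN((uintptr_t)buffer->data),
			(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
			NULL);
}

Passing vma as NULL in both calls is deliberate: at that point the caller may not be running in the context of the mapped process, so binder_update_page_range retrieves the VMA from proc->vma under the mmap semaphore, as shown in the function above.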