Role of the Binder kernel device:
1. Implements cross-process IPC calls.
2. Tracks reference counts on binder objects used across processes, including mapping a reference held by a remote process back to the real object in its host process, and making sure an object is not deleted while a remote process still references it.
Role of the Binder user-space code:
It maintains a thread pool in the process hosting the binder object, used to handle the IPC requests delivered by the Binder device (a minimal sketch of such a pool thread follows below).
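As a rough illustration only (not code from the sources quoted in this article), a pool thread announces itself to the driver with BC_ENTER_LOOPER (a thread that enters the loop on its own) or BC_REGISTER_LOOPER (a thread spawned at the driver's request), then blocks in the BINDER_WRITE_READ ioctl waiting for work. The helpers binder_state and binder_write are the ones shown later in this article; the rest is an assumption about how a pool worker is wired up.

// Sketch of a thread-pool worker; only BC_ENTER_LOOPER/BC_REGISTER_LOOPER and
// BINDER_WRITE_READ come from the code shown below, the rest is illustrative.
static void pool_thread(struct binder_state *bs, int spawned_by_driver)
{
    struct binder_write_read bwr;
    unsigned readbuf[32];

    // join the pool: the main thread uses BC_ENTER_LOOPER, spawned threads BC_REGISTER_LOOPER
    readbuf[0] = spawned_by_driver ? BC_REGISTER_LOOPER : BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(unsigned));

    for (;;) {
        bwr.write_size = 0;
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;
        // blocks in the driver until an IPC request (e.g. BR_TRANSACTION) arrives
        if (ioctl(bs->fd, BINDER_WRITE_READ, &bwr) < 0)
            break;
        // ... dispatch the BR_* commands now sitting in readbuf ...
    }
}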
The Service Manager processing flow is as follows:
1. The Service Manager opens the binder device.
2. The Service Manager enters its loop and waits for clients to issue commands through Binder; at this point the Binder device suspends the Service Manager's main thread, as shown in step a1.
3. A client issues a request through Binder, with the request data placed in a binder_write_read structure (see the sketch after this list); at this point the Binder device suspends Thread1 and Thread2, as shown in steps a2 and a3.
4. Binder wakes up the Service Manager's main thread to process the client request.
5. Binder wakes up Thread1 and Thread2, which receive the final result.
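As a hedged sketch of step 3 (not the actual client code), the client packs its outbound commands into the write half of a binder_write_read and leaves room for the reply in the read half; a single BINDER_WRITE_READ ioctl both delivers the request and blocks the caller until the reply arrives. The buffer layout of the BC_*/BR_* commands themselves is assumed here, only the binder_write_read fields come from the code quoted below.

// Sketch only: cmds/reply contents are assumptions, the binder_write_read
// fields and the BINDER_WRITE_READ ioctl are from the sources in this article.
int send_request(int binder_fd, void *cmds, unsigned cmds_len,
                 void *reply, unsigned reply_len)
{
    struct binder_write_read bwr;

    bwr.write_buffer   = (unsigned) cmds;   // outbound BC_* commands (the request)
    bwr.write_size     = cmds_len;
    bwr.write_consumed = 0;

    bwr.read_buffer    = (unsigned) reply;  // inbound BR_* commands (the reply)
    bwr.read_size      = reply_len;
    bwr.read_consumed  = 0;

    // the calling thread is suspended here until the ServiceManager has replied
    return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}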
How the Service Manager works
The ServiceManager initialization process:
1. Open the binder device /dev/binder
frameworks/base/cmds/servicemanager/service_manager.c
int main(int argc, char **argv)
{
    struct binder_state *bs;
    void *svcmgr = BINDER_SERVICE_MANAGER;

    bs = binder_open(128*1024);              // open the binder device and map 128KB of memory

    if (binder_become_context_manager(bs)) { // register itself as the context manager
        LOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    svcmgr_handle = svcmgr;
    binder_loop(bs, svcmgr_handler);         // reads/writes the binder device directly and handles service add requests
    return 0;
}
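The svcmgr_handle saved here is what svcmgr_handler later compares against txn->target. To the best of my knowledge, the servicemanager's binder.h defines BINDER_SERVICE_MANAGER as the well-known handle 0, i.e. the context manager registered through BINDER_SET_CONTEXT_MGR; the exact line should be checked against the source, but it is approximately:

/* Approximate definition from the servicemanager's binder.h:
   handle 0 always refers to the context manager. */
#define BINDER_SERVICE_MANAGER ((void*) 0)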
frameworks/base/cmds/servicemanager/Binder.c
int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}

void binder_loop(struct binder_state *bs, binder_handler func)
{
    ...
    res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
    ...
}

struct binder_state *binder_open(unsigned mapsize)
{
    struct binder_state *bs;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return 0;
    }

    bs->fd = open("/dev/binder", O_RDWR);   // lands in the driver's binder_open(struct inode *nodp, struct file *filp)
    if (bs->fd < 0) {
        fprintf(stderr, "binder: cannot open device (%s)\n", strerror(errno));
        goto fail_open;
    }

    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr, "binder: cannot map device (%s)\n", strerror(errno));
        goto fail_map;
    }

    /* TODO: check version */

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return 0;
}
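For reference, the binder_state handle passed around above holds roughly the following fields (this is an outline based on how the fields are used in binder_open; check the same binder.c for the exact definition):

/* Outline of struct binder_state as used above (fields inferred from usage). */
struct binder_state
{
    int fd;            /* file descriptor returned by open("/dev/binder") */
    void *mapped;      /* read-only mapping used to receive transaction data */
    unsigned mapsize;  /* size of that mapping (128KB for the servicemanager) */
};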
2. Enter the loop and wait for commands from binder clients
frameworks/base/cmds/servicemanager/Binder.c
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(unsigned));    // initial command

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);   // on the first pass the thread is suspended by the binder device until a command arrives

        if (res < 0) {
            LOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);    // parse the commands sent by the binder device
        if (res == 0) {
            LOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            LOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uint32_t *ptr, uint32_t size, binder_handler func)
{
    ...
    case BR_TRANSACTION: {      // transactions addressed to the servicemanager are handled here
        struct binder_txn *txn = (void *) ptr;
        if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
            LOGE("parse: txn too small!\n");
            return -1;
        }
        binder_dump_txn(txn);
        if (func) {
            unsigned rdata[256/4];
            struct binder_io msg;
            struct binder_io reply;
            int res;

            bio_init(&reply, rdata, sizeof(rdata), 4);
            bio_init_from_txn(&msg, txn);
            res = func(bs, txn, &msg, &reply);      // calls svcmgr_handler, which implements adding, getting and listing services
            binder_send_reply(bs, &reply, txn->data, res);
        }
        ptr += sizeof(*txn) / sizeof(uint32_t);
        break;
    }
    case BR_REPLY: {
        struct binder_txn *txn = (void*) ptr;
        if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
            LOGE("parse: reply too small!\n");
            return -1;
        }
        binder_dump_txn(txn);
        if (bio) {
            bio_init_from_txn(bio, txn);
            bio = 0;
        } else {
            /* todo FREE BUFFER */
        }
        ptr += (sizeof(*txn) / sizeof(uint32_t));
        r = 0;
        break;
    }
    case BR_DEAD_BINDER: {
        struct binder_death *death = (void*) *ptr++;
        death->func(bs, death->ptr);
        break;
    }
    ...
    }
    return r;
}
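binder_loop sends its initial BC_ENTER_LOOPER through binder_write, which is not shown above. It is essentially a BINDER_WRITE_READ ioctl with only the write half filled in; the sketch below follows that description rather than being a verbatim copy of the source:

/* Sketch of binder_write: BINDER_WRITE_READ with an empty read half. */
int binder_write(struct binder_state *bs, void *data, unsigned len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;               /* only the write half is used */
    bwr.write_consumed = 0;
    bwr.write_buffer = (unsigned) data;
    bwr.read_size = 0;                  /* nothing to read back */
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;

    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0)
        fprintf(stderr, "binder_write: ioctl failed (%s)\n", strerror(errno));
    return res;
}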
The following is the ServiceManager's transaction-handling code, which implements listing, getting, adding, and checking services:
frameworks/base/cmds/servicemanager/service_manager.c
int svcmgr_handler(struct binder_state *bs,
                   struct binder_txn *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    unsigned len;
    void *ptr;
    uint32_t strict_policy;

    if (txn->target != svcmgr_handle)
        return -1;
    ...
    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);
        ptr = do_find_service(bs, s, len);
        if (!ptr)
            break;
        bio_put_ref(reply, ptr);
        return 0;

    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);
        ptr = bio_get_ref(msg);
        if (do_add_service(bs, s, len, ptr, txn->sender_euid))
            return -1;
        break;

    case SVC_MGR_LIST_SERVICES: {
        unsigned n = bio_get_uint32(msg);

        si = svclist;
        while ((n-- > 0) && si)
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        LOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}
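do_find_service and do_add_service are not shown above; conceptually they walk and extend the same svclist that SVC_MGR_LIST_SERVICES iterates. The sketch below is hypothetical: the svcinfo fields len and ptr are assumptions for illustration, only svclist, si->next and si->name appear in the code above.

/* Hypothetical sketch of the name lookup behind SVC_MGR_GET/CHECK_SERVICE. */
void *sketch_find_service(uint16_t *s, unsigned len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        /* compare the UTF-16 service name against the requested one */
        if ((len == si->len) &&
            !memcmp(s, si->name, len * sizeof(uint16_t)))
            return si->ptr;   /* binder reference handle recorded at add time */
    }
    return 0;                 /* unknown service: svcmgr_handler falls through */
}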
The following is the Binder device kernel code:
kernel/omap4/drivers/staging/android/Binder.c
static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc;

    binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
                 current->group_leader->pid, current->pid);

    proc = kzalloc(sizeof(*proc), GFP_KERNEL);  // one binder_proc is created per open() to record the calling process's state
    if (proc == NULL)
        return -ENOMEM;
    get_task_struct(current);
    proc->tsk = current;
    INIT_LIST_HEAD(&proc->todo);
    init_waitqueue_head(&proc->wait);           // each device user has only one wait queue
    proc->default_priority = task_nice(current);
    mutex_lock(&binder_lock);
    binder_stats_created(BINDER_STAT_PROC);
    hlist_add_head(&proc->proc_node, &binder_procs);
    proc->pid = current->group_leader->pid;
    INIT_LIST_HEAD(&proc->delivered_death);
    filp->private_data = proc;
    mutex_unlock(&binder_lock);

    if (binder_debugfs_dir_entry_proc) {
        char strbuf[11];
        snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
        proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
            binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
    }

    return 0;
}
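In outline, the binder_proc members touched by binder_open are the following; this lists only the fields used in the excerpt above, the real structure in the driver contains many more bookkeeping fields.

/* Subset of struct binder_proc as used by binder_open() above. */
struct binder_proc {
    struct hlist_node proc_node;        /* links this proc into the global binder_procs list */
    int pid;                            /* group-leader pid of the opening process */
    struct task_struct *tsk;            /* task that opened the device */
    struct list_head todo;              /* process-wide work queue */
    wait_queue_head_t wait;             /* the single wait queue shared by the proc's idle threads */
    long default_priority;              /* nice value captured at open time */
    struct list_head delivered_death;   /* death notifications delivered but not yet confirmed */
    struct dentry *debugfs_entry;       /* per-process entry under debugfs */
};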
binder_ioctl is the entry point of the binder device; it mainly handles the BINDER_WRITE_READ command to communicate with binder services.
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); // if a binder user error is pending, suspend the thread
    if (ret)
        return ret;

    mutex_lock(&binder_lock);
    thread = binder_get_thread(proc);
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {
    case BINDER_WRITE_READ: {
        struct binder_write_read bwr;
        if (size != sizeof(struct binder_write_read)) {
            ret = -EINVAL;
            goto err;
        }
        if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,
                     "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
                     proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
                     bwr.read_size, bwr.read_buffer);

        if (bwr.write_size > 0) {
            ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
            if (ret < 0) {
                bwr.read_consumed = 0;
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        if (bwr.read_size > 0) {
            ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
            if (!list_empty(&proc->todo))
                wake_up_interruptible(&proc->wait);
            if (ret < 0) {
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,
                     "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
                     proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
                     bwr.read_consumed, bwr.read_size);
        if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        break;
    }
    ...
err:
    if (thread)
        thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
    mutex_unlock(&binder_lock);
    wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret && ret != -ERESTARTSYS)
        printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
    return ret;
}
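The size check against _IOC_SIZE(cmd) works because the BINDER_WRITE_READ command encodes the size of struct binder_write_read in the ioctl number itself. In the driver's binder.h the command is defined roughly as follows (approximate, check the header for the exact line):

/* Approximate definition: the _IOWR encoding carries sizeof(struct binder_write_read),
   which binder_ioctl() re-checks via _IOC_SIZE(cmd). */
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)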
The function binder_thread_read reads the commands destined for the binder service side; if there are none, it suspends the thread until a command arrives.
static int binder_thread_read(struct binder_proc *proc,
                              struct binder_thread *thread,
                              void __user *buffer, int size,
                              signed long *consumed, int non_block)
{
    ...
retry:
    wait_for_proc_work = thread->transaction_stack == NULL &&
                         list_empty(&thread->todo);     // wait for process-level work when this thread's transaction stack and todo list are both empty

    if (thread->return_error != BR_OK && ptr < end) {
        if (thread->return_error2 != BR_OK) {
            if (put_user(thread->return_error2, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (ptr == end)
                goto done;
            thread->return_error2 = BR_OK;
        }
        if (put_user(thread->return_error, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        thread->return_error = BR_OK;
        goto done;
    }

    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    if (wait_for_proc_work)
        proc->ready_threads++;
    mutex_unlock(&binder_lock);

    if (wait_for_proc_work) {
        if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                                BINDER_LOOPER_STATE_ENTERED))) {
            binder_user_error("binder: %d:%d ERROR: Thread waiting "
                "for process work before calling BC_REGISTER_"
                "LOOPER or BC_ENTER_LOOPER (state %x)\n",
                proc->pid, thread->pid, thread->looper);
            wait_event_interruptible(binder_user_error_wait,
                                     binder_stop_on_user_error < 2);
        }
        binder_set_nice(proc->default_priority);
        if (non_block) {
            if (!binder_has_proc_work(proc, thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
    } else {
        if (non_block) {
            if (!binder_has_thread_work(thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));   // suspend the calling thread until it has thread work
    }
    ...
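The two wake-up predicates used above decide when a sleeping thread actually has something to do. In outline (simplified from the driver, so treat this as a sketch rather than the exact source), they check the corresponding todo list plus a couple of per-thread conditions:

/* Outline of the wake-up conditions (simplified sketch). */
static int binder_has_proc_work(struct binder_proc *proc,
                                struct binder_thread *thread)
{
    /* process-level work queued, or the thread was asked to return to user space */
    return !list_empty(&proc->todo) ||
           (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

static int binder_has_thread_work(struct binder_thread *thread)
{
    /* thread-level work queued, a pending error to report, or a forced return */
    return !list_empty(&thread->todo) ||
           thread->return_error != BR_OK ||
           (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}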