在一台Android 8.1的手机中,可以看到三个servicemanager:
/frameworks/native/cmds/servicemanager/servicemanager.rc
service servicemanager /system/bin/servicemanager
...
frameworks/native/cmds/servicemanager/service_manager.c
// Entry point shared by servicemanager and vndservicemanager: open the
// requested binder device, claim the context-manager role, then loop
// forever dispatching incoming transactions to svcmgr_handler.
int main(int argc, char** argv)
{
struct binder_state *bs;
union selinux_callback cb;
char *driver;
if (argc > 1) {
driver = argv[1]; // when started as vndservicemanager, /dev/vndbinder is passed in
} else {
driver = "/dev/binder"; // default: the framework binder device
}
bs = binder_open(driver, 128*1024); // open the driver and mmap a 128KB receive buffer
if (binder_become_context_manager(bs)) { // register as context manager (owner of handle 0)
...
}
binder_loop(bs, svcmgr_handler); // never returns in normal operation
return 0;
}
frameworks/native/cmds/servicemanager/binder.c
// Opens the given binder device and maps `mapsize` bytes of its buffer
// into this process. Returns a heap-allocated binder_state holding the
// fd, the mapping address and the mapping size.
// (Error handling is elided in this excerpt.)
struct binder_state *binder_open(const char* driver, size_t mapsize)
{
struct binder_state *bs;
struct binder_version vers;
bs = malloc(sizeof(*bs)); -------------1
bs->fd = open(driver, O_RDWR | O_CLOEXEC); ------ 2
// verify the kernel driver speaks the protocol version we expect
if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
...
}
bs->mapsize = mapsize; // 128K
// (3) map the driver buffer read-only and private into our address space
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0); ---------3
....
return bs;
}
drivers/android/binder.c
// Kernel-side open() handler for the binder device: allocates and
// initializes a binder_proc for the opening process and stashes it in
// filp->private_data. (The tail of the function is elided in this listing.)
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
struct binder_device *binder_dev;
proc = kzalloc(sizeof(*proc), GFP_KERNEL); -----------1
get_task_struct(current->group_leader); // take a reference on the current thread's thread group leader
proc->tsk = current->group_leader;
binder_init_worklist(&proc->todo);
if (binder_supported_policy(current->policy)) { // binder-supported scheduling policies are SCHED_NORMAL, SCHED_BATCH, SCHED_FIFO and SCHED_RR; inherit the caller's policy and priority
proc->default_priority.sched_policy = current->policy;
proc->default_priority.prio = current->normal_prio;
} else {
// unsupported policy: fall back to SCHED_NORMAL at nice 0
proc->default_priority.sched_policy = SCHED_NORMAL;
proc->default_priority.prio = NICE_TO_PRIO(0);
}
// recover the binder_device this fd was opened on from its embedded miscdev
binder_dev = container_of(filp->private_data, struct binder_device,
miscdev);
proc->context = &binder_dev->context;
binder_alloc_init(&proc->alloc);
mutex_lock(&binder_procs_lock);
binder_stats_created(BINDER_STAT_PROC);
hlist_add_head(&proc->proc_node, &binder_procs); // register in the global binder_procs list
proc->pid = current->group_leader->pid;
spin_lock_init(&proc->proc_lock);
binder_init_worklist(&proc->delivered_death);
atomic_set(&proc->ready_threads, 0);
proc->max_threads = 0;
proc->requested_threads = 0;
proc->requested_threads_started = 0;
INIT_LIST_HEAD(&proc->zombie_proc.list_node);
INIT_HLIST_HEAD(&proc->zombie_refs);
INIT_HLIST_HEAD(&proc->zombie_nodes);
INIT_HLIST_HEAD(&proc->zombie_threads);
INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc; // binder_proc is saved in the file's private_data
驱动中的binder_open主要是创建了该进程的binder_proc并进行初始化；binder_proc用于描述一个使用binder IPC的进程，其具体字段的含义将在后文展开介绍。
// Kernel-side mmap() handler: validates the requested VMA (clamped to 4MB,
// never writable from userspace, not inherited across fork) and delegates
// the buffer bookkeeping to binder_alloc_mmap_handler().
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
// only the process that opened the fd may map it
if (proc->tsk != current->group_leader)
return -EINVAL;
if ((vma->vm_end - vma->vm_start) > SZ_4M) -------------1
vma->vm_end = vma->vm_start + SZ_4M; // clamp the mapping to at most 4MB
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
ret = -EPERM;
failure_string = "bad vm_flags";
goto err_bad_arg;
}
// forbid future writes to the mapping and don't copy it across fork()
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
ret = binder_alloc_mmap_handler(&proc->alloc, vma); -------------2
if (!ret) {
proc->files = get_files_struct(current);
return 0;
}
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
return ret;
}
// Sets up the binder_alloc bookkeeping for a freshly mmapped VMA: reserves
// a kernel virtual-address range of the same size, records the user/kernel
// address offset, allocates the per-page tracking array, and seeds the
// allocator with one free buffer spanning the whole space.
// (Error handling is elided in this excerpt.)
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
struct vm_area_struct *vma)
{
int ret;
struct vm_struct *area;
const char *failure_string;
struct binder_buffer *buffer;
// (1) reserve a kernel virtual address range the same size as the user VMA
area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); -----------1
alloc->buffer = area->addr; -------------2
// fixed delta between the user-space and kernel-space views of the buffer
WRITE_ONCE(alloc->user_buffer_offset,
vma->vm_start - (uintptr_t)alloc->buffer);
// (3) one struct page* slot per page of the mapping
alloc->pages = kzalloc(sizeof(alloc->pages[0]) * ------------3
((vma->vm_end - vma->vm_start) / PAGE_SIZE),
GFP_KERNEL);
alloc->buffer_size = vma->vm_end - vma->vm_start;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); ---------4
// (5) populate the first BINDER_MIN_ALLOC bytes with pages up front
if (__binder_update_page_range(alloc, 1, alloc->buffer, ---------5
alloc->buffer + BINDER_MIN_ALLOC, vma)) {
}
buffer->data = alloc->buffer; ------------6
list_add(&buffer->entry, &alloc->buffers); -----------7
buffer->free = 1; // the initial buffer is free and covers the whole space
binder_insert_free_buffer(alloc, buffer); ---------8
alloc->free_async_space = alloc->buffer_size / 2; // async transactions may use at most half the space
barrier();
alloc->vma = vma;
alloc->vma_vm_mm = vma->vm_mm;
return 0;
}
首先需要了解binder_alloc和binder_buffer两个数据结构：前者用来管理binder_proc的内存分配，后者描述一块具体分配出来的内存缓冲区。
native/cmds/servicemanager/binder.c
/*
 * Ask the binder driver to make this process the context manager,
 * i.e. the owner of the well-known handle 0 that all lookups go through.
 * Returns 0 on success; on failure, -1 with errno set by ioctl().
 */
int binder_become_context_manager(struct binder_state *bs)
{
    int status;

    status = ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
    return status;
}
drivers/android/binder.c
binder_ioctl会调用binder_ioctl_set_ctx_mgr
....
// Body of binder_ioctl_set_ctx_mgr (excerpt): only one uid may ever own the
// context-manager role — once recorded, later callers must present the same euid.
if (uid_valid(context->binder_context_mgr_uid)) {
if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
from_kuid(&init_user_ns, curr_euid),
from_kuid(&init_user_ns,
context->binder_context_mgr_uid));
ret = -EPERM;
goto out;
}
} else {
// first caller: remember its euid as the context-manager uid
context->binder_context_mgr_uid = curr_euid;
}
// create the binder_node that represents the context manager (handle 0)
temp = binder_new_node(proc, 0, 0);
if (temp == NULL) {
context->binder_context_mgr_uid = INVALID_UID; // roll back so another process may retry
ret = -ENOMEM;
goto out;
}
// pin the node with strong and weak references so it cannot be freed
temp->local_weak_refs++;
temp->local_strong_refs++;
temp->has_strong_ref = 1;
temp->has_weak_ref = 1;
context->binder_context_mgr_node = temp;
binder_put_node(temp); // NOTE(review): presumably drops the reference taken by binder_new_node — confirm
...
逻辑比较简单,首先设置UID,然后新建一个binder_node,对应userspace的SM的binder实体。
// Main receive loop of servicemanager: announce this thread as a looper to
// the driver, then block in BINDER_WRITE_READ forever, handing each batch
// of commands read from the driver to binder_parse() with the given handler.
void binder_loop(struct binder_state *bs, binder_handler func) // func is svcmgr_handler
{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
// nothing to write from this loop itself; outgoing data goes via binder_write()
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
// (1) tell the driver this thread is entering the looper state
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t)); -------------------1
for (;;) { -------------------2
// (2) block in the driver until commands arrive, then parse them
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
break;
}
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
if (res == 0) {
ALOGE("binder_loop: unexpected reply?!\n");
break;
}
if (res < 0) {
ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
break;
}
}
}