Registering the ServiceManager Daemon

Service Manager is the daemon process of the entire Binder mechanism. It manages the various Servers created by developers and provides Clients with the ability to look up a Server's remote interface. Service Manager is started as a native service by the init process; init.rc contains the following entry:

service servicemanager /system/bin/servicemanager  
    class core  
    user system  
    group system  
    critical  
    onrestart restart zygote  
    onrestart restart media  
    onrestart restart surfaceflinger  
    onrestart restart drm  

This entry tells init to start the servicemanager service process. It is a critical system process: whenever it is killed and restarted, the zygote, media, surfaceflinger, and drm processes must be restarted with it.
The Service Manager source code lives under frameworks/native/cmds/servicemanager and consists mainly of three files: binder.h, binder.c, and service_manager.c. Its entry point is the main function in service_manager.c:

int main(int argc, char **argv)
{
    struct binder_state *bs;
    void *svcmgr = BINDER_SERVICE_MANAGER; // defined in binder.h
    bs = binder_open(128*1024); // open the binder driver and map the shared memory
    // tell the driver that this process is to become the binder context manager
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
    svcmgr_handle = svcmgr;
    binder_loop(bs, svcmgr_handler); // enter the loop and wait for requests
    return 0;
}
#define BINDER_SERVICE_MANAGER ((void*) 0) 

This says that Service Manager's handle is 0. The Binder communication mechanism uses handles to stand for remote interfaces. ServiceManager, the daemon that manages service registration and lookup, is reached by every other process through handle 0; all other Servers' remote interfaces get handle values greater than 0, assigned automatically by the Binder driver.
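
Before descending into the kernel, it helps to see roughly what the user-space binder_open() called from main() does. The following is an abridged sketch of the version in frameworks/native/cmds/servicemanager/binder.c, with error handling trimmed:

struct binder_state *binder_open(unsigned mapsize)
{
    struct binder_state *bs = malloc(sizeof(*bs));

    /* open the binder device; this triggers binder_open in the driver */
    bs->fd = open("/dev/binder", O_RDWR);

    /* map mapsize bytes of kernel buffer space read-only into this process */
    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);

    return bs;
}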

When the binder device is opened, the first code to run is binder_open in the driver, kernel/drivers/staging/android/binder.c:

static int binder_open(struct inode *nodp, struct file *filp)  
{  
    struct binder_proc *proc;  

    if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)  
        printk(KERN_INFO "binder_open: %d:%d\n", current->group_leader->pid, current->pid);  

    proc = kzalloc(sizeof(*proc), GFP_KERNEL);  
    if (proc == NULL)  
        return -ENOMEM;  
    get_task_struct(current);  
    proc->tsk = current;  
    INIT_LIST_HEAD(&proc->todo);  
    init_waitqueue_head(&proc->wait);  
    proc->default_priority = task_nice(current);  
    mutex_lock(&binder_lock);  
    binder_stats.obj_created[BINDER_STAT_PROC]++;  
    hlist_add_head(&proc->proc_node, &binder_procs);  
    proc->pid = current->group_leader->pid;  
    INIT_LIST_HEAD(&proc->delivered_death);  
    filp->private_data = proc;  
    mutex_unlock(&binder_lock);  

    if (binder_proc_dir_entry_proc) {  
        char strbuf[11];  
        snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);  
        remove_proc_entry(strbuf, binder_proc_dir_entry_proc);  
        create_proc_read_entry(strbuf, S_IRUGO, binder_proc_dir_entry_proc, binder_read_proc_proc, proc);  
    }  

    return 0;  
}  

Its main job is to allocate a struct binder_proc that holds the context of the process opening /dev/binder, and to store it in the private_data member of the open-file structure struct file, so that later file operations can retrieve the process context from struct file. The context is also added to the global hash list binder_procs. The four key members of struct binder_proc are:

struct binder_proc {  
    struct hlist_node proc_node;  
    struct rb_root threads;  
    struct rb_root nodes;  
    struct rb_root refs_by_desc;  
    struct rb_root refs_by_node;  
    int pid;  
    ...
    void *buffer; // start address of the kernel mapping for this process
    ......
};  

See the binder.c driver source for the full declaration.
These four members (threads, nodes, refs_by_desc, refs_by_node) are all red-black tree roots; in other words, each binder_proc maintains four red-black trees. The threads tree holds the threads this process uses to serve user requests; their maximum number is bounded by max_threads. The nodes tree holds the Binder entities owned by this process. The refs_by_desc and refs_by_node trees both hold this process's Binder references, i.e. references to Binder entities in other processes: one tree is keyed by handle (descriptor), the other by the address of the referenced entity node. They contain the same references; two trees exist purely to make both kinds of internal lookup fast.
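
As an example of how these trees are used, looking up a reference by its handle is a plain walk of the refs_by_desc tree; an abridged sketch of the driver's binder_get_ref():

static struct binder_ref *binder_get_ref(struct binder_proc *proc,
                                         uint32_t desc)
{
    struct rb_node *n = proc->refs_by_desc.rb_node;
    struct binder_ref *ref;

    while (n) {
        ref = rb_entry(n, struct binder_ref, rb_node_desc);
        if (desc < ref->desc)        /* the tree is ordered by handle value */
            n = n->rb_left;
        else if (desc > ref->desc)
            n = n->rb_right;
        else
            return ref;              /* found the reference for this handle */
    }
    return NULL;
}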

int binder_become_context_manager(struct binder_state *bs)  
{  
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);  
}  

It calls the ioctl file operation with the command number BINDER_SET_CONTEXT_MGR (defined in binder.h) to tell the Binder driver that this process is the daemon. The ioctl system call lands directly in the driver's binder_ioctl; here we only care about the BINDER_SET_CONTEXT_MGR operation.
Before analyzing it, let's first look at two important structures. The first, struct binder_thread, represents a thread:

struct binder_thread {  
    struct binder_proc *proc;  
    struct rb_node rb_node;  
    int pid;  
    int looper;  
    struct binder_transaction *transaction_stack;  
    struct list_head todo;  
    uint32_t return_error; /* Write failed, return error code in read buf */  
    uint32_t return_error2; /* Write failed, return error code in read */  
        /* buffer. Used when sending a reply to a dead process that */  
        /* we are also waiting on */  
    wait_queue_head_t wait;  
    struct binder_stats stats;  
};  

proc is the process this thread belongs to. struct binder_proc has a member threads of type rb_root, the root of a red-black tree organizing all the threads of the process; the rb_node member of struct binder_thread links the thread into that tree.
transaction_stack is the transaction the thread is currently processing; todo is the list of work items delivered to this thread; return_error and return_error2 hold operation result codes; wait is used to block the thread until some event occurs; stats keeps some statistics.
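
binder_get_thread(), which we will meet again in binder_ioctl below, shows how threads are linked into proc->threads: the tree is keyed by pid, and a binder_thread is created lazily the first time a thread enters the driver. An abridged sketch:

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
    struct binder_thread *thread = NULL;
    struct rb_node *parent = NULL;
    struct rb_node **p = &proc->threads.rb_node;

    /* search the red-black tree for the current thread's pid */
    while (*p) {
        parent = *p;
        thread = rb_entry(parent, struct binder_thread, rb_node);
        if (current->pid < thread->pid)
            p = &(*p)->rb_left;
        else if (current->pid > thread->pid)
            p = &(*p)->rb_right;
        else
            break;
    }
    if (*p == NULL) {
        /* first time this thread enters the driver: create and link it */
        thread = kzalloc(sizeof(*thread), GFP_KERNEL);
        if (thread == NULL)
            return NULL;
        thread->proc = proc;
        thread->pid = current->pid;
        init_waitqueue_head(&thread->wait);
        INIT_LIST_HEAD(&thread->todo);
        rb_link_node(&thread->rb_node, parent, p);
        rb_insert_color(&thread->rb_node, &proc->threads);
        thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
        thread->return_error = BR_OK;
        thread->return_error2 = BR_OK;
    }
    return thread;
}
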
The other important structure, struct binder_node, represents a Binder entity:

struct binder_node {  
    int debug_id;  
    struct binder_work work;  
    union {  
        struct rb_node rb_node;  
        struct hlist_node dead_node;  
    };  
    struct binder_proc *proc;  
    struct hlist_head refs;  
    int internal_strong_refs;  
    int local_weak_refs;  
    int local_strong_refs;  
    void __user *ptr;  
    void __user *cookie;  
    unsigned has_strong_ref : 1;  
    unsigned pending_strong_ref : 1;  
    unsigned has_weak_ref : 1;  
    unsigned pending_weak_ref : 1;  
    unsigned has_async_transaction : 1;  
    unsigned accept_fds : 1;  
    int min_priority : 8;  
    struct list_head async_todo;  
};  

rb_node and dead_node form a union. While a Binder entity is in normal use, rb_node links it into the red-black tree rooted at proc->nodes, which organizes all the Binder entities belonging to the process; if the entity's owning process has been destroyed but the entity is still referenced by other processes, the entity is instead linked through dead_node into a hash list of dead nodes. proc is the process this Binder entity belongs to. refs chains together all the Binder references that point at this entity. internal_strong_refs, local_weak_refs, and local_strong_refs are the entity's reference counts. ptr and cookie are the entity's address in user space and its attached data, respectively.
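
A Binder entity is created by the driver's binder_new_node(), which links it into proc->nodes keyed by the entity's user-space address ptr. An abridged sketch:

static struct binder_node *binder_new_node(struct binder_proc *proc,
                                           void __user *ptr,
                                           void __user *cookie)
{
    struct rb_node **p = &proc->nodes.rb_node;
    struct rb_node *parent = NULL;
    struct binder_node *node;

    /* the nodes tree is keyed by the entity's user-space address */
    while (*p) {
        parent = *p;
        node = rb_entry(parent, struct binder_node, rb_node);
        if (ptr < node->ptr)
            p = &(*p)->rb_left;
        else if (ptr > node->ptr)
            p = &(*p)->rb_right;
        else
            return NULL;            /* this entity already has a node */
    }
    node = kzalloc(sizeof(*node), GFP_KERNEL);
    if (node == NULL)
        return NULL;
    rb_link_node(&node->rb_node, parent, p);
    rb_insert_color(&node->rb_node, &proc->nodes);
    node->proc = proc;
    node->ptr = ptr;                /* NULL for the context manager */
    node->cookie = cookie;
    node->work.type = BINDER_WORK_NODE;
    INIT_LIST_HEAD(&node->work.entry);
    INIT_LIST_HEAD(&node->async_todo);
    return node;
}
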
Now let's analyze the BINDER_SET_CONTEXT_MGR operation.

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/

    trace_binder_ioctl(cmd, arg);

    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        goto err_unlocked;

    binder_lock(__func__);
    thread = binder_get_thread(proc);
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {
    ......
    case BINDER_SET_CONTEXT_MGR:
        if (binder_context_mgr_node != NULL) {
            printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
            ret = -EBUSY;
            goto err;
        }
        ret = security_binder_set_context_mgr(proc->tsk);
        if (ret < 0)
            goto err;
        if (binder_context_mgr_uid != -1) {
            if (binder_context_mgr_uid != current->cred->euid) {
                printk(KERN_ERR "binder: BINDER_SET_"
                       "CONTEXT_MGR bad uid %d != %d\n",
                       current->cred->euid,
                       binder_context_mgr_uid);
                ret = -EPERM;
                goto err;
            }
        } else
            binder_context_mgr_uid = current->cred->euid; // remember Service Manager's uid
        // create the Binder entity for Service Manager
        binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
        if (binder_context_mgr_node == NULL) {
            ret = -ENOMEM;
            goto err;
        }
#ifdef BINDER_MONITOR
        strcpy(binder_context_mgr_node->name, "servicemanager");
        printk(KERN_INFO "binder: %d:%d set as servicemanager uid %d\n",
                proc->pid, thread->pid, binder_context_mgr_uid);
#endif
        // initialize the reference counts of binder_context_mgr_node
        binder_context_mgr_node->local_weak_refs++;
        binder_context_mgr_node->local_strong_refs++;
        binder_context_mgr_node->has_strong_ref = 1;
        binder_context_mgr_node->has_weak_ref = 1;
        break;
......
    default:
        ret = -EINVAL;
        goto err;
    }
    ret = 0;
err:
    if (thread)
        thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; // binder_get_thread set thread->looper = BINDER_LOOPER_STATE_NEED_RETURN; after this statement thread->looper == 0
    binder_unlock(__func__);
    wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret && ret != -ERESTARTSYS)
        printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
    trace_binder_ioctl_done(ret);
    return ret;
}

After BINDER_SET_CONTEXT_MGR finishes in the kernel, control returns to main, which calls binder_loop:

void binder_loop(struct binder_state *bs, binder_handler func)  
{  
    int res;  
    struct binder_write_read bwr;  
    unsigned readbuf[32];  

    bwr.write_size = 0;  
    bwr.write_consumed = 0;  
    bwr.write_buffer = 0;  
    /* binder_write issues the BC_ENTER_LOOPER command to tell the
     * Binder driver that Service Manager is about to enter its loop. */
    readbuf[0] = BC_ENTER_LOOPER; 
    binder_write(bs, readbuf, sizeof(unsigned));  

    for (;;) {  
        bwr.read_size = sizeof(readbuf);  
        bwr.read_consumed = 0;  
        bwr.read_buffer = (unsigned) readbuf;  

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);  

        if (res < 0) {  
            LOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));  
            break;  
        }  

        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);  
        if (res == 0) {  
            LOGE("binder_loop: unexpected reply?!\n");  
            break;  
        }  
        if (res < 0) {  
            LOGE("binder_loop: io error %d %s\n", res, strerror(errno));  
            break;  
        }  
    }  
}  
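
binder_write(), used above to send BC_ENTER_LOOPER, is a thin wrapper that issues a write-only BINDER_WRITE_READ ioctl (read_size is 0, so the call returns without blocking); a sketch close to the original source:

int binder_write(struct binder_state *bs, void *data, unsigned len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;           /* commands to hand to the driver */
    bwr.write_consumed = 0;
    bwr.write_buffer = (unsigned) data;
    bwr.read_size = 0;              /* nothing to read: return immediately */
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0)
        fprintf(stderr, "binder_write: ioctl failed (%s)\n", strerror(errno));
    return res;
}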

The argument of the BINDER_WRITE_READ ioctl command is:

struct binder_write_read {  
    signed long write_size; /* bytes to write */  
    signed long write_consumed; /* bytes consumed by driver */  
    unsigned long   write_buffer;  
    signed long read_size;  /* bytes to read */  
    signed long read_consumed;  /* bytes consumed by driver */  
    unsigned long   read_buffer;  
};  

The data pointed to by write_buffer and read_buffer describes the operations to perform: each buffer carries a sequence of command codes, each optionally followed by a payload. For transaction commands, the payload is a struct binder_transaction_data:

struct binder_transaction_data {  
    /* The first two are only used for bcTRANSACTION and brTRANSACTION, 
     * identifying the target and contents of the transaction. 
     */  
    union {  
        size_t  handle; /* target descriptor of command transaction */  
        void    *ptr;   /* target descriptor of return transaction */  
    } target;  
    void        *cookie;    /* target object cookie */  
    unsigned int    code;       /* transaction command */  

    /* General information about the transaction. */  
    unsigned int    flags;  
    pid_t       sender_pid;  
    uid_t       sender_euid;  
    size_t      data_size;  /* number of bytes of data */  
    size_t      offsets_size;   /* number of bytes of offsets */  

    /* If this transaction is inline, the data immediately 
     * follows here; otherwise, it ends with a pointer to 
     * the data buffer. 
     */  
    union {  
        struct {  
            /* transaction data */  
            const void  *buffer;  
            /* offsets from buffer to flat_binder_object structs */  
            const void  *offsets;  
        } ptr;  
        uint8_t buf[8];  
    } data;  
};  

The target union: when the target of this BINDER_WRITE_READ command is a local Binder entity, ptr holds that object's address within the current process; otherwise handle holds a reference to the Binder entity. cookie is only meaningful when the target is a Binder entity; it carries extra data that the entity itself interprets. code is the command code being requested of the target object.

sender_pid and sender_euid are the pid and effective uid of the sending process.
data_size is the size of the data.ptr.buffer buffer, and offsets_size is the size of the data.ptr.offsets buffer.
The data in data.ptr.buffer falls into two categories: ordinary data, which the Binder driver does not care about, and Binder entities or Binder references, which the driver must process. When process A passes a Binder entity or reference to process B, the driver has to maintain the reference counts of that entity or reference, so that A cannot destroy the entity while B is still using it; otherwise B would crash. Therefore, when transferred data contains Binder entities or references, the driver must be told exactly where they sit so that it can maintain them. That is the job of data.ptr.offsets: it records the offset of every Binder entity or reference inside the data.ptr.buffer buffer. Each Binder entity or reference is represented by a struct flat_binder_object:

struct flat_binder_object {  
    /* 8 bytes for large_flat_header. */  
    unsigned long       type;  
    unsigned long       flags;  

    /* 8 bytes of data. */  
    union {  
        void        *binder;    /* local object */  
        signed long handle;     /* remote object */  
    };  

    /* extra data associated with local object */  
    void            *cookie;  
};  

type indicates the type of the Binder object:

enum {  
    BINDER_TYPE_BINDER  = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),  
    BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),  
    BINDER_TYPE_HANDLE  = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),  
    BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),  
    BINDER_TYPE_FD      = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),  
};  

flags holds the Binder object's flags; this field only matters the first time a Binder entity is passed, because at that point the driver creates the corresponding entity node in the kernel and needs to pull some parameters out of this field. binder is used when the object is a Binder entity, and handle when it is a Binder reference. cookie is only meaningful for a Binder entity; it is extra data interpreted by the process that owns the entity.
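
To make the buffer layout concrete, here is a hypothetical helper that appends one local Binder entity to a transaction data buffer and records its offset for the driver. The names buf, offs, and the index parameters are illustrative, not from the real sources:

#include <stdint.h>
#include <string.h>

/* Hypothetical helper: marshal one local Binder entity into the
 * transaction data buffer and note its position in the offsets buffer. */
static void write_binder_obj(uint8_t *buf, size_t *buf_used,
                             size_t *offs, size_t *n_offs,
                             void *local_ptr, void *local_cookie)
{
    struct flat_binder_object obj;

    obj.type   = BINDER_TYPE_BINDER;           /* a local entity */
    obj.flags  = FLAT_BINDER_FLAG_ACCEPTS_FDS; /* read by the driver on first pass */
    obj.binder = local_ptr;                    /* the object's address in this process */
    obj.cookie = local_cookie;                 /* extra data for the owner */

    memcpy(buf + *buf_used, &obj, sizeof(obj));
    offs[(*n_offs)++] = *buf_used;             /* tell the driver where the object sits */
    *buf_used += sizeof(obj);
}
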
Now back to the BINDER_WRITE_READ operation in binder_loop's for loop, where bwr starts out as follows:

bwr.write_size = 0; 
bwr.write_consumed = 0; 
bwr.write_buffer = 0; 
readbuf[0] = BC_ENTER_LOOPER; 
bwr.read_size = sizeof(readbuf); 
bwr.read_consumed = 0; 
bwr.read_buffer = (unsigned) readbuf; 

This asks the driver to read up to sizeof(readbuf) bytes of data. The ioctl again enters the driver's binder_ioctl:

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/

    trace_binder_ioctl(cmd, arg);

    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        goto err_unlocked;

    binder_lock(__func__);
    thread = binder_get_thread(proc);
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {
    case BINDER_WRITE_READ: {
        struct binder_write_read bwr;
        if (size != sizeof(struct binder_write_read)) {
            ret = -EINVAL;
            goto err;
        }
        if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,
                 "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
                 proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
                 bwr.read_size, bwr.read_buffer);

        if (bwr.write_size > 0) { // bwr.write_size == 0 here, so this branch is skipped
            ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
            trace_binder_write_done(ret);
            if (ret < 0) {
                bwr.read_consumed = 0;
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        if (bwr.read_size > 0) {
            ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
            trace_binder_read_done(ret);
            if (!list_empty(&proc->todo))
                wake_up_interruptible(&proc->wait);
            if (ret < 0) {
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,
                 "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
                 proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
                 bwr.read_consumed, bwr.read_size);
        if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        break;
    }
    ......
    default:
        ret = -EINVAL;
        goto err;
    }
    ret = 0;
err:
    if (thread)
        thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
    binder_unlock(__func__);
    wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret && ret != -ERESTARTSYS)
        printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
    trace_binder_ioctl_done(ret);
    return ret;
}

Since bwr.write_size is 0, binder_thread_write is not executed, and binder_thread_read runs next:

static int  
binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,  
                   void  __user *buffer, int size, signed long *consumed, int non_block)  
{  
    void __user *ptr = buffer + *consumed;  
    void __user *end = buffer + size;  

    int ret = 0;  
    int wait_for_proc_work;  

    if (*consumed == 0) {  
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))  
            return -EFAULT;  
        ptr += sizeof(uint32_t);  
    }  

retry:  
    wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);  

    if (thread->return_error != BR_OK && ptr < end) {  
        if (thread->return_error2 != BR_OK) {  
            if (put_user(thread->return_error2, (uint32_t __user *)ptr))  
                return -EFAULT;  
            ptr += sizeof(uint32_t);  
            if (ptr == end)  
                goto done;  
            thread->return_error2 = BR_OK;  
        }  
        if (put_user(thread->return_error, (uint32_t __user *)ptr))  
            return -EFAULT;  
        ptr += sizeof(uint32_t);  
        thread->return_error = BR_OK;  
        goto done;  
    }  


    thread->looper |= BINDER_LOOPER_STATE_WAITING;  
    if (wait_for_proc_work)  
        proc->ready_threads++;  
    mutex_unlock(&binder_lock);  
    if (wait_for_proc_work) {  
        if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |  
            BINDER_LOOPER_STATE_ENTERED))) {  
                binder_user_error("binder: %d:%d ERROR: Thread waiting "  
                    "for process work before calling BC_REGISTER_"  
                    "LOOPER or BC_ENTER_LOOPER (state %x)\n",  
                    proc->pid, thread->pid, thread->looper);  
                wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);  
        }  
        binder_set_nice(proc->default_priority);  
        if (non_block) {  
            if (!binder_has_proc_work(proc, thread))  
                ret = -EAGAIN;  
        } else  
            /* the process will eventually block here, waiting to be woken up */
            ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));  
    } else {  
        if (non_block) {  
            if (!binder_has_thread_work(thread))  
                ret = -EAGAIN;  
        } else  
            ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));  
    }  
        .......  
}  
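
The condition the process sleeps on, binder_has_proc_work(), simply checks whether the process-wide todo list has work (abridged from the driver):

static int binder_has_proc_work(struct binder_proc *proc,
                                struct binder_thread *thread)
{
    return !list_empty(&proc->todo) ||
        (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

The wake-up happens when another process queues a work item on proc->todo and signals the proc->wait wait queue.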

The ServiceManager process thus ends up blocking in the kernel in wait_event_interruptible_exclusive, waiting for a request to wake it up.
Over the whole sequence, ServiceManager has done the following:

1. Opened the /dev/binder file: open("/dev/binder", O_RDWR);
2. Established a 128K memory mapping: mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
3. Told the Binder driver that it is the daemon: binder_become_context_manager(bs);
4. Entered the loop waiting for requests: binder_loop(bs, svcmgr_handler);

On the kernel side this created one struct binder_proc, one struct binder_thread, and one struct binder_node, and the process finally blocks in wait_event_interruptible_exclusive until a request wakes it up.
