Part 2 Binder (2): The Creation Process of ServiceManager
ServiceManager is started when init parses init.rc, and if ServiceManager crashes and is restarted, the other system services are restarted along with it.
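For reference, the servicemanager entry in init.rc looks roughly like the following (a sketch based on a typical AOSP 4.x/5.x release; the exact option list varies by version). The critical option makes init treat repeated crashes as fatal, and the onrestart lines are what restart the dependent services when ServiceManager dies:
service servicemanager /system/bin/servicemanager
    class core
    user system
    group system
    critical
    onrestart restart healthd
    onrestart restart zygote
    onrestart restart media
    onrestart restart surfaceflinger
    onrestart restart drm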
0. Overall flow
The main function does the following things:
1. Open the Binder driver and perform the initialization
2. Register itself as the context manager
3. Enter the main loop and wait for IPC messages to handle
int main(int argc, char **argv)
{
struct binder_state *bs;
//open the binder driver and request a 128 KB mapping
bs = binder_open(128*1024); //1
//become the context manager
if (binder_become_context_manager(bs)) { //2
}
//enter an infinite loop and handle requests coming from clients
binder_loop(bs, svcmgr_handler); //3
return 0;
}
1. binder_open
open("/dev/binder", O_RDWR) triggers the binder_open function inside the Binder driver
ioctl triggers binder_ioctl inside the Binder driver
mmap triggers binder_mmap inside the Binder driver
The user-space binder_open does the following things:
1. Initialize the binder_state object bs: store the file descriptor of /dev/binder in bs->fd, the size of the mapped region in bs->mapsize, and the start address of the mapped region in bs->mapped
2. Send the BINDER_VERSION command via ioctl to check the driver version
3. Map the memory with mmap
struct binder_state *binder_open(size_t mapsize)
{
struct binder_state *bs;
struct binder_version vers;
//allocate memory for bs
bs = malloc(sizeof(*bs));
//open the binder driver and store the file descriptor in bs
bs->fd = open("/dev/binder", O_RDWR);
//query the Binder driver version
if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
(vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
}
//record the mapping size in bs
bs->mapsize = mapsize;
//map a block of memory via mmap
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
if (bs->mapped == MAP_FAILED) {
}
return bs;
}
1.1 open("/dev/binder", O_RDWR);
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
//allocate and initialize the binder_proc object proc
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
get_task_struct(current);
//record the current task in proc->tsk
proc->tsk = current;
//initialize the todo list; the work queued on it is processed after this process is woken up
INIT_LIST_HEAD(&proc->todo);
//initialize the wait queue used to put this process to sleep
init_waitqueue_head(&proc->wait);
//add this proc to the global binder_procs list
hlist_add_head(&proc->proc_node, &binder_procs);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
filp->private_data = proc; //store proc in filp->private_data
return 0;
}
1.2 ioctl(bs->fd, BINDER_VERSION, &vers)
case BINDER_VERSION:
if (size != sizeof(struct binder_version)) {
}
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
}
break;
1.3 mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
This maps a region of mapsize bytes that is shared between user space and kernel space: the driver's binder_mmap reserves the kernel-side addresses and maps the same physical pages into both address spaces, so incoming transaction data only needs to be copied once.
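binder_mmap itself is not listed in this note. As a rough reference, the following is a trimmed sketch of its key steps, based on the same era of kernel sources quoted above (details differ between kernel versions; error handling, locking, vm_ops setup and the binder_buffer bookkeeping are omitted). It shows how one range of physical pages ends up with both a kernel-space and a user-space address:
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
    struct vm_struct *area;
    struct binder_proc *proc = filp->private_data;
    //the mapping is capped at 4 MB (ServiceManager only asked for 128 KB)
    if ((vma->vm_end - vma->vm_start) > SZ_4M)
        vma->vm_end = vma->vm_start + SZ_4M;
    //reserve a matching range of kernel virtual addresses
    area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
    proc->buffer = area->addr;
    //fixed offset between the user-space and kernel-space views of the same buffer
    proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
    proc->buffer_size = vma->vm_end - vma->vm_start;
    //allocate physical pages and map them into both the kernel area and the user vma;
    //only the first page is mapped here, the rest are populated on demand
    binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
    return 0;
}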
2. binder_become_context_manager: become the context manager
2.1 binder_become_context_manager
ServiceManager starts very early, which guarantees that it becomes the context manager.
int binder_become_context_manager(struct binder_state *bs)
{
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
This ioctl call ends up in the binder_ioctl function inside the Binder driver, which dispatches on the command protocol and reaches the BINDER_SET_CONTEXT_MGR case;
in other words, it sends the BINDER_SET_CONTEXT_MGR command to the Binder driver.
2.2 binder_ioctl
case BINDER_SET_CONTEXT_MGR:
//binder_context_mgr_node is a global variable
if (binder_context_mgr_node != NULL) {//there can be only one context manager in the system
...
goto err;
}
//create a binder_node from the context manager's binder_proc (proc) and assign it to the global variable
binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
if (binder_context_mgr_node == NULL) {
ret = -ENOMEM;
goto err;
}
//bump the reference counts
binder_context_mgr_node->local_weak_refs++;
binder_context_mgr_node->local_strong_refs++;
binder_context_mgr_node->has_strong_ref = 1;
binder_context_mgr_node->has_weak_ref = 1;
break;
This assigns binder_context_mgr_node; there is exactly one binder_context_mgr_node in the whole system.
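Because of this, any other process can reach ServiceManager simply by using handle 0 as the transaction target: when the driver sees target handle 0 it resolves it to binder_context_mgr_node. As a rough illustration, a minimal lookup client built on the bio_*/binder_call helpers declared in servicemanager's binder.h could look like the sketch below (lookup_service is a hypothetical name; real clients go through libbinder's defaultServiceManager() instead):
#include "binder.h" /* servicemanager's helper header: binder_state, bio_*, binder_call, SVC_MGR_* */

uint32_t lookup_service(struct binder_state *bs, const char *name)
{
    uint32_t handle;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0);                /* strict mode header */
    bio_put_string16_x(&msg, SVC_MGR_NAME); /* interface token, read before the switch in svcmgr_handler */
    bio_put_string16_x(&msg, name);         /* name of the service to look up */

    /* target handle 0 always refers to the context manager node */
    if (binder_call(bs, &msg, &reply, 0, SVC_MGR_CHECK_SERVICE))
        return 0;

    handle = bio_get_ref(&reply);           /* handle to the requested service */
    binder_done(bs, &msg, &reply);
    return handle;
}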
2.3 binder_new_node
binder_new_node(proc, NULL, NULL);
static struct binder_node *binder_new_node(struct binder_proc *proc,
void __user *ptr,
void __user *cookie)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
struct binder_node *node;
while (*p) {
parent = *p;
node = rb_entry(parent, struct binder_node, rb_node);
if (ptr < node->ptr)
p = &(*p)->rb_left;
else if (ptr > node->ptr)
p = &(*p)->rb_right;
else
return NULL;
}
//allocate memory for the binder_node
node = kzalloc(sizeof(*node), GFP_KERNEL);
binder_stats_created(BINDER_STAT_NODE);
//link the newly created node into the proc's red-black tree
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
node->debug_id = ++binder_last_id;
node->proc = proc;//record the owning proc on the node
node->ptr = ptr;//the ptr passed in here is NULL for the context manager
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
return node;
}
3. binder_loop
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
//the user-space (i.e. ServiceManager's) binder_write_read; its write_buffer and read_buffer decide whether the driver performs a write, a read, or both
struct binder_write_read bwr;
uint32_t readbuf[32];
//no data to write
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
//set up the command to send
readbuf[0] = BC_ENTER_LOOPER;
//send BC_ENTER_LOOPER to the Binder driver so that ServiceManager enters the looper state
binder_write(bs, readbuf, sizeof(uint32_t));
//there is no local message queue; every message is fetched from the Binder driver
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;//incoming messages are read into readbuf through bwr
//in the loop, write_size is 0 and read_size is non-zero, so this ioctl only performs a read and blocks until data arrives
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
}
//parse the commands returned by the driver
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
if (res == 0) {
ALOGE("binder_loop: unexpected reply?!\n");
break;
}
if (res < 0) {
ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
break;
}
}
}
3.1 binder_write
// uint32_t readbuf[32];
//readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t));
int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
}
return res;
}
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); lands in the BINDER_WRITE_READ case of binder_ioctl:
case BINDER_WRITE_READ: {
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
}
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
}
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
}
break;
}
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
void __user *buffer, int size, signed long *consumed)
{
uint32_t cmd;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
switch (cmd) {
case BC_ENTER_LOOPER:
if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
}
thread->looper |= BINDER_LOOPER_STATE_ENTERED;
break;
}
*consumed = ptr - buffer;
}
return 0;
}
3.2 res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
Because write_size is 0 while read_size is non-zero, ServiceManager goes to sleep here, waiting to be woken up by another process.
Note: after being woken up, it continues executing the code inside binder_thread_read, fetches the IPC data handed over by the other process, and then returns to binder_ioctl, where copy_to_user copies the data from kernel space back to user space. Execution thus returns to binder_loop, and if no error occurred along the way it reaches the line binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);.
case BINDER_WRITE_READ: {
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
}
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
}
if (bwr.write_size > 0) {
//not executed in this case
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
//the code below runs after this thread returns from binder_thread_read, i.e. after it has been woken up
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);//if proc->todo still has work, wake up other waiting threads of this process
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
//copy the data from kernel space back to user space
}
}
}
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
}
break;
}
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
void __user *buffer, int size,
signed long *consumed, int non_block)
{
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
int ret = 0;
int wait_for_proc_work;
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo);
thread->looper |= BINDER_LOOPER_STATE_WAITING;
if (wait_for_proc_work)
proc->ready_threads++;
mutex_unlock(&binder_lock);
if (wait_for_proc_work) {
//BINDER_LOOPER_STATE_ENTERED was set when the BC_ENTER_LOOPER sent via binder_write was processed, so this error branch is not taken
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
wait_event_interruptible(binder_user_error_wait,
binder_stop_on_user_error < 2);
}
binder_set_nice(proc->default_priority);
if (non_block) {//non_block comes from filp->f_flags & O_NONBLOCK; ServiceManager did not open the device with O_NONBLOCK, so this branch is not taken
if (!binder_has_proc_work(proc, thread))
ret = -EAGAIN;
} else//go to sleep until binder_has_proc_work() becomes true
ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
}
//after waking up, the work items on the todo list are turned into BR_* commands and copied into the read buffer (omitted here)
}
3.3 binder_parse
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
switch(cmd) {
case BR_NOOP:
break;
case BR_TRANSACTION_COMPLETE:
break;
case BR_TRANSACTION: {
//the BR_TRANSACTION case (expanded in 3.3.1)
break;
}
case BR_REPLY: {
//the BR_REPLY case (expanded in 3.3.2)
break;
}
}
}
return r;
}
3.3.1 BR_TRANSACTION
The BR_TRANSACTION flow is: call func, i.e. svcmgr_handler, to handle the request, and then send the result back to the Binder driver.
The operations include addService and checkService, and they all work on the svcinfo entries managed by ServiceManager's svclist.
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
//handle the message
res = func(bs, txn, &msg, &reply);
//send the reply data back to the Binder driver
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
ptr += sizeof(*txn);
break;
}
3.3.2 BR_REPLY
The BR_REPLY case does not do anything substantial here.
case BR_REPLY: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
}
binder_dump_txn(txn);
if (bio) {
bio_init_from_txn(bio, txn);
bio = 0;
} else {
}
ptr += sizeof(*txn);
r = 0;
break;
}
3.4 binder_send_reply
binder_send_reply packs a BC_FREE_BUFFER command and a BC_REPLY command into a single buffer and hands them to the driver through binder_write (the same helper shown in 3.1):
int binder_write(struct binder_state *bs, void *data, unsigned len)
{
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (unsigned) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
fprintf(stderr,"binder_write: ioctl failed (%s)\n",
strerror(errno));
}
return res;
}
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
void *buffer_to_free,
int status)
{
struct {
uint32_t cmd_free;
void *buffer;
uint32_t cmd_reply;
struct binder_txn txn;
} __attribute__((packed)) data;
data.cmd_free = BC_FREE_BUFFER;//ask the driver to free the kernel buffer that carried the incoming transaction
data.buffer = buffer_to_free;
data.cmd_reply = BC_REPLY;//then send the reply transaction back to the caller
data.txn.target = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) {//on error the reply carries only the status code
data.txn.flags = TF_STATUS_CODE;
data.txn.data_size = sizeof(int);
data.txn.offs_size = 0;
data.txn.data = &status;
data.txn.offs = 0;
} else {
data.txn.flags = 0;
data.txn.data_size = reply->data - reply->data0;
data.txn.offs_size = ((char*) reply->offs) - ((char*) reply->offs0);
data.txn.data = reply->data0;
data.txn.offs = reply->offs0;
}
binder_write(bs, &data, sizeof(data));
}
3.5 svcmgr_handler
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
switch(txn->code) {
//look up a service
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
bio_put_ref(reply, handle);
return 0;
//register a service
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))
return -1;
break;
//list the registered services
case SVC_MGR_LIST_SERVICES: {
uint32_t n = bio_get_uint32(msg);
if (!svc_can_list(txn->sender_pid)) {
}
si = svclist;
while ((n-- > 0) && si)
si = si->next;
if (si) {
bio_put_string16(reply, si->name);
return 0;
}
return -1;
}
default:
}
bio_put_uint32(reply, 0);
return 0;
}
3.6 svcinfo and svclist
struct svcinfo
{
struct svcinfo *next;//next node in the linked list
uint32_t handle;//handle of the service's binder object (used as si->handle below)
struct binder_death death;
int allow_isolated;
size_t len;
uint16_t name[0];//name of the service
};
struct svcinfo *svclist = 0;//head of the svclist linked list
3.7 do_add_service
int do_add_service(struct binder_state *bs,
const uint16_t *s, size_t len,
uint32_t handle, uid_t uid, int allow_isolated,
pid_t spid)
{
struct svcinfo *si;
if (!handle || (len == 0) || (len > 127))
return -1;
//permission check
if (!svc_can_register(s, len, spid)) {
}
//check whether the service is already registered
si = find_svc(s, len);
if (si) {//a matching svcinfo already exists in the list
if (si->handle) {
ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
str8(s, len), handle, uid);
svcinfo_death(bs, si);
}
si->handle = handle;
} else {//not found, so allocate a new svcinfo and add it
si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
si->handle = handle;
si->len = len;
memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
si->name[len] = '\0';
si->death.func = (void*) svcinfo_death;
si->death.ptr = si;
si->allow_isolated = allow_isolated;
si->next = svclist;//insert at the head of svclist
svclist = si;//update the list head
}
binder_acquire(bs, handle);
binder_link_to_death(bs, handle, &si->death);
return 0;
}
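Looked at from the other side, do_add_service is what ultimately handles the SVC_MGR_ADD_SERVICE request a server sends when registering itself. A minimal sketch of such a registration, again built on the bio_*/binder_call helpers from servicemanager's binder.h, is shown below (publish_service is a hypothetical name; in practice services call defaultServiceManager()->addService() in libbinder, and the driver converts the flat_binder_object written by bio_put_obj into the handle that do_add_service receives):
#include "binder.h" /* servicemanager's helper header: binder_state, bio_*, binder_call, SVC_MGR_* */

int publish_service(struct binder_state *bs, const char *name, void *ptr)
{
    int status;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0);                /* strict mode header */
    bio_put_string16_x(&msg, SVC_MGR_NAME); /* interface token */
    bio_put_string16_x(&msg, name);         /* service name */
    bio_put_obj(&msg, ptr);                 /* binder object of the service; becomes the handle in do_add_service */
    bio_put_uint32(&msg, 0);                /* allow_isolated = 0, matching the bio_get_uint32 in svcmgr_handler */

    /* handle 0 == ServiceManager; SVC_MGR_ADD_SERVICE lands in the case shown in 3.5 */
    if (binder_call(bs, &msg, &reply, 0, SVC_MGR_ADD_SERVICE))
        return -1;

    status = bio_get_uint32(&reply);        /* svcmgr_handler replies with bio_put_uint32(reply, 0) on success */
    binder_done(bs, &msg, &reply);
    return status;
}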
3.8 do_find_service
uint32_t do_find_service(struct binder_state *bs, const uint16_t *s, size_t len, uid_t uid, pid_t spid)
{
//look up the service in svclist
struct svcinfo *si = find_svc(s, len);
if (!si || !si->handle) {
return 0;
}
if (!si->allow_isolated) {
// If this service doesn't allow access from isolated processes,
// then check the uid to see if it is isolated.
uid_t appid = uid % AID_USER;
if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END) {
return 0;
}
}
//permission check: is the caller allowed to find this service?
if (!svc_can_find(s, len, spid)) {
return 0;
}
return si->handle;
}
3.9 find_svc
Walk svclist and compare each svcinfo's name with the one passed in; return the matching svcinfo, or 0 if nothing matches.
struct svcinfo *find_svc(uint16_t *s16, unsigned len)
{
struct svcinfo *si;
for (si = svclist; si; si = si->next) {
if ((len == si->len) &&
!memcmp(s16, si->name, len * sizeof(uint16_t))) {
return si;
}
}
return 0;
}
Summary
The context manager first goes through a series of operations (open, mmap, BINDER_SET_CONTEXT_MGR, ioctl) to prepare the buffer that receives IPC data and to register itself as the manager, and then goes to sleep.
When another process sends IPC data to the context manager and wakes it up, the context manager performs the corresponding operation, such as addService or getService, according to the RPC data and RPC code carried in the IPC data (these operations are really just additions to and lookups in the svcinfo list svclist), and then sends the processed result back to the Binder driver.
References
The AOSP source code
《深入理解Android内核设计》
Binder系列3—启动ServiceManager