下面从源码分析ServiceManager的启动流程和服务流程。
@Service_manager.c(frameworks/base/cmds/servicemanager)
/* Entry point of servicemanager: open the binder driver, claim the
 * context-manager role (handle 0), then loop forever serving requests.
 * NOTE(review): abridged listing — LOGE and svcmgr_handle are declared
 * in the full source file. */
int main(int argc, char **argv) {
struct binder_state *bs;
void *svcmgr = BINDER_SERVICE_MANAGER;
/* open the binder device and map its buffer (128 KiB) */
bs = binder_open(128*1024);
/* register this process with the driver as the one and only
 * context manager (the "service manager") */
if (binder_become_context_manager(bs)) {
LOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
svcmgr_handle = svcmgr;
/* block in binder_loop(), dispatching each incoming request
 * to svcmgr_handler() */
binder_loop(bs, svcmgr_handler);
return 0;
}
main函数主要有三个功能:
1、打开binder设备,并把binder映射到本进程的虚拟地址空间(binder_open);
2、通过BINDER_SET_CONTEXT_MGR命令把自己注册为Binder的上下文管理者(binder_become_context_manager);
3、进入binder_loop循环,不断检测并处理Binder中的请求(binder_loop)。
下面我们就分别介绍这三个步骤。
@binder.c(\frameworks\base\cmds\servicemanager\)
/*
 * Open the binder driver and map `mapsize` bytes of its transaction
 * buffer into this process's address space.
 *
 * Returns a heap-allocated binder_state on success, or 0 on failure.
 * The original excerpt checked none of malloc/open/mmap; a failure
 * would have dereferenced NULL or used fd -1. All error paths now
 * release whatever was acquired before them.
 */
struct binder_state *binder_open(unsigned mapsize)
{
    struct binder_state *bs = malloc(sizeof(*bs));
    if (!bs) {
        return 0;
    }

    /* open the binder device node */
    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        fprintf(stderr, "binder: cannot open device (%s)\n", strerror(errno));
        goto fail_open;
    }

    bs->mapsize = mapsize;
    /* map the driver's buffer read-only into servicemanager's
     * virtual address space */
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr, "binder: cannot map device (%s)\n", strerror(errno));
        goto fail_map;
    }

    /* hand the fully initialized state back to the caller */
    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return 0;
}
上面打开Binder设备的过程其实就是构建一个binder_state结构体对象,然后对其各个成员初始化的过程。我们来看一下binder_state数据结构:
/* Per-process state for talking to the /dev/binder driver. */
struct binder_state {
/* file descriptor of the open /dev/binder device */
int fd;
/* start address at which the driver's buffer is mapped into this
 * process (the mmap result) — the SIZE of the mapping is mapsize */
void *mapped;
/* size in bytes of the mapped region, 128*1024 here */
unsigned mapsize;
};
这个结构体只有三个成员变量,其中fd是底层binder设备的文件描述符,mapped是ServiceManager得到的虚拟空间地址,这块虚拟空间映射了底层binder的物理地址,而mapsize是这块虚拟空间的大小,也就是128*1024。
介绍完打开binder设备并完成地址映射之后,再来看第二步:ServiceManager如何把自己注册为Binder的上下文管理者。
/* Ask the binder driver to make this process the context manager
 * (the well-known handle 0).  Returns the ioctl result: 0 on
 * success, -1 on error with errno set. */
int binder_become_context_manager(struct binder_state *bs)
{
    int status;

    /* BINDER_SET_CONTEXT_MGR is the registration message */
    status = ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
    return status;
}
我们看到,当ServiceManager向Binder驱动发送BINDER_SET_CONTEXT_MGR的消息时,Binder就会把他注册为“管理员”。
/*
 * Enter servicemanager's main service loop.
 *
 * First announces BC_ENTER_LOOPER to the driver, then blocks in
 * BINDER_WRITE_READ waiting for client requests, handing each batch
 * to binder_parse() which dispatches to `func` (svcmgr_handler).
 *
 * The original excerpt ignored both the ioctl and binder_parse
 * results, so a driver error left the loop spinning forever; both
 * are now checked and terminate the loop, matching the real source.
 */
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    /* tell the driver this thread is entering the looper state */
    binder_write(bs, readbuf, sizeof(unsigned));

    for (;;) {
        /* set up the read half only; we have nothing to write here */
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;

        /* block until the driver hands us client commands */
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0) {
            fprintf(stderr, "binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        /* decode the commands and build replies via `func` */
        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
        if (res <= 0) {
            fprintf(stderr, "binder_loop: io error %d (%s)\n", res, strerror(errno));
            break;
        }
    }
}
上面的过程表明了Service_manager进入循环的过程,主要分为三个步骤:首先通过binder_write()向Binder驱动发送BC_ENTER_LOOPER,表明自己进入循环状态;然后在循环中通过ioctl(BINDER_WRITE_READ)读取客户端的请求;最后调用binder_parse()解析并处理请求。
/* Walk the command stream the driver returned and act on each command.
 * Only BR_TRANSACTION does real work: it dispatches the payload to
 * `func` and sends the reply back to the driver.
 * NOTE(review): abridged listing — `end`, `res` and `r` are declared
 * in the full source, and the empty cases each carry code there. */
int binder_parse(struct binder_state *bs,struct binder_io *bio,uint32_t *ptr,uint32_t size,binder_handler func){
while (ptr < end) {
uint32_t cmd = *ptr++;
switch(cmd) {
case BR_NOOP:
case BR_TRANSACTION_COMPLETE:
case BR_INCREFS:
case BR_ACQUIRE:
case BR_RELEASE:
case BR_DECREFS:
break;
case BR_TRANSACTION: {
struct binder_txn *txn = (void *) ptr;
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
/* dispatch to svcmgr_handler() in service_manager.c */
res = func(bs, txn, &msg, &reply);
/* hand servicemanager's reply (and the buffer to free) back
 * to the binder driver */
binder_send_reply(bs, &reply, txn->data, res);
}
ptr += sizeof(*txn) / sizeof(uint32_t);
break;
}
case BR_REPLY:
case BR_DEAD_BINDER:
case BR_FAILED_REPLY:
case BR_DEAD_REPLY:
default:
}
}
return r;
}
这个函数中我们只关心BR_TRANSACTION分支,通过调用func去解析拿到的请求,然后把返回值作为回应通过binder_send_reply()函数返回给客户端。
binder_loop(bs, svcmgr_handler);
因此func()就是svcmgr_handler():
/* The request handler installed by binder_loop(): services the three
 * servicemanager operations — look up, register, and list services.
 * Returns 0 on success, -1 on failure.
 * NOTE(review): abridged listing — declarations of `s`, `len`, `ptr`,
 * `si` and `allow_isolated` are omitted here. */
int svcmgr_handler(struct binder_state *bs,struct binder_txn *txn,struct binder_io *msg,struct binder_io *reply){
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
/* look up a service by name and hand its binder ref back */
s = bio_get_string16(msg, &len);
ptr = do_find_service(bs, s, len, txn->sender_euid);
bio_put_ref(reply, ptr);
return 0;
case SVC_MGR_ADD_SERVICE:
/* register a new service under the given name */
s = bio_get_string16(msg, &len);
ptr = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
if (do_add_service(bs, s, len, ptr, txn->sender_euid, allow_isolated))
return -1;
break;
case SVC_MGR_LIST_SERVICES: {
/* return the name of the n-th registered service */
unsigned n = bio_get_uint32(msg);
si = svclist;
while ((n-- > 0) && si)
si = si->next;
if (si) {
bio_put_string16(reply, si->name);
return 0;
}
return -1;
}
}
return 0;
}
从svcmgr_handler的case分支我们可以看出,作为Service_manager主要完成三个功能:获取Service(SVC_MGR_GET_SERVICE/SVC_MGR_CHECK_SERVICE)、注册Service(SVC_MGR_ADD_SERVICE)、以及列出当前所有的Service(SVC_MGR_LIST_SERVICES)。
/*
 * Register service `s` (UTF-16 name, `len` characters) whose binder
 * object is `ptr`, on behalf of `uid`.  Returns 0 on success, -1 on
 * failure — the original excerpt fell off the end of this non-void
 * function (undefined behavior; the caller tests the return value)
 * and dereferenced malloc's result unchecked; both are fixed.
 */
int do_add_service(struct binder_state *bs, uint16_t *s, unsigned len,
                   void *ptr, unsigned uid, int allow_isolated)
{
    struct svcinfo *si;

    /* permission check: may this uid register under this name? */
    if (!svc_can_register(uid, s)) {
        return -1;
    }

    /* already registered? just swap in the new binder reference */
    si = find_svc(s, len);
    if (si) {
        if (si->ptr) {
            /* the old object is being replaced; fire its death handler */
            svcinfo_death(bs, si);
        }
        si->ptr = ptr;
    } else {
        /* allocate node plus room for the NUL-terminated UTF-16 name */
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {
            return -1;
        }
        si->ptr = ptr;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        /* link onto the head of svclist — this IS the registration */
        si->next = svclist;
        svclist = si;
    }
    return 0;
}
从add的过程可以看出,
所谓向ServiceManager注册一个服务,其实就是为当前的Service创建svcinfo的结构体,并把该结构体添加到svclist中。
/* Look up the service named `s` (UTF-16, `len` chars) in svclist on
 * behalf of `uid`.  Returns the service's binder reference, or 0 when
 * the service is unknown, not yet published, or the caller is an
 * isolated process and the service does not allow isolated access. */
void *do_find_service(struct binder_state *bs, uint16_t *s, unsigned len, unsigned uid) {
    struct svcinfo *si = find_svc(s, len);

    /* unknown name, or registered but no live binder object yet */
    if (!si || !si->ptr)
        return 0;

    if (!si->allow_isolated) {
        /* strip the user part of the uid and reject isolated app ids */
        unsigned appid = uid % AID_USER;
        if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END)
            return 0;
    }

    return si->ptr;
}
这个过程确实如我们所料,需要通过find_svc()在svclist中寻找需要的Service并把该Service节点发送给请求的Client。
case SVC_MGR_LIST_SERVICES: {
unsigned n = bio_get_uint32(msg);
si = svclist;
while ((n-- > 0) && si)
si = si->next;
if (si) {
/* return the name of the n-th entry in svclist to the requester
 * (one name per call, not the whole list) */
bio_put_string16(reply, si->name);
return 0;
}
return -1;
}
经过这些过程,ServiceManager就完成了解析数据的过程,下面就需要把相应的数据返回给客户端。
/* (Repeated excerpt of binder_parse(), abridged to the reply path:
 * `end`, `cmd`, `msg`, `reply`, `res` and `r` are declared in the
 * full source.) */
int binder_parse(struct binder_state *bs,struct binder_io *bio,uint32_t *ptr,uint32_t size,binder_handler func){
while (ptr < end) {
switch(cmd) {
case BR_TRANSACTION: {
struct binder_txn *txn = (void *) ptr;
binder_dump_txn(txn);
if (func) {
/* `reply` is the response built by svcmgr_handler() */
res = func(bs, txn, &msg, &reply);
/* hand the response data back to the binder driver */
binder_send_reply(bs, &reply, txn->data, res);
}
ptr += sizeof(*txn) / sizeof(uint32_t);
break;
}
}
}
return r;
}
在binder_parse()中又调用binder_send_reply()函数完成回应的操作:
/* Send servicemanager's reply for one transaction back to the driver.
 * Two commands are written in a single packed buffer:
 *   BC_FREE_BUFFER — release the transaction buffer we were handed;
 *   BC_REPLY       — the actual reply transaction.
 * When `status` is non-zero the reply carries only the error code
 * (flagged TF_STATUS_CODE); otherwise it carries the data and offsets
 * accumulated in `reply`.  The struct is packed because the driver
 * consumes it as a raw command stream. */
void binder_send_reply(struct binder_state *bs, struct binder_io *reply, void *buffer_to_free, int status) {
struct {
uint32_t cmd_free;
void *buffer;
uint32_t cmd_reply;
struct binder_txn txn;
} __attribute__((packed)) data;
data.cmd_free = BC_FREE_BUFFER;
data.buffer = buffer_to_free;
data.cmd_reply = BC_REPLY;
data.txn.target = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) {
/* error reply: payload is just the status code itself */
data.txn.flags = TF_STATUS_CODE;
data.txn.data_size = sizeof(int);
data.txn.offs_size = 0;
data.txn.data = &status;
data.txn.offs = 0;
} else {
/* success reply: sizes are the amounts actually consumed in `reply` */
data.txn.flags = 0;
data.txn.data_size = reply->data - reply->data0;
data.txn.offs_size = ((char*) reply->offs) - ((char*) reply->offs0);
data.txn.data = reply->data0;
data.txn.offs = reply->offs0;
}
/* push both commands to the driver in one write */
binder_write(bs, &data, sizeof(data));
}
在给Binder发送返回值时,构建了data的数据,并把reply放入其中,并标记了数据的大小,最后通过binder_write()函数将数据写到Binder中,而且写的方法仍然是调用ioctl()。
int binder_write(struct binder_state *bs, void *data, unsigned len) {
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (unsigned) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
//向Binder写数据
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
fprintf(stderr,"binder_write: ioctl failed (%s)\n", strerror(errno));
}
return res;
}
经过以上步骤,就完成了一次完整的请求调用过程。