ServiceManager是android系统中Binder IPC通信过程中的守护进程,主要负责系统服务的注册和获取。
ServiceManager进程是init进程通过init.rc脚本里的配置文件启动的。init.rc有关的配置如下,
# servicemanager is a core native service started directly by init,
# running as system:system.
service servicemanager /system/bin/servicemanager
class core
user system
group system
# critical: if this service exits repeatedly in a short window, init
# reboots the device (servicemanager is indispensable).
critical
# Every binder client caches handles obtained from servicemanager, so
# when it restarts these dependent daemons must be restarted too.
onrestart restart healthd
onrestart restart zygote
onrestart restart media
onrestart restart surfaceflinger
onrestart restart drm
可以看到,servicemanager是一种native service。这种native service都是需要用C/C++编写的。
Service Manager Service对应的实现代码位于frameworks/native/cmds/servicemanager/service_manager.c文件中。
这个文件中有每个C程序员都熟悉的main()函数。
service_manager.c的main方法主要代码如下,
/* Entry point of the servicemanager daemon: open the binder driver,
 * claim the context-manager role, then loop forever serving requests. */
int main(int argc, char **argv)
{
struct binder_state *bs;
/* Open /dev/binder and mmap 128KB of buffer space for transactions. */
bs = binder_open(128*1024);
•••
/* Register this process as the system's single context manager. */
if (binder_become_context_manager(bs)) {
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
•••
/* Enter the infinite dispatch loop; incoming requests from the binder
 * driver are handed to svcmgr_handler.  Does not return in practice. */
binder_loop(bs, svcmgr_handler);
return 0;
}
1,声明一个binder_state类型的结构体。
2,调用binder_open方法打开binder驱动。
3,调用binder_become_context_manager方法注册成为binder服务的大管家。
4,调用binder_loop方法进入无限循环, 处理binder驱动发来的请求。
struct binder_state *bs;
binder.c 中binder_state定义如下,
/* Per-process handle on the binder driver. */
struct binder_state
{
int fd; // file descriptor of the opened driver file /dev/binder
void *mapped; //start address of the mmap'ed driver buffer
size_t mapsize; //size of that mapping; 128KB by default here
};
打开驱动的调用流程图如下,
binder_open的方法如下,
/*
 * Open the binder driver, verify that the kernel and userspace agree on
 * the binder protocol version, and map `mapsize` bytes of the driver's
 * transaction buffer into this process (read-only, private mapping).
 *
 * Returns a heap-allocated binder_state owned by the caller, or NULL on
 * failure (errno describes the error; all partially acquired resources
 * are released).
 */
struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    /* System call into the kernel: open the binder device driver. */
    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open device (%s)\n",
                strerror(errno));
        goto fail_open;
    }

    /* Query the driver's protocol version via ioctl and refuse to run
     * against a mismatched kernel. */
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr,
                "binder: kernel driver version (%d) differs from user space version (%d)\n",
                vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        /* BUGFIX: was `goto fail_open`, which freed bs but leaked the
         * already-open bs->fd; fail_map closes the fd first. */
        goto fail_map;
    }

    bs->mapsize = mapsize;
    /* Map the driver's buffer so incoming transaction data can be read
     * without an extra copy. */
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map;
    }

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}
这三个方法都是通过系统调用来对binder驱动进行操作。
Binder.c 的binder_become_context_manager方法如下,
/* Ask the binder driver to make this process the system-wide context
 * manager (handle 0).  Returns 0 on success, -1 on failure with errno
 * set by ioctl(). */
int binder_become_context_manager(struct binder_state *bs)
{
    int status;

    /* BINDER_SET_CONTEXT_MGR carries no meaningful payload here. */
    status = ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
    return status;
}
binder_become_context_manager()的作用是让当前进程成为整个系统中唯一的上下文管理器,即service管理器。
仅仅是把BINDER_SET_CONTEXT_MGR发送到binder驱动而已。通过调用ioctl文件操作函数来通知Binder驱动程
序自己是守护进程,命令是BINDER_SET_CONTEXT_MGR.
binder.h中对该命令的定义如下,
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
内核态的binder驱动的binder_ioctl方法对BINDER_SET_CONTEXT_MGR命令处理如下,
/* Create the binder node representing the context manager (both ptr and
 * cookie are 0) and record it in the driver-wide static variable. */
binder_context_mgr_node = binder_new_node(proc, 0, 0);
•••
/* Pin the node with local strong/weak references so it is never freed
 * for as long as the driver lives. */
binder_context_mgr_node->local_weak_refs++;
binder_context_mgr_node->local_strong_refs++;
binder_context_mgr_node->has_strong_ref = 1;
binder_context_mgr_node->has_weak_ref = 1;
binder驱动为servicemanager进程生成一个binder_node节点,并记入静态变量binder_context_mgr_node。
binder_loop调用流程图如下,
binder.c的binder_loop方法如下,
/* Main dispatch loop of a binder looper thread.
 *
 * Announces BC_ENTER_LOOPER to the driver, then repeatedly blocks in
 * BINDER_WRITE_READ and feeds each batch of returned commands to
 * binder_parse(), which invokes `func` for incoming transactions.
 * Returns only when an ioctl or parse error occurs. */
void binder_loop(struct binder_state *bs, binder_handler func)
{
    uint32_t buf[32];
    struct binder_write_read bwr;
    int ret;

    /* The read iterations never carry an outgoing payload. */
    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    /* Tell the driver this thread is entering its looper state. */
    buf[0] = BC_ENTER_LOOPER;
    binder_write(bs, buf, sizeof(uint32_t));

    while (1) {
        bwr.read_size = sizeof(buf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) buf;

        /* Block in the driver until it has commands for us. */
        ret = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (ret < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        /* Decode the returned command stream and dispatch it. */
        ret = binder_parse(bs, 0, (uintptr_t) buf, bwr.read_consumed, func);
        if (ret == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (ret < 0) {
            ALOGE("binder_loop: io error %d %s\n", ret, strerror(errno));
            break;
        }
    }
}
binder_loop方法主要分为3个阶段,
1,首先调用binder_write方法向binder驱动发送BC_ENTER_LOOPER命令, 告诉binder驱动“本线程要进入循环状态了”。
2,然后调用ioctl方法读取binder驱动信息。
3,读取驱动信息之后,调用binder_parse方法进行解析。
binder_write方法如下,
int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
fprintf(stderr,"binder_write: ioctl failed (%s)\n",
strerror(errno));
}
return res;
}
实质也是调用ioctl方法。发送BINDER_WRITE_READ命令,内容为BC_ENTER_LOOPER。
binder驱动的binder_ioctl方法对BINDER_WRITE_READ命令处理如下,
/* Kernel-side handling of the combined write/read request. */
case BINDER_WRITE_READ: {
struct binder_write_read bwr;
/* The argument must be exactly one struct binder_write_read. */
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto err;
}
/* Copy the request descriptor from user space into the kernel. */
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d write %lld at %016llx, read %lld at %016llx\n",
proc->pid, thread->pid,
(u64)bwr.write_size, (u64)bwr.write_buffer,
(u64)bwr.read_size, (u64)bwr.read_buffer);
/* Drain outgoing BC_* commands from the caller first, if any. */
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0) {
/* On error, report how much was consumed and bail out. */
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
首先调用copy_from_user方法将用户态数据复制到内核态,然后调用binder_thread_write进行处理,有关BC_ENTER_LOOPER处理如下,
case BC_ENTER_LOOPER:
/* A driver-spawned (registered) thread must not also announce
 * BC_ENTER_LOOPER; mark its looper state invalid if it does. */
if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
}
/* Record that this thread has entered its looper. */
thread->looper |= BINDER_LOOPER_STATE_ENTERED;
break;
执行完BC_ENTER_LOOPER时,thread->looper值就变为BINDER_LOOPER_STATE_ENTERED了,表明当前线程进入循环状态了。
回到binder_ioctl函数,由于bwr.read_size == 0,binder_thread_read函数就不会被执行了,这样,binder_ioctl的任务就完成了。
读取驱动信息也是一个ioctl命令,发送的是BINDER_WRITE_READ命令
输入参数bwr各个成员的值
/* Values of bwr when the loop issues its blocking read: nothing to
 * write, a 128-byte read buffer. */
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER;
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
/* BUGFIX: the cast must be (uintptr_t), matching binder_loop itself;
 * (unsigned) truncates the pointer on 64-bit builds. */
bwr.read_buffer = (uintptr_t) readbuf;
再次进入到binder_ioctl方法, binder_ioctl对BINDER_WRITE_READ命令处理如下,
/* Copy the bwr descriptor from user space. */
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
/* First process any outgoing BC_* commands from the caller. */
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
/* Then fill the caller's read buffer with BR_* work; this is where a
 * servicemanager looper thread blocks waiting for requests. */
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
/* More work still queued on the process: wake another waiter. */
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
•••
这次,bwr.write_size等于0,于是不会执行binder_thread_write方法;而bwr.read_size等于sizeof(readbuf),即128字节(32个uint32_t),大于0,
于是进入到binder_thread_read方法:
static int binder_thread_read(struct binder_proc *proc,
•••
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
•••
当然,这时候该读取线程一直在唱独角戏, 线程处于等待状态。那什么时候会唤醒呢?
binder_parse方法中会处理各种类型的消息,现在主要看BR_TRANSACTION类型消息的处理,主要包含服务的注册和获取。
相关代码如下,
/* An incoming transaction addressed to the service manager. */
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
/* reply writes into rdata; reserve room for 4 offsets. */
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn); //unpack the binder_io payload from txn
/* Dispatch to the registered handler (svcmgr_handler). */
res = func(bs, txn, &msg, &reply);
/* Send the reply and free the kernel transaction buffer. */
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
ptr += sizeof(*txn);
break;
}
1,首先从txn解析出binder_io信息。
2,回调svcmgr_handler方法进行处理,func指向的是service_manager.c中的svcmgr_handler方法,binder驱动的请求会回调该方法。
3,处理完成之后通知处理结果。
service_manager.c的svcmgr_handler有关注册和获取服务的处理如下,
/* Look up a service by its UTF-16 name and reply with a handle ref. */
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
if (!handle)
break;
bio_put_ref(reply, handle);
return 0;
/* Register a service: the payload carries name, a binder reference,
 * and the allow_isolated flag. */
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))
return -1;
break;
注册服务时会调用do_add_service方法。
获取服务时会调用do_find_service方法。
某个服务进程调用Service Manager Service接口,向其注册service。这个注册动作到最后就会走到svcmgr_handler()的
case SVC_MGR_ADD_SERVICE分支。此时会先获取三个数据,而后再调用do_add_service()函数。
do_add_service方法如下,
/* Register service `s` (UTF-16 name of `len` characters) under binder
 * handle `handle` on behalf of the caller identified by uid/spid.
 * Returns 0 on success, -1 on rejection (bad arguments, permission
 * denied, or out of memory). */
int do_add_service(struct binder_state *bs,
                   const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid, int allow_isolated,
                   pid_t spid)
{
    struct svcinfo *svc;

    /* Reject a null handle and unreasonable name lengths. */
    if (!handle || (len == 0) || (len > 127))
        return -1;

    /* Only privileged callers may register services. */
    if (!svc_can_register(s, len, spid)) {
        ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
             str8(s, len), handle, uid);
        return -1;
    }

    svc = find_svc(s, len);
    if (svc) {
        /* Name already present: drop the old death notification (if the
         * old handle was live) and overwrite with the new handle. */
        if (svc->handle) {
            ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
                 str8(s, len), handle, uid);
            svcinfo_death(bs, svc);
        }
        svc->handle = handle;
    } else {
        /* New name: allocate a node with the name stored inline and push
         * it onto the front of svclist. */
        svc = malloc(sizeof(*svc) + (len + 1) * sizeof(uint16_t));
        if (!svc) {
            ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
                 str8(s, len), handle, uid);
            return -1;
        }
        svc->handle = handle;
        svc->len = len;
        memcpy(svc->name, s, (len + 1) * sizeof(uint16_t));
        svc->name[len] = '\0';
        svc->death.func = (void*) svcinfo_death;
        svc->death.ptr = svc;
        svc->allow_isolated = allow_isolated;
        svc->next = svclist;
        svclist = svc;
    }

    /* Take a strong reference on the handle and subscribe to its death
     * notification so the entry can be cleaned up if the service dies. */
    binder_acquire(bs, handle);
    binder_link_to_death(bs, handle, &svc->death);
    return 0;
}
1,首先调用svc_can_register方法检查进程是否可以注册服务。并不是android系统中所有的服务都可以注册, 如果发起端
是root进程或者system server进程的话,是可以注册service的;至于其他绝大部分普通进程,不允许注册service。
2,调用find_svc方法在service链表里查询对应的service是否已经添加过了。如果可以查到,那么就不用生成新的service
节点了。否则就需要在链表起始处再加一个新节点,节点类型为svcinfo。
3,如果未注册,则添加到service链表中。
在service被注册进service manager之后,其他应用都可以调用ServiceManager的getService()来获取相应的服务代理,
并调用代理的成员函数。这个getService()函数最终会向service manager进程发出SVC_MGR_GET_SERVICE命令,
调用do_find_service方法。do_find_service方法如下,
/* Look up service `s`/`len` and return its binder handle, or 0 when the
 * service is unknown, has no live handle yet, or the caller is not
 * permitted to see it. */
uint32_t do_find_service(struct binder_state *bs, const uint16_t *s, size_t len, uid_t uid, pid_t spid)
{
    struct svcinfo *svc = find_svc(s, len);

    if (!svc || !svc->handle)
        return 0;

    if (!svc->allow_isolated) {
        /* Service refuses isolated callers: reject isolated app uids. */
        uid_t appid = uid % AID_USER;
        if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END)
            return 0;
    }

    /* Permission check on the requesting process. */
    if (!svc_can_find(s, len, spid))
        return 0;

    return svc->handle;
}
直接调用find_svc方法从svclist链表中查找服务。
find_svc方法如下,
/* Linear scan of the registered-service list for an exact match on both
 * the name length and its UTF-16 bytes; NULL when not found. */
struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *node = svclist;

    while (node) {
        if (node->len == len &&
            memcmp(s16, node->name, len * sizeof(uint16_t)) == 0) {
            return node;
        }
        node = node->next;
    }
    return NULL;
}
结构体svcinfo定义如下,
/* One registered service; nodes form the singly-linked svclist. */
struct svcinfo
{
struct svcinfo *next; // next registered service, or NULL
uint32_t handle; // binder handle (reference) for the service
struct binder_death death; // death-notification callback + cookie
int allow_isolated; // nonzero if isolated processes may access it
size_t len; // name length in uint16_t characters
uint16_t name[0]; // UTF-16 name stored inline after the struct
};
其实,使用handle来表示服务代理对应的句柄值,最后返回的也是handle。
当svcmgr_handler()返回后,会接着把整理好的reply对象send出去, 也就是把查找到的信息,发送给发起查找的一方。
Binder.c的binder_send_reply方法如下,
/* Send the reply for a just-handled transaction back to the driver and
 * release the incoming transaction buffer in the same write.
 *
 * buffer_to_free: kernel buffer of the incoming transaction, returned
 *                 to the driver via BC_FREE_BUFFER.
 * status:         0 for a normal reply carrying `reply`'s payload;
 *                 non-zero to report an error via TF_STATUS_CODE. */
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    /* BC_FREE_BUFFER and BC_REPLY are packed into one command stream. */
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) pkt;

    pkt.cmd_free = BC_FREE_BUFFER;
    pkt.buffer = buffer_to_free;
    pkt.cmd_reply = BC_REPLY;
    pkt.txn.target.ptr = 0;
    pkt.txn.cookie = 0;
    pkt.txn.code = 0;

    if (status) {
        /* Error path: the payload is the status code itself. */
        pkt.txn.flags = TF_STATUS_CODE;
        pkt.txn.data_size = sizeof(int);
        pkt.txn.offsets_size = 0;
        pkt.txn.data.ptr.buffer = (uintptr_t)&status;
        pkt.txn.data.ptr.offsets = 0;
    } else {
        /* Normal path: point the driver at the marshalled reply data
         * and its object-offset table. */
        pkt.txn.flags = 0;
        pkt.txn.data_size = reply->data - reply->data0;
        pkt.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
        pkt.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        pkt.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }

    /* One write-only BINDER_WRITE_READ carries both commands down. */
    binder_write(bs, &pkt, sizeof(pkt));
}
直接调用binder_write方法,如下,
/* Write-only BINDER_WRITE_READ: push `len` bytes of commands at `data`
 * down to the driver; read_size is 0 so nothing is read back.  Returns
 * the ioctl() result (< 0 on failure). */
int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
fprintf(stderr,"binder_write: ioctl failed (%s)\n",
strerror(errno));
}
return res;
}
还是调用ioctl方法进入到binder驱动中。
小结:
1, servicemanager进程打开binder驱动后,读取驱动的命令,相当于监听。
2, servicemanager进程一般也是调用ioctl方法通过系统调用进入到binder驱动。
3, servicemanager进程简单的完成消息的注册和获取,是binder的桥梁。
4, servicemanager和binder驱动交互通过简单的指令标记。
其实, binder驱动对应的文件是 /kernel/drivers目录里面的binder.c。
根据实际情况,该文件在/kernel/drivers里的不同目录下。
用户态的ioctl系统调用对应的是binder驱动binder.c中的binder_ioctl方法。