When the system boots, a ServiceManager process is started first to manage Binder services. To write a Binder program, we first define our service on the server side (essentially a set of functions) and register it; a client can then look the service up, and once it holds the service it can make remote calls against it.
The C library wraps a number of low-level functions for us and exposes interfaces such as open, ioctl, mmap and close; we can use them without caring how they are implemented.
Commonly used functions
open: opens the driver node
mmap: maps memory between kernel space and user space
ioctl: passes commands and data between the application layer and the kernel layer
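To make the pattern concrete, here is a minimal, hypothetical sketch (not the actual ServiceManager code, which follows below) of how these three calls typically chain together when talking to the binder driver; the UAPI header path may differ between platforms:
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/android/binder.h>   /* BINDER_VERSION, struct binder_version (path may vary) */

int binder_demo(void)
{
    struct binder_version vers;
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);    /* open the driver node */
    if (fd < 0)
        return -1;
    if (ioctl(fd, BINDER_VERSION, &vers) < 0) {           /* send a command to the kernel */
        close(fd);
        return -1;
    }
    void *map = mmap(NULL, 128 * 1024, PROT_READ,         /* map driver memory into user space */
                     MAP_PRIVATE, fd, 0);
    if (map != MAP_FAILED)
        munmap(map, 128 * 1024);
    close(fd);
    return 0;
}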
The source path is
frameworks/native/cmds/servicemanager
The main function
int main(int argc, char** argv)
{
struct binder_state *bs;
union selinux_callback cb;
char *driver;
if (argc > 1) {
driver = argv[1];
} else {
driver = "/dev/binder";
}
//initialize the binder driver; 128*1024 = 128 KB is the mmap size
bs = binder_open(driver, 128*1024);
if (!bs) {
#ifdef VENDORSERVICEMANAGER
ALOGW("failed to open binder driver %s\n", driver);
while (true) {
sleep(UINT_MAX);
}
#else
ALOGE("failed to open binder driver %s\n", driver);
#endif
return -1;
}
//register the system service manager, i.e. set the current process as the servicemanager (context manager)
if (binder_become_context_manager(bs)) {
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
cb.func_audit = audit_callback;
selinux_set_callback(SELINUX_CB_AUDIT, cb);
#ifdef VENDORSERVICEMANAGER
cb.func_log = selinux_vendor_log_callback;
#else
cb.func_log = selinux_log_callback;
#endif
selinux_set_callback(SELINUX_CB_LOG, cb);
#ifdef VENDORSERVICEMANAGER
sehandle = selinux_android_vendor_service_context_handle();
#else
sehandle = selinux_android_service_context_handle();
#endif
selinux_status_open(true);
if (sehandle == NULL) {
ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
abort();
}
if (getcon(&service_manager_context) != 0) {
ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
abort();
}
//loop reading and writing data through the driver; parsed requests are handled by the svcmgr_handler callback
binder_loop(bs, svcmgr_handler);
return 0;
}
Roughly, the flow is:
ServiceManager starts from its main function,
calls binder_open, which uses a series of system calls (open, ioctl, mmap) to initialize the driver,
binder_become_context_manager registers the current process, i.e. our ServiceManager, as the context manager,
and binder_loop tells the driver to enter a loop that keeps reading data.
binder_open
The first parameter is the path of the device node; the second is the size of the memory mapping.
struct binder_state *binder_open(const char* driver, size_t mapsize)
{ //struct that holds everything binder_open sets up and returns
struct binder_state *bs;
struct binder_version vers;
//allocate memory for the state struct
bs = malloc(sizeof(*bs));
if (!bs) {
errno = ENOMEM;
return NULL;
}
//open() system call: open "/dev/binder" and get back a file descriptor
bs->fd = open(driver, O_RDWR | O_CLOEXEC);
if (bs->fd < 0) {
fprintf(stderr,"binder: cannot open %s (%s)\n",
driver, strerror(errno));
goto fail_open;
}
//ioctl with BINDER_VERSION: ask which protocol version the binder driver in the current kernel speaks, and check that it matches user space
if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
(vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
fprintf(stderr,
"binder: kernel driver version (%d) differs from user space version (%d)\n",
vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
goto fail_open;
}
bs->mapsize = mapsize;
//map mapsize bytes of driver memory into this process's user address space
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
if (bs->mapped == MAP_FAILED) {
fprintf(stderr,"binder: cannot map device (%s)\n",
strerror(errno));
goto fail_map;
}
return bs;
fail_map:
close(bs->fd);
fail_open:
free(bs);
return NULL;
}
binder_open is a user-space helper function that Google provides for us; internally it calls the kernel interfaces shown above.
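A quick usage sketch (hypothetical caller code; binder_close is assumed to be the matching cleanup helper in binder.c that unmaps the buffer and closes the fd):
struct binder_state *bs = binder_open("/dev/binder", 128 * 1024);
if (bs) {
    /* bs->fd, bs->mapped and bs->mapsize are now ready for BINDER_WRITE_READ traffic */
    binder_close(bs);   /* assumed counterpart: munmap(bs->mapped), close(bs->fd), free(bs) */
}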
binder_become_context_manager
int binder_become_context_manager(struct binder_state *bs)
{
//the data struct sent down to the driver
struct flat_binder_object obj;
memset(&obj, 0, sizeof(obj));
obj.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
//tell the driver that this newly started process is the servicemanager: send the BINDER_SET_CONTEXT_MGR_EXT command,
//passing the flat_binder_object struct down to the kernel driver
int result = ioctl(bs->fd, BINDER_SET_CONTEXT_MGR_EXT, &obj);
// fallback to original method
if (result != 0) {
android_errorWriteLog(0x534e4554, "121035042");
result = ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
return result;
}
The flat_binder_object struct
In C, __u32 is a fixed-width type meaning "unsigned 32-bit integer". It is used throughout Linux kernel and UAPI code; the double-underscore prefix (__) keeps these identifiers from clashing with user-space names.
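Roughly speaking, the kernel UAPI headers define these fixed-width types along the following lines (the exact definitions vary by architecture and kernel version, so treat this as a sketch):
typedef unsigned int       __u32;   /* unsigned 32-bit integer */
typedef unsigned long long __u64;   /* unsigned 64-bit integer */
typedef __u64 binder_uintptr_t;     /* pointer-sized value carried through the driver on a 64-bit build */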
struct flat_binder_object {
struct binder_object_header hdr;
__u32 flags;
/* 8 bytes of data. */
union {
binder_uintptr_t binder; /* local object */
__u32 handle; /* remote object */
};
/* extra data associated with local object */
binder_uintptr_t cookie;
};
binder_loop analysis
binder_handler func is the callback function. We will come back to binder_loop in more detail further below, after looking at the data structures it works with.
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t));
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
break;
}
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
if (res == 0) {
ALOGE("binder_loop: unexpected reply?!\n");
break;
}
if (res < 0) {
ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
break;
}
}
}
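For reference, binder_handler is a function-pointer type; its exact definition lives in binder.h, but judging from how func is invoked it presumably looks roughly like this (matching the parameters svcmgr_handler takes):
typedef int (*binder_handler)(struct binder_state *bs,
                              struct binder_transaction_data_secctx *txn,
                              struct binder_io *msg,
                              struct binder_io *reply);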
The callback function
//bs is the struct returned by binder_open; txn_secctx and msg carry the data sent by the caller; reply carries the data written back
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data_secctx *txn_secctx,
struct binder_io *msg,
struct binder_io *reply)
Let's first look at what these data structures mean.
binder_io holds a batch of data: the sending side fills this struct and the receiving side reads it back out. It is essentially a data container backed by a chunk of memory.
//four pointers and four integer fields
struct binder_io
{
//data area
char *data; /* pointer to read/write from */
//offsets array
binder_size_t *offs; /* array of offsets */
size_t data_avail; /* bytes available in data buffer */
size_t offs_avail; /* entries available in offsets array */
char *data0; /* start of data buffer */
binder_size_t *offs0; /* start of offsets buffer */
uint32_t flags;
uint32_t unused;
};
How the data is constructed on the sending side (it is this data that the callback later handles):
int svcmgr_publish(struct binder_state *bs, uint32_t target, const char *name, void *ptr)
{
int status;
//all the data ends up in this array
unsigned iodata[512/4];
struct binder_io msg, reply;
//set up the binder_io structs over that buffer
bio_init(&msg, iodata, sizeof(iodata), 4);
//write a 32-bit integer
bio_put_uint32(&msg, 0); // strict mode header
//write a string
bio_put_string16_x(&msg, SVC_MGR_NAME);
bio_put_string16_x(&msg, name);
bio_put_obj(&msg, ptr);
if (binder_call(bs, &msg, &reply, target, SVC_MGR_ADD_SERVICE))
return -1;
status = bio_get_uint32(&reply);
binder_done(bs, &msg, &reply);
return status;
}
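As a hypothetical usage sketch (modeled on the test-client style in the same directory; my_handler and the service name are made up), a native process could register itself roughly like this:
int token;   /* any local object whose address identifies this service */
struct binder_state *bs = binder_open("/dev/binder", 128 * 1024);
if (bs && svcmgr_publish(bs, 0 /* handle of servicemanager itself */, "demo.service", &token) == 0) {
    binder_loop(bs, my_handler);   /* my_handler: hypothetical callback that serves incoming calls */
}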
The server side receives the data
The data is read back in svcmgr_handler, and it must be read in the same order it was written:
// Equivalent to Parcel::enforceInterface(), reading the RPC
// header with the strict mode policy mask and the interface name.
// Note that we ignore the strict_policy and don't propagate it
// further (since we do no outbound RPCs anyway).
strict_policy = bio_get_uint32(msg);
bio_get_uint32(msg); // Ignore worksource header.
s = bio_get_string16(msg, &len);
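In other words, every bio_put_* on the writer side pairs with a bio_get_* on the reader side, in the same order. A compressed view of the two directions (a sketch; real framework clients also write a worksource header, which is presumably why the handler skips one extra uint32):
bio_put_uint32(&msg, 0);                  /* writer: strict mode header -> strict_policy = bio_get_uint32(msg) */
bio_put_string16_x(&msg, SVC_MGR_NAME);   /* writer: interface name     -> s = bio_get_string16(msg, &len)     */
bio_put_string16_x(&msg, name);           /* writer: service name       -> s = bio_get_string16(msg, &len)     */
bio_put_obj(&msg, ptr);                   /* writer: the binder object  -> handle = bio_get_ref(msg)           */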
Initialization of binder_io
//allocate the backing buffer (on the stack)
unsigned iodata[512/4];
//declare the binder_io structs
struct binder_io msg, reply;
//initialize the struct: the first parameter is the binder_io pointer, the second the buffer it manages, the third the size of that buffer,
//and the fourth the number of offset entries
bio_init(&msg, iodata, sizeof(iodata), 4);
void bio_init(struct binder_io *bio, void *data,
size_t maxdata, size_t maxoffs)
{
//number of bytes occupied by the offsets area
size_t n = maxoffs * sizeof(size_t);
//if that alone exceeds the buffer we were given, flag an overflow and bail out
if (n > maxdata) {
bio->flags = BIO_F_OVERFLOW;
bio->data_avail = 0;
bio->offs_avail = 0;
return;
}
//the data area starts right after the offsets area
bio->data = bio->data0 = (char *) data + n;
bio->offs = bio->offs0 = data;
//remaining bytes in the data area and number of available offset entries
bio->data_avail = maxdata - n;
bio->offs_avail = maxoffs;
//start with a clean flags field
bio->flags = 0;
}
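A quick worked example using the buffer from above (assuming a 64-bit build where sizeof(size_t) == 8):
unsigned iodata[512/4];                     /* 512 bytes in total */
struct binder_io msg;
bio_init(&msg, iodata, sizeof(iodata), 4);
/* n              = 4 * sizeof(size_t) = 32 bytes reserved for the offsets area */
/* msg.offs0      = iodata               start of the offsets area (4 entries)  */
/* msg.data0      = (char *)iodata + 32  start of the data area                 */
/* msg.data_avail = 512 - 32 = 480       bytes left for payload                 */
/* msg.offs_avail = 4                    flat_binder_object offsets that fit    */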
Putting integer data into our bio struct:
void bio_put_uint32(struct binder_io *bio, uint32_t n)
{
//carve out space from the data area (no heap allocation involved)
uint32_t *ptr = bio_alloc(bio, sizeof(n));
if (ptr)
*ptr = n;
}
The allocation function
//size: number of bytes to allocate
static void *bio_alloc(struct binder_io *bio, size_t size)
{
size = (size + 3) & (~3);
if (size > bio->data_avail) {
bio->flags |= BIO_F_OVERFLOW;
return NULL;
} else {
//hand out the current position in the data area and advance past it
void *ptr = bio->data;
bio->data += size;
bio->data_avail -= size;
return ptr;
}
}
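The expression (size + 3) & ~3 simply rounds the request up to the next multiple of 4, so every item in the data area stays 4-byte aligned. A few sample values (plain C, just illustrating the arithmetic):
#include <assert.h>

static void check_bio_alignment(void)
{
    assert(((1 + 3) & ~3) == 4);   /* 1 byte rounds up to 4    */
    assert(((4 + 3) & ~3) == 4);   /* already aligned: stays 4 */
    assert(((5 + 3) & ~3) == 8);   /* 5 bytes rounds up to 8   */
}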
//the first parameter is the binder_io container
void bio_put_obj(struct binder_io *bio, void *ptr)
{
//ultimately the flat_binder_object struct is what gets stored inside the binder_io container
struct flat_binder_object *obj;
//carve out space for the object
obj = bio_alloc_obj(bio);
if (!obj)
return;
obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
obj->hdr.type = BINDER_TYPE_BINDER;
obj->binder = (uintptr_t)ptr;
obj->cookie = 0;
}
static struct flat_binder_object *bio_alloc_obj(struct binder_io *bio)
{
struct flat_binder_object *obj;
//allocate the object inside the data area of the buffer we set up
obj = bio_alloc(bio, sizeof(*obj));
//record the offset, i.e. where exactly this flat_binder_object sits inside the memory that binder_io manages
if (obj && bio->offs_avail) {
bio->offs_avail--;
*bio->offs++ = ((char*) obj) - ((char*) bio->data0);
return obj;
}
bio->flags |= BIO_F_OVERFLOW;
return NULL;
}
Back to binder_loop. Its job is to read the data that clients pass down through the driver and then parse it; here is the function again, this time with comments:
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
//struct describing the read and write buffers
struct binder_write_read bwr;
uint32_t readbuf[32];
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER;
//write BC_ENTER_LOOPER to notify the driver that this thread enters the loop
binder_write(bs, readbuf, sizeof(uint32_t));
//infinite loop
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
//read/write ioctl: pull data out of the kernel driver
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
break;
}
//parse the data; each parsed transaction is handed to the func callback
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
if (res == 0) {
ALOGE("binder_loop: unexpected reply?!\n");
break;
}
if (res < 0) {
ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
break;
}
}
}
binder_write analysis
int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
int res;
//the write half: send len bytes starting at data
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
//the read half: nothing to read in this call
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
//issue a write-only BINDER_WRITE_READ ioctl to the binder driver, pushing data into the kernel
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
fprintf(stderr,"binder_write: ioctl failed (%s)\n",
strerror(errno));
}
return res;
}
struct binder_write_read {
//write half: data going from user space into the driver
binder_size_t write_size; /* bytes to write */
binder_size_t write_consumed; /* bytes consumed by driver */
binder_uintptr_t write_buffer;
//read half: data coming back from the driver to user space
binder_size_t read_size; /* bytes to read */
binder_size_t read_consumed; /* bytes consumed by driver */
binder_uintptr_t read_buffer;
};
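binder_write above only fills in the write half. binder_call (used earlier by svcmgr_publish) presumably fills in both halves, so a single BINDER_WRITE_READ ioctl sends the request and then blocks until the reply arrives. A rough, illustrative sketch of that combined setup (writebuf and readbuf are hypothetical buffers, not the actual binder_call source):
uint32_t writebuf[128], readbuf[32];       /* outgoing command + room for the reply */
struct binder_write_read bwr;
bwr.write_size     = sizeof(writebuf);     /* outgoing BC_TRANSACTION plus payload  */
bwr.write_consumed = 0;
bwr.write_buffer   = (uintptr_t) writebuf;
bwr.read_size      = sizeof(readbuf);      /* space for the BR_REPLY coming back    */
bwr.read_consumed  = 0;
bwr.read_buffer    = (uintptr_t) readbuf;
ioctl(bs->fd, BINDER_WRITE_READ, &bwr);    /* one ioctl: write the request, then block for the reply */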
//ptr points at the readbuf data just read from the driver; func is the callback function
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
//walk the buffer and pull out each command (cmd)
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
#if TRACE
fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
switch(cmd) {
case BR_NOOP:
break;
case BR_TRANSACTION_COMPLETE:
break;
case BR_INCREFS:
case BR_ACQUIRE:
case BR_RELEASE:
case BR_DECREFS:
#if TRACE
fprintf(stderr," %p, %p\n", (void *)ptr, (void *)(ptr + sizeof(void *)));
#endif
ptr += sizeof(struct binder_ptr_cookie);
break;
case BR_TRANSACTION_SEC_CTX:
case BR_TRANSACTION: {
struct binder_transaction_data_secctx txn;
if (cmd == BR_TRANSACTION_SEC_CTX) {
if ((end - ptr) < sizeof(struct binder_transaction_data_secctx)) {
ALOGE("parse: txn too small (binder_transaction_data_secctx)!\n");
return -1;
}
memcpy(&txn, (void*) ptr, sizeof(struct binder_transaction_data_secctx));
ptr += sizeof(struct binder_transaction_data_secctx);
} else /* BR_TRANSACTION */ {
if ((end - ptr) < sizeof(struct binder_transaction_data)) {
ALOGE("parse: txn too small (binder_transaction_data)!\n");
return -1;
}
//copy the raw data into the transaction_data struct
memcpy(&txn.transaction_data, (void*) ptr, sizeof(struct binder_transaction_data));
ptr += sizeof(struct binder_transaction_data);
txn.secctx = 0;
}
binder_dump_txn(&txn.transaction_data);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, &txn.transaction_data);
//hand the parsed data to the callback function and invoke it
res = func(bs, &txn, &msg, &reply);
if (txn.transaction_data.flags & TF_ONE_WAY) {
binder_free_buffer(bs, txn.transaction_data.data.ptr.buffer);
} else {
//synchronous transaction: send the reply back to the caller
binder_send_reply(bs, &reply, txn.transaction_data.data.ptr.buffer, res);
}
}
break;
}
case BR_REPLY: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: reply too small!\n");
return -1;
}
binder_dump_txn(txn);
if (bio) {
bio_init_from_txn(bio, txn);
bio = 0;
} else {
/* todo FREE BUFFER */
}
ptr += sizeof(*txn);
r = 0;
break;
}
case BR_DEAD_BINDER: {
struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
ptr += sizeof(binder_uintptr_t);
death->func(bs, death->ptr);
break;
}
case BR_FAILED_REPLY:
r = -1;
break;
case BR_DEAD_REPLY:
r = -1;
break;
default:
ALOGE("parse: OOPS %d\n", cmd);
return -1;
}
}
return r;
}
The callback function: once the data has been parsed, execution ends up in this function.
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data_secctx *txn_secctx,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
uint32_t dumpsys_priority;
struct binder_transaction_data *txn = &txn_secctx->transaction_data;
//ALOGI("target=%p code=%d pid=%d uid=%d\n",
// (void*) txn->target.ptr, txn->code, txn->sender_pid, txn->sender_euid);
if (txn->target.ptr != BINDER_SERVICE_MANAGER)
return -1;
if (txn->code == PING_TRANSACTION)
return 0;
// Equivalent to Parcel::enforceInterface(), reading the RPC
// header with the strict mode policy mask and the interface name.
// Note that we ignore the strict_policy and don't propagate it
// further (since we do no outbound RPCs anyway).
//read the fields back out in order
strict_policy = bio_get_uint32(msg);
bio_get_uint32(msg); // Ignore worksource header.
//this is string data: the interface descriptor, android.os.IServiceManager
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
if ((len != (sizeof(svcmgr_id) / 2)) ||
memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
fprintf(stderr,"invalid id %s\n", str8(s, len));
return -1;
}
if (sehandle && selinux_status_updated() > 0) {
#ifdef VENDORSERVICEMANAGER
struct selabel_handle *tmp_sehandle = selinux_android_vendor_service_context_handle();
#else
struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
#endif
if (tmp_sehandle) {
selabel_close(sehandle);
sehandle = tmp_sehandle;
}
}
//dispatch to the code that handles the requested operation
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid,
(const char*) txn_secctx->secctx);
if (!handle)
break;
bio_put_ref(reply, handle);
return 0;
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
dumpsys_priority = bio_get_uint32(msg);
if (do_add_service(bs, s, len, handle, txn->sender_euid, allow_isolated, dumpsys_priority,
txn->sender_pid, (const char*) txn_secctx->secctx))
return -1;
break;
case SVC_MGR_LIST_SERVICES: {
uint32_t n = bio_get_uint32(msg);
uint32_t req_dumpsys_priority = bio_get_uint32(msg);
if (!svc_can_list(txn->sender_pid, (const char*) txn_secctx->secctx, txn->sender_euid)) {
ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
txn->sender_euid);
return -1;
}
si = svclist;
// walk through the list of services n times skipping services that
// do not support the requested priority
while (si) {
if (si->dumpsys_priority & req_dumpsys_priority) {
if (n == 0) break;
n--;
}
si = si->next;
}
if (si) {
bio_put_string16(reply, si->name);
return 0;
}
return -1;
}
default:
ALOGE("unknown code %d\n", txn->code);
return -1;
}
bio_put_uint32(reply, 0);
return 0;
}
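For completeness, the svclist walked in SVC_MGR_LIST_SERVICES is a singly linked list of registered services. The svcinfo node lives in service_manager.c; from memory it looks roughly like the sketch below, so treat the exact field order as an assumption:
struct svcinfo
{
    struct svcinfo *next;          /* next registered service in svclist              */
    uint32_t handle;               /* driver handle that refers to the service binder */
    struct binder_death death;     /* death-notification callback plus cookie         */
    int allow_isolated;            /* may isolated processes access this service?     */
    uint32_t dumpsys_priority;     /* priority mask checked by SVC_MGR_LIST_SERVICES  */
    size_t len;                    /* length of the UTF-16 name                       */
    uint16_t name[0];              /* the service name itself                         */
};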