Application-layer main logic
//1. open the driver and mmap the shared buffer
binder_open
//2. build a binder_write_read and use ioctl to ask the driver to register this server with servicemanager
ioctl(bs->fd, BINDER_WRITE_READ, &bwr)
//3. the looper parses the received data; server_handler_func is the function that actually handles it
binder_loop(bs, server_handler_func);
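The first step, binder_open, follows the same flow as the servicemanager startup: open /dev/binder, check the protocol version, and mmap a receive buffer. A minimal sketch (abridged from servicemanager's binder.c; error handling and logging removed, so treat the details as an approximation):

struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs = malloc(sizeof(*bs));
    struct binder_version vers;

    /* open the binder driver */
    bs->fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

    /* make sure user space and the driver agree on the protocol version */
    ioctl(bs->fd, BINDER_VERSION, &vers);

    /* map a read-only buffer that the driver fills with incoming transactions */
    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);

    return bs;
}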
- Open the driver and mmap the mapping via binder_open — the same flow as the servicemanager startup (sketched above).
- Build a binder_write_read and use ioctl to talk to the driver to add the server to servicemanager: svcmgr_publish, binder_call
// target = BINDER_SERVICE_MANAGER = 0, i.e. the handle of servicemanager; ptr is the function that will handle incoming messages
int svcmgr_publish(struct binder_state *bs, uint32_t target, const char *name, void *ptr)
{
int status;
unsigned iodata[512/4];
struct binder_io msg, reply;
//build the binder_io
bio_init(&msg, iodata, sizeof(iodata), 4);
bio_put_uint32(&msg, 0); // strict mode header
bio_put_string16_x(&msg, SVC_MGR_NAME);
bio_put_string16_x(&msg, name);
//build the flat_binder_object
bio_put_obj(&msg, ptr);
// build a binder_transaction_data and talk to the driver via ioctl; code is SVC_MGR_ADD_SERVICE
if (binder_call(bs, &msg, &reply, target, SVC_MGR_ADD_SERVICE))
return -1;
status = bio_get_uint32(&reply);
binder_done(bs, &msg, &reply);
return status;
}
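The bio_put_obj call above is where the service's binder entity is wrapped in a flat_binder_object. A simplified sketch of what it does (based on servicemanager's binder.c; the exact flags value may differ between versions):

void bio_put_obj(struct binder_io *bio, void *ptr)
{
    struct flat_binder_object *obj;

    /* reserve space in the data area and record its offset in the offsets area */
    obj = bio_alloc_obj(bio);
    if (!obj)
        return;

    obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj->type = BINDER_TYPE_BINDER;   /* a local binder entity, not a handle */
    obj->binder = (uintptr_t)ptr;     /* the server's callback/cookie pointer */
    obj->cookie = 0;
}

binder_call then packages the binder_io into a binder_transaction_data and hands it to the driver: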
int binder_call(struct binder_state *bs,
struct binder_io *msg, struct binder_io *reply,
uint32_t target, uint32_t code)
{
int res;
struct binder_write_read bwr;
struct {
uint32_t cmd;
struct binder_transaction_data txn;
} __attribute__((packed)) writebuf;
unsigned readbuf[32];
// the command sent to the driver is BC_TRANSACTION
writebuf.cmd = BC_TRANSACTION;
writebuf.txn.target.handle = target;
writebuf.txn.code = code;
writebuf.txn.flags = 0;
writebuf.txn.data_size = msg->data - msg->data0;
writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0);
writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0;
writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0;
bwr.write_size = sizeof(writebuf);
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) &writebuf;
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
//hand the request to the driver: write has data here, so the driver enters binder_thread_write, see --> 2.1
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
//parse the reply read back from the driver
res = binder_parse(bs, reply, (uintptr_t) readbuf, bwr.read_consumed, 0);
if (res == 0) return 0;
if (res < 0) goto fail;
}
fail:
memset(reply, 0, sizeof(*reply));
reply->flags |= BIO_F_IOERROR;
return -1;
}
2.1 ioctl(bs->fd, BINDER_WRITE_READ, &bwr) enters the binder driver. Because the write buffer has data here, the driver goes into binder_thread_write, which dispatches on cmd = BC_TRANSACTION, copies the binder_transaction_data from user space into kernel space, and then calls binder_transaction.
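A reduced sketch of that BC_TRANSACTION branch in binder_thread_write (abridged from the kernel's binder.c; error paths elided):

case BC_TRANSACTION:
case BC_REPLY: {
    struct binder_transaction_data tr;

    /* copy the binder_transaction_data from user space into the kernel */
    if (copy_from_user(&tr, ptr, sizeof(tr)))
        return -EFAULT;
    ptr += sizeof(tr);
    /* reply is true only for BC_REPLY; here it is a plain transaction */
    binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
    break;
}

binder_transaction then does the real work of routing the data to the target process: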
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
//target process
struct binder_proc *target_proc;
//target thread
struct binder_thread *target_thread = NULL;
//target binder node
struct binder_node *target_node = NULL;
//target todo list
struct list_head *target_list;
//target wait queue
wait_queue_head_t *target_wait;
//the transaction being replied to (reply path)
struct binder_transaction *in_reply_to = NULL;
//...
//handle is 0 here, which stands for servicemanager
if (tr->target.handle) {
//...
} else {
//set target_node to servicemanager's binder_node
target_node = binder_context_mgr_node;
//...
}
//...
//find the target binder_proc from the binder_node
target_proc = target_node->proc;
//...
if (target_thread) {
//...
} else {
//take the todo list and wait queue from the target binder_proc
target_list = &target_proc->todo;
target_wait = &target_proc->wait;
}
//...
//allocate a binder_transaction in the kernel
t = kzalloc(sizeof(*t), GFP_KERNEL);
t->to_proc = target_proc;
t->to_thread = target_thread;
//SVC_MGR_ADD_SERVICE
t->code = tr->code;
//allocate the data buffer from the target process's mapped binder memory
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
//...
t->buffer->transaction = t;
t->buffer->target_node = target_node;
offp = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
//copy the transaction data over from user space (error check elided)
copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
    tr->data.ptr.buffer, tr->data_size);
//copy the offsets array from user space (error check elided)
copy_from_user(offp, (const void __user *)(uintptr_t)
    tr->data.ptr.offsets, tr->offsets_size);
for (; offp < off_end; offp++) {
// is this a binder entity or a handle reference?
struct flat_binder_object *fp;
//...
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
switch (fp->type) {
//the server side passes a binder entity, so this is the case taken here
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
//...
struct binder_ref *ref;
//look up this service's binder_node in the sender's binder_proc
struct binder_node *node = binder_get_node(proc, fp->binder);
//
if (node == NULL) {
//not seen before: create a binder_node for it
node = binder_new_node(proc, fp->binder, fp->cookie);
//...
}
//look up (or create) the binder_ref for this node in the target binder_proc
ref = binder_get_ref_for_node(target_proc, node);
//...
//hand the receiver a handle instead of the raw pointer
fp->handle = ref->desc;
//bump the ref count; type is rewritten to BINDER_TYPE_HANDLE (elided above)
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
break;
}
//the object is already a handle reference
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
//...
break;
}
}
}
//...
//set the work type to BINDER_WORK_TRANSACTION
t->work.type = BINDER_WORK_TRANSACTION;
//queue the work on the target todo list
list_add_tail(&t->work.entry, target_list);
//...
//wake up the target's wait queue
if (target_wait)
wake_up_interruptible(target_wait);
}
The main job of this path: user space wraps the current server's binder entity in a flat_binder_object and builds a binder_write_read to interact with the driver; the driver creates a binder_node from the flat_binder_object, creates a binder_ref for target_proc whose node points at this binder_node, then constructs a binder_transaction, rewrites the flat_binder_object in place, and adds the work to the target process's todo queue.
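To make the relationship concrete, here is a stripped-down view of the two driver-side structures involved (fields trimmed to the ones discussed here; the real definitions in the kernel's binder.c have many more members, so treat this as an illustration):

/* illustration only: trimmed versions of the driver structures */
struct binder_node {              /* one per binder entity, owned by the server's binder_proc */
    struct binder_proc *proc;     /* the server process that owns the entity */
    binder_uintptr_t    ptr;      /* fp->binder from the flat_binder_object */
    binder_uintptr_t    cookie;   /* fp->cookie */
    /* ... */
};

struct binder_ref {               /* one per (process, node) pair, owned by the referencing process */
    uint32_t            desc;     /* the handle value written back into fp->handle */
    struct binder_proc *proc;     /* here: servicemanager's binder_proc (target_proc) */
    struct binder_node *node;     /* points back at the server's binder_node */
    /* ... */
};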
After the servicemanager process is woken up it starts receiving the message and enters binder_thread_read:
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
//...
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
//...
//take the next work item from the todo list
if (!list_empty(&thread->todo)) {
w = list_first_entry(&thread->todo, struct binder_work,
entry);
} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
w = list_first_entry(&proc->todo, struct binder_work,
entry);
}
//...
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
break;
}
//...
}
//...
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
//fill the binder_transaction_data from the target_node
tr.target.ptr = target_node->ptr;
//...
//set the command
cmd = BR_TRANSACTION;
}
//...
//code is SVC_MGR_ADD_SERVICE here
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
// fill in the sender information of the binder_transaction_data
if (t->from) {
struct task_struct *sender = t->from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
} else {
tr.sender_pid = 0;
}
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)(
(uintptr_t)t->buffer->data +
proc->user_buffer_offset);
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
//copy the cmd to user space
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
//copy the binder_transaction_data to user space
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
}
}
At this point the read_buffer that servicemanager passed in via binder_loop has data, and execution reaches the corresponding binder_parse function:
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
switch(cmd) {
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
//func is servicemanager's svcmgr_handler function
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
//build the binder_io structures
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
res = func(bs, txn, &msg, &reply);
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
ptr += sizeof(*txn);
break;
}
}
}
//...
}
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
//..
switch(txn->code) {
//add the handle and name to the svclist
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
//add to the svclist
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))
return -1;
break;
}
//..
}
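For reference, do_add_service essentially prepends an entry to servicemanager's in-process service list. A condensed sketch (based on service_manager.c; name validation, permission checks, and the death-notification registration are elided, and the exact signature may vary by version):

struct svcinfo {
    struct svcinfo *next;
    uint32_t handle;          /* the driver-assigned handle (ref->desc) */
    struct binder_death death;
    int allow_isolated;
    size_t len;
    uint16_t name[0];         /* UTF-16 service name */
};

struct svcinfo *svclist;      /* head of the registered-service list */

int do_add_service(struct binder_state *bs, const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid, int allow_isolated, pid_t spid)
{
    struct svcinfo *si = find_svc(s, len);

    if (si) {
        si->handle = handle;                  /* re-registration: update the handle */
    } else {
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        si->handle = handle;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->allow_isolated = allow_isolated;
        si->next = svclist;                   /* prepend to the list */
        svclist = si;
    }
    binder_acquire(bs, handle);               /* keep a strong reference on the handle */
    return 0;
}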
- binder_loop(bs, server_handler_func); — the same as servicemanager: it starts the loop, and when data arrives it triggers server_handler_func, which is the server's own business logic (a sketch of binder_loop follows below).
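A sketch of binder_loop for reference (abridged from servicemanager's binder.c): the thread registers itself as a looper with BC_ENTER_LOOPER, then blocks in the read side of BINDER_WRITE_READ and feeds whatever arrives into binder_parse together with the handler function:

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    /* tell the driver this thread enters the looper */
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        /* write_size is 0, so the driver goes straight to binder_thread_read
         * and blocks until something lands on the todo list */
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0)
            break;

        /* dispatch the BR_* commands; func handles BR_TRANSACTION */
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res <= 0)
            break;
    }
}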
Summary:
- The server process generates a flat_binder_object for its service and builds a binder_write_read structure to interact with the driver.
- The driver creates a binder_node from the flat_binder_object, then creates a binder_ref for the target process servicemanager's binder_proc, with the binder_ref's node pointing at this service's binder_node; it then builds a binder_transaction, patches part of the flat_binder_object (e.g. type is changed to BINDER_TYPE_HANDLE), queues the transaction on servicemanager's todo list, and wakes it up.
- After servicemanager wakes up, it receives the transaction; binder_thread_read builds a binder_transaction_data from it and copies the relevant data to user space, where binder_parse runs and then calls the handler svcmgr_handler.
- svcmgr_handler, seeing code = SVC_MGR_ADD_SERVICE, adds the name and handle to an svcinfo entry.
- The reply path will be covered later.
- Finally the server starts its own looper to receive requests.