接前文
3.2 数据读写
3.2.1 BINDER_WRITE_READ
首先我们来看一下BINDER_WRITE_READ
的定义如下,它是通过调用内核提供的_IOWR
宏来构造。关于ioctl
命令的构造方法,有兴趣可以看看这篇文章, 基本讲清楚了。 —— 构造IOCTL学习心得.
#define BINDER_WRITE_READ _IOWR('b'/*type 魔数域*/, 1/*command 序号数*/, struct binder_write_read/*size:用来求数据大小域*/)
3.2.2 读写处理函数binder_ioctl_write_read
/*
 * binder_ioctl_write_read - handle the BINDER_WRITE_READ ioctl.
 * @filp:   the opened binder device file; its private_data is the binder_proc
 * @cmd:    the ioctl command (used only to validate the payload size)
 * @arg:    user-space address of a struct binder_write_read
 * @thread: the calling binder thread
 *
 * Copies the binder_write_read descriptor in from user space, dispatches the
 * write buffer to binder_thread_write() and/or fills the read buffer via
 * binder_thread_read(), then copies the (updated) descriptor back out.
 * Returns 0 on success or a negative errno.
 */
static int binder_ioctl_write_read(struct file *filp,
				   unsigned int cmd, unsigned long arg,
				   struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	/*
	 * Payload size encoded in the ioctl command; per 3.2.1 it must be
	 * exactly sizeof(struct binder_write_read).
	 */
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	/* Fetch the struct binder_write_read descriptor from user space. */
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);
	/*
	 * write_size > 0 means the user process has data for the driver;
	 * hand it to binder_thread_write() (see 3.2.2.1).
	 */
	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			/*
			 * binder_thread_write() failed: report that the kernel
			 * produced no data for the reader.
			 */
			bwr.read_consumed = 0;
			/*
			 * bwr was modified by binder_thread_write(); still
			 * return it to the user-space caller.
			 */
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	/*
	 * read_size > 0 means the caller expects data back from the driver;
	 * let binder_thread_read() fill the read buffer.
	 */
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		/*
		 * After reading, if the process still has pending work on
		 * proc->todo, wake up a waiter on proc->wait.
		 */
		if (!list_empty(&proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			/*
			 * The read may have stopped part-way; bwr must still
			 * be copied back so the caller sees the consumed counts.
			 */
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	/* Success path: hand the updated bwr back to user space as well. */
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
-
binder_ioctl_write_read
整个处理逻辑相对简单,它首先从arg
中读取用户态传进来的struct binder_write_read
结构体,然后根据其成员变量write_size
和read_size
是否大于0,分别调用binder_thread_write
和binder_thread_read
来处理发送和接收请求。这里要注意的一点是:在binder_thread_write
和binder_thread_read
的调用中,有两个参数bwr.write_consumed
和bwr.read_consumed
是传址作为参数的,这意味着这两个成员变量是会在被调用函数中修改的。如果在binder_thread_write
处理过程中出错(返回值小于0),则不再处理read_size
大于0的情况。最后不管是成功还是失败,都会将bwr
通过copy_to_user
返回给进程,进程可以通过write_consumed
和read_consumed
字段得知驱动读取和写入多少字节的数据。其实上述三处的copy_to_user
调用,其实可以统一合并到最后一处,然后将out
跳转标签移到最后一处之前,其他两处的copy_to_user
直接移除即可,以减少重复代码。
3.2.2.1 数据发送 —— binder_thread_write
这个函数代码量比较大,我们需要分段来看。
函数头部及增减binder_ref
强弱引用计数的四个命令
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
/*从用户态地址空间bwr的write_buffer中读取一个32位无符号整型到cmd*/
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
/*指针后移4个字节*/
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
/*更新该cmd相关的统计信息*/
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
binder_stats.bc[_IOC_NR(cmd)]++;
proc->stats.bc[_IOC_NR(cmd)]++;
thread->stats.bc[_IOC_NR(cmd)]++;
}
switch (cmd) {
/*增加或者减少强(BC_ACQUIRE,BC_RELEASE),弱(BC_INCREFS, BC_DECREFS)引用计数*/
case BC_INCREFS:
case BC_ACQUIRE:
case BC_RELEASE:
case BC_DECREFS: {
uint32_t target;
struct binder_ref *ref;
const char *debug_string;
/*从传入参数的用户态地址中读取想要修改引用计数的struct binder_ref的目标handle*/
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (target == 0 && binder_context_mgr_node &&
(cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
/* - 如果是想请求增加ServiceManager的强或弱的binder_ref引用,**binder_get_ref_for_node**会先在proc的refs_by_node红黑树中查找,
* desc域等于target的binder_ref。如果有找到,就返回找到的binder_ref;如果没有找到,就新创建一个并插入到`proc->refs_by_node`红黑树中。
* 还要为新创建的节点通过`rb_node_desc`域加入到`proc->refs_by_desc`红黑树中。
*
*- `refs_by_desc`红黑树是以`binder_buf`中的desc为序组织的,新创建节点的`desc`的值是该`proc`的`refs_by_desc`红黑树中最小的且还未被使用值,
* 即如果引用的`binder_node`是`binder_context_mgr_node`则是0,其他的就是1开始最小的还没被其他节点使用的值。最后还要将新创建的节点
* 通过其`node_entry`域,链入`binder_context_mgr_node`的`refs`哈希链表中。
*/
ref = **binder_get_ref_for_node**(proc,
binder_context_mgr_node);
if (ref->desc != target) {
binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
proc->pid, thread->pid,
ref->desc);
}
} else
/* 与binder_get_ref_for_node类似,也是在proc->refs_by_node红黑树中查找desc域等于target的binder_ref
* 但是如果没找到,不会创建新的binder_ref节点,而是直接返回NULL
*/
ref = **binder_get_ref**(proc, target);
if (ref == NULL) {
binder_user_error("%d:%d refcount change on invalid ref %d\n",
proc->pid, thread->pid, target);
break;
}
switch (cmd) {
case BC_INCREFS:
debug_string = "IncRefs";
binder_inc_ref(ref, 0, NULL );/* 增加弱引用计数(ref->weak++) */
break;
case BC_ACQUIRE:
debug_string = "Acquire";
/* 增加强引用计数(ref->strong++)。如果增加前strong的值为0,则还需要增加其所对应(引用)
* binder_node节点的internal_strong_refs的值
*/
binder_inc_ref(ref, 1, NULL);
break;
case BC_RELEASE:
debug_string = "Release";
/* 减少强引用计数(ref->strong--)。如果减少后strong的值为0,则还需要减少其所对应(引用)
* binder_node节点的internal_strong_refs的值。
* strong减完后,如果发现此时strong和weak都为0,还要删除该binder_ref节点
*/
binder_dec_ref(ref, 1);
break;
case BC_DECREFS:
default:
debug_string = "DecRefs";
/* 减少弱引用计数(ref->weak--)。减完后,如果发现此时strong和weak都为0,还要删除该binder_ref节点*/
binder_dec_ref(ref, 0);
break;
}
binder_debug(BINDER_DEBUG_USER_REFS,
"%d:%d %s ref %d desc %d s %d w %d for node %d\n",
proc->pid, thread->pid, debug_string, ref->debug_id,
ref->desc, ref->strong, ref->weak, ref->node->debug_id);
break;
} /*到这里BC_INCREFS, BC_ACQUIRE, BC_RELEASE, BC_DECREFS四个命令处理结束*/
....../*其他命令的处理*/
- 函数开始先确定了写缓冲区中开始(
ptr
)和结束的位置(end
)的位置,接着就开始进入循环,读取命令,更新相关统计信息。然后进入switch
分支根据不同的命令类型处理执行相应的处理。 - 第一个处理的是以下四个用于增加或者减少client端的驱动层表示
binder_ref
的强弱引用计数的命令-
BC_INCREFS
: 增加binder_ref的弱引用计数。如果是第一次增加(即,ref->weak == 0),还会去增加对应的binder_node的弱引用计数。在BpBinder
的构造函数中,通过调用IPCThreadState
的incWeakHandle
发出。 -
BC_DECREFS
:减少binder_ref的弱引用计数,但不会去减少对应binder_node的弱引用计数,即使ref->weak == 0,这与强引用计数的处理是不同的。在BpBinder
的析构函数中,通过调用IPCThreadState
的decWeakHandle
发出。 -
BC_ACQUIRE
:增加binder_ref的强引用计数。如果是第一次增加(即,ref->strong == 0),还会去增加对应的binder_node的强引用计数。在BpBinder
的onFirstRef
函数中,通过调用IPCThreadState
的incStrongHandle
发出。 -
BC_RELEASE
:减少binder_ref的强引用计数。如果减少后强引用计数为0(即,ref->strong == 0),还会去减少对应的binder_node的强引用计数。在BpBinder
的onLastStrongRef
函数中,通过调用IPCThreadState
的decStrongHandle
发出。
如果减少强或弱引用计数后,发现强弱引用计数都变为0,则会调用binder_delete_ref
删除对应的binder_ref
。
-
BC_INCREFS_DONE
和BC_ACQUIRE_DONE
case BC_INCREFS_DONE:
case BC_ACQUIRE_DONE: {
/*说明此时传入的是一个flat_binder_object*/
binder_uintptr_t node_ptr;
binder_uintptr_t cookie;
struct binder_node *node;
/* 从进程用户态地址空间中读取BBinder对象的弱引用计数器成员mRefs的地址
* BBinder继承自IBinder,后者继承自RefBase,mRefs为RefBase的类型为weakref_impl的对象
*/
if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
/*从进程用户态地址空间中读取`BBinder`对象的地址,放到cookie变量中*/
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
/*根据之前读取的node_ptr,在proc中的nodes红黑树中查找对应的binder_node*/
node = binder_get_node(proc, node_ptr);
if (node == NULL) {
binder_user_error("%d:%d %s u%016llx no match\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ?
"BC_INCREFS_DONE" :
"BC_ACQUIRE_DONE",
(u64)node_ptr);
break;
}
if (cookie != node->cookie) {
binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ?
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
(u64)node_ptr, node->debug_id,
(u64)cookie, (u64)node->cookie);
break;
}
if (cmd == BC_ACQUIRE_DONE) {
if (node->pending_strong_ref == 0) {
binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
proc->pid, thread->pid,
node->debug_id);
break;
}
/*将pending_strong_ref重新置为0,表示增加强引用计数操作已完成,在发出`BR_ACQUIRE`命令前,该值会被设成1*/
node->pending_strong_ref = 0;
} else {/*cmd == BC_INCREFS_DONE*/
if (node->pending_weak_ref == 0) {
binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
proc->pid, thread->pid,
node->debug_id);
break;
}
/*将pending_weak_ref重新置为0,表示增加弱引用计数操作已完成,在发出`BR_INCREFS`命令前,该值会被设成1*/
node->pending_weak_ref = 0;
}
binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
binder_debug(BINDER_DEBUG_USER_REFS,
"%d:%d %s node %d ls %d lw %d\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
node->debug_id, node->local_strong_refs, node->local_weak_refs);
break;
}
-
BC_INCREFS_DONE
和BC_ACQUIRE_DONE
两个命令分别是进程用户态在处理完对应的BR_INCREFS
和BR_ACQUIRE
回复Binder驱动的两个命令。关于后面两个命令BR_INCREFS
和BR_ACQUIRE
分别用于Binder驱动请求进程用户态增加IPCThreadState
中的mProcess
成员的(类型为:ProcessState
)弱引用和强引用计数。
BC_ATTEMPT_ACQUIRE
, BC_ACQUIRE_RESULT
case BC_ATTEMPT_ACQUIRE:
pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
return -EINVAL;
case BC_ACQUIRE_RESULT:
pr_err("BC_ACQUIRE_RESULT not supported\n");
return -EINVAL;
这两个命令,还不支持。
binder_bufffer
缓冲区释放命令 BC_FREE_BUFFER
case BC_FREE_BUFFER: {
binder_uintptr_t data_ptr;
struct binder_buffer *buffer;
/*读取binder_buffer的**data域**,在用户态的地址*/
if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
/* 先把进程用户态的地址data_ptr,换算成内核态binder_buffer节点的地址
* (data_ptr - proc->user_buffer_offset - **offset(struct binder_buffer, data****)**;需要
* 然后再在proc->allocated_buffers红黑树中查找,该红黑树是以binder_buffer内核态地址大小为序组织的
*/
buffer = binder_buffer_lookup(proc, data_ptr);
if (buffer == NULL) {
binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
proc->pid, thread->pid, (u64)data_ptr);
break;
}
if (!buffer->allow_user_free) {
/* 如果该节点地址空间不允许用户释放,则输出出错信息,并跳过该命令的处理 */
binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
proc->pid, thread->pid, (u64)data_ptr);
break;
}
binder_debug(BINDER_DEBUG_FREE_BUFFER,
"%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
proc->pid, thread->pid, (u64)data_ptr,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
/*删除binder_buffer与transaction的关联*/
if (buffer->transaction) {
/*移除transaction中的buffer域对该binder_buffer的引用*/
buffer->transaction->buffer = NULL;
/*移除该binder_buffer对该transaction的引用*/
buffer->transaction = NULL;
}
/*处理该binder_buffer相关的异步事务*/
if (buffer->async_transaction && buffer->target_node) {
BUG_ON(!buffer->target_node->has_async_transaction);
if (list_empty(&buffer->target_node->async_todo))
/*target_node的异步待处理事务队列为空,则将是否有异步事物标志位重置为0*/
buffer->target_node->has_async_transaction = 0;
else
/*将该binder_buffer对应的target_node中所有待处理的异步事务(async_todo),移到当前线程的todo队列中*/
list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
}
trace_binder_transaction_buffer_release(buffer);
binder_transaction_buffer_release(proc, buffer, NULL);
/*释放binder_buffer管理的空间*/
binder_free_buf(proc, buffer);
break;
}
-
BC_FREE_BUFFER
命令是在IPCThreadState
的free_buffer
函数中发出的。而freeBuffer
一般在两个地方被调用,一个是waitForResponse
处理BR_REPLY
命令时;另一个是处理BR_TRANSACTION
和BR_REPLY
命令时,通过Parcel的ipcSetDataReference
注册后到mOwner
中,然后在之后间接调用。 -
binder_transaction_buffer_release
会先看一下buffer->target_node
是否为空,不是话就先减少对这个binder_node
的引用计数,因为在binder_transaction
(该函数在下一节有详细的介绍)中对它递增了引用计数,以防止该binder_node
被释放。接着循环处理在该binder_buffer
的offsets
区域存放的flat_binder_object
,根据其类型分别减少对应binder_node
或者binder_ref
的引用计数,具体是:-
BINDER_TYPE_BINDER
减少对应binder_node
的强引用计数,BINDER_TYPE_WEAK_BINDER
减少对应binder_node
的弱引用计数。 -
BINDER_TYPE_HANDLE
减少对应binder_ref
的强引用计数,BINDER_TYPE_WEAK_HANDLE
减少对应binder_ref
的弱引用计数。
-
-
binder_free_buf
释放binder_buffer
管理空间,如果可以还会合并相邻前后空闲的节点,具体可参考Binder驱动之 binder_buffer的分配与回收的3.1小节。
Binder事务处理核心 —— BC_TRANSACTION
, BC_REPLY
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
/*从将binder_transaction_data从进程用户态地址拷到内核地址空间*/
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
/*直接调用binder_transaction进行事务处理*/
**binder_transaction**(proc, thread, &tr, cmd == BC_REPLY);
break;
}
接下来我们看一下事务处理的核心实现binder_transaction
/*
 * binder_transaction - core of BC_TRANSACTION / BC_REPLY processing.
 * @proc:   the sending process
 * @thread: the sending thread
 * @tr:     the transaction payload copied in from user space
 * @reply:  non-zero for BC_REPLY, zero for BC_TRANSACTION
 *
 * Resolves the target process/thread, allocates a binder_transaction plus a
 * binder_buffer in the target, performs binder's single user->kernel data
 * copy, translates every flat_binder_object embedded in the payload, queues
 * the work on the target's todo list and wakes it up.  On failure, queues a
 * return error for the sender instead.
 */
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end;
	binder_size_t off_min;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;

	/* Record a transaction log entry for debugging. */
	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;

	/* Step 1: determine the target thread and target process. */
	if (reply) {
		/*
		 * BC_REPLY: a server thread is answering a client request.
		 * The transaction being replied to sits on top of this
		 * thread's transaction stack.
		 */
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		/* The transaction's target thread must be this thread. */
		if (in_reply_to->to_thread != thread) {
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
					  proc->pid, thread->pid, in_reply_to->debug_id,
					  in_reply_to->to_proc ?
					  in_reply_to->to_proc->pid : 0,
					  in_reply_to->to_thread ?
					  in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		/* Pop the transaction off this thread's stack. */
		thread->transaction_stack = in_reply_to->to_parent;
		/* The reply goes back to the thread that issued the request. */
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		/*
		 * The (client) target thread's top-of-stack must be the
		 * transaction this reply answers.
		 */
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
					  proc->pid, thread->pid,
					  target_thread->transaction_stack ?
					  target_thread->transaction_stack->debug_id : 0,
					  in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		/* The target process follows from the target thread. */
		target_proc = target_thread->proc;
	} else {
		/*
		 * BC_TRANSACTION: a client sends a request to a server.
		 * First resolve the target node.
		 */
		if (tr->target.handle) {
			/* Ordinary service: handle > 0, look up the ref. */
			struct binder_ref *ref;

			ref = binder_get_ref(proc, tr->target.handle);
			if (ref == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			/* handle 0 addresses the ServiceManager node. */
			target_node = binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		/* The target process follows from the target node. */
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			goto err_invalid_target_handle;
		}
		/*
		 * Optionally pick a specific target thread: only for
		 * synchronous (non-one-way) calls with a non-empty stack.
		 */
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
						  proc->pid, thread->pid, tmp->debug_id,
						  tmp->to_proc ? tmp->to_proc->pid : 0,
						  tmp->to_thread ?
						  tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			/*
			 * Walk down the transaction stack; the last (earliest)
			 * thread in the target process that issued a request
			 * to us becomes the target thread.
			 */
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		/* A specific thread was found: use its todo/wait queues. */
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		/*
		 * No specific thread: fall back to the process-wide queues.
		 * This can only happen for BC_TRANSACTION.
		 */
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	/* Step 2: allocate and initialize the binder_transaction. */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);
	/* binder_work used to notify the sender that the send completed. */
	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size);

	/*
	 * For a synchronous BC_TRANSACTION (a reply is expected) record the
	 * sending thread; a BC_REPLY or one-way call carries no sender.
	 */
	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	/* Allocate the data buffer out of the TARGET process's mapping. */
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	/* The buffer now references target_node: take a strong reference. */
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	/* The offsets area starts right after the (aligned) data area. */
	offp = (binder_size_t *)(t->buffer->data +
				 ALIGN(tr->data_size, sizeof(void *)));

	/*
	 * Copy the payload from user space into the kernel binder_buffer.
	 * This is binder's famous single data copy.
	 */
	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	/* Copy the offsets area as well. */
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				  proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	off_end = (void *)offp + tr->offsets_size;
	off_min = 0;

	/* Step 3: translate every embedded flat_binder_object. */
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;

		/* Each offset must point at a whole, aligned, in-order object. */
		if (*offp > t->buffer->data_size - sizeof(*fp) ||
		    *offp < off_min ||
		    t->buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(u32))) {
			binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)(t->buffer->data_size -
					  sizeof(*fp)));
			return_error = BR_FAILED_REPLY;
			goto err_bad_offset;
		}
		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
		off_min = *offp + sizeof(struct flat_binder_object);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			/* A local BBinder is being passed out of its process. */
			struct binder_ref *ref;
			struct binder_node *node = binder_get_node(proc, fp->binder);

			if (node == NULL) {
				/*
				 * No binder_node represents this service yet:
				 * create one.
				 */
				node = binder_new_node(proc, fp->binder, fp->cookie);
				if (node == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_new_node_failed;
				}
				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
			}
			if (fp->cookie != node->cookie) {
				binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
						  proc->pid, thread->pid,
						  (u64)fp->binder, node->debug_id,
						  (u64)fp->cookie, (u64)node->cookie);
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			if (security_binder_transfer_binder(proc->tsk,
							    target_proc->tsk)) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			/*
			 * Find (or create) the target process's binder_ref
			 * for this node.
			 */
			ref = binder_get_ref_for_node(target_proc, node);
			if (ref == NULL) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			/* Convert to the client-side representation. */
			if (fp->type == BINDER_TYPE_BINDER)
				fp->type = BINDER_TYPE_HANDLE;
			else
				fp->type = BINDER_TYPE_WEAK_HANDLE;
			fp->handle = ref->desc;
			/* Pin the ref so it cannot be freed under us. */
			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
				       &thread->todo);

			trace_binder_transaction_node_to_ref(t, node, ref);
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx -> ref %d desc %d\n",
				     node->debug_id, (u64)node->ptr,
				     ref->debug_id, ref->desc);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			/* A BpBinder proxy handle is being passed along. */
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);

			if (ref == NULL) {
				binder_user_error("%d:%d got transaction with invalid handle, %d\n",
						  proc->pid,
						  thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_failed;
			}
			if (security_binder_transfer_binder(proc->tsk,
							    target_proc->tsk)) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_failed;
			}
			if (ref->node->proc == target_proc) {
				/*
				 * The handle is going back to the process
				 * that hosts the service: convert it to the
				 * server-side (node) representation.
				 */
				if (fp->type == BINDER_TYPE_HANDLE)
					fp->type = BINDER_TYPE_BINDER;
				else
					fp->type = BINDER_TYPE_WEAK_BINDER;
				fp->binder = ref->node->ptr;
				fp->cookie = ref->node->cookie;
				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
				trace_binder_transaction_ref_to_node(t, ref);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "        ref %d desc %d -> node %d u%016llx\n",
					     ref->debug_id, ref->desc, ref->node->debug_id,
					     (u64)ref->node->ptr);
			} else {
				/*
				 * Different target process: each process has
				 * its own binder_ref (and desc) per node, so
				 * find or create the target's ref.
				 */
				struct binder_ref *new_ref;

				new_ref = binder_get_ref_for_node(target_proc, ref->node);
				if (new_ref == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_get_ref_for_node_failed;
				}
				/* Rewrite the handle into the target's desc. */
				fp->handle = new_ref->desc;
				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
				trace_binder_transaction_ref_to_ref(t, ref,
								    new_ref);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
					     ref->debug_id, ref->desc, new_ref->debug_id,
					     new_ref->desc, ref->node->debug_id);
			}
		} break;
		case BINDER_TYPE_FD: {
			/* A file descriptor is being transferred. */
			int target_fd;
			struct file *file;

			if (reply) {
				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
					/* The client refused fds. */
					binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
							  proc->pid, thread->pid, fp->handle);
					return_error = BR_FAILED_REPLY;
					goto err_fd_not_allowed;
				}
			} else if (!target_node->accept_fds) {
				/* The target service refused fds. */
				binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
						  proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fd_not_allowed;
			}

			/* Resolve the sender's fd to its struct file. */
			file = fget(fp->handle);
			if (file == NULL) {
				binder_user_error("%d:%d got transaction with invalid fd, %d\n",
						  proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fget_failed;
			}
			if (security_binder_transfer_file(proc->tsk,
							  target_proc->tsk,
							  file) < 0) {
				fput(file);
				return_error = BR_FAILED_REPLY;
				goto err_get_unused_fd_failed;
			}
			/* Reserve an unused fd in the TARGET process. */
			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
			if (target_fd < 0) {
				fput(file);
				return_error = BR_FAILED_REPLY;
				goto err_get_unused_fd_failed;
			}
			/*
			 * Bind target_fd to the same struct file: the two
			 * processes use different fd numbers but share the
			 * underlying open file description.
			 */
			task_fd_install(target_proc, target_fd, file);
			trace_binder_transaction_fd(t, fp->handle, target_fd);
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d -> %d\n", fp->handle, target_fd);
			/* TODO: fput? */
			fp->handle = target_fd;
		} break;

		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
					  proc->pid, thread->pid, fp->type);
			return_error = BR_FAILED_REPLY;
			goto err_bad_object_type;
		}
	}

	/* Step 4: maintain the transaction stacks, then queue the work. */
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		/*
		 * The request is fully answered: pop it from the target
		 * (client) thread's stack and free it.
		 */
		binder_pop_transaction(target_thread, in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		/* Synchronous request: push t onto the sender's stack. */
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		/* One-way (async) request. */
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			/*
			 * An async transaction is already in flight on this
			 * node: park t on the node's async_todo list and do
			 * not wake anyone.
			 */
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	/*
	 * Queue tcomplete on the sender's own todo list; when processed it
	 * emits BR_TRANSACTION_COMPLETE, telling the sender that the
	 * request/reply has been dispatched.
	 */
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait)
		wake_up_interruptible(target_wait);
	return;

	/* Error unwinding follows. */
err_get_unused_fd_failed:
err_fget_failed:
err_fd_not_allowed:
err_binder_get_ref_for_node_failed:
err_binder_get_ref_failed:
err_binder_new_node_failed:
err_bad_object_type:
err_bad_offset:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	/*
	 * Drop the node/ref counts taken for this transaction and for every
	 * flat_binder_object already translated; close any installed fds.
	 */
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	t->buffer->transaction = NULL;
	/* Free the binder_buffer allocated above. */
	binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
	/* Free the binder_work. */
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	/* Free the binder_transaction. */
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d, size %lld-%lld\n",
		     proc->pid, thread->pid, return_error,
		     (u64)tr->data_size, (u64)tr->offsets_size);

	{
		struct binder_transaction_log_entry *fe;

		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
	}

	BUG_ON(thread->return_error != BR_OK);
	if (in_reply_to) {
		thread->return_error = BR_TRANSACTION_COMPLETE;
		binder_send_failed_reply(in_reply_to, return_error);
	} else
		/*
		 * Stash the error code; it is delivered to the sender in a
		 * later binder_thread_read().
		 */
		thread->return_error = return_error;
}
-
binder_transaction
函数虽然代码挺长的,但逻辑上并不算复杂,大体可以分为四个步骤:- 第一步:确定
target_proc
及target_thread
,并根据target_thread
和target_proc
确定target_list
和target_wait
; - 第二步:创建一个
binder_transaction -->t
,binder_buffer --> t->buffer
,binder_work --> tcomplete
及做相关初始化,binder_transaction
中的data.prt.buffer
和data.prt.offsets
的内容从用户态拷贝到内核态的binder_buffer
中,这里就是binder通信中仅有的一次数据拷贝发生的地方; - 第三步:依次处理从
binder_transaction_data
中拷贝进来的的flat_binder_object
:- 如果类型是
BINDER_TYPE_BINDER
或者BINDER_TYPE_WEAK_BINDER
,说明这个flat_binder_object
是一个binder service,或者说BBinder
,flat_binder_object
中存储的binder对象地址(binder
和cookie两个域
)在不同进程的客户端中无法直接使用,需将其转换成对应代理或者说句柄(handle
)。这个代理的handle
即是其内核态表示binder_ref
的desc
域的值。转换过程如下,先在proc->nodes
红黑树中查找该binder service在内核态表示binder_node
,如果没有找到就创建一个;接着根据这个binder_node
在proc->refs_by_node
红黑树中查找这个binder service在内核态的表示binder_ref
, 同样的如果未在该红黑树找到,也创建一个插入到树中;得到binder_ref
后,其desc
域就是转换flat_binder_object
的handle
值,即客户端代理的句柄,并将flat_binder_object
的类型改为对应的BINDER_TYPE_HANDLE
或者BINDER_TYPE_WEAK_HANDLE
。 - 如果类型是
BINDER_TYPE_HANDLE
或者BINDER_TYPE_WEAK_HANDLE
,则说明这个flat_binder_object
是一个binder service的客户端代理,或者说BpBinder
,此时先从proc->refs_by_desc
根据flat_binder_object
的handle
找到相应的binder_ref
。然后分两种情况处理:如果此次transaction的目标进程是该binder service所在的进程,就需要将flat_binder_object
转换成对应的服务端表示,即flat_binder_object
的binder
和cookie
域设置为binder_node
中的ptr
和cookie
, 这两个域前一个是BBinder
基类RefBase
中引用计数计数器成员mRef
, 后一个是BBinder
的地址,这样用户态读到到flat_binder_object
后就可以很容易的转换得到BBinder
对象,还需将类型type
设置成BINDER_TYPE_BINDER
或者BINDER_TYPE_WEAK_BINDER
,最后增加binder_node
的强或者弱引用计数;如果此次transaction的目标进程不是该binder service所在的进程,则需先在目标进程中的proc->refs_by_node
查找对应的binder_ref
,如果没有找到的话就会创建一个,然后将flat_binder_object
中的handle
域,修改为找到binder_ref
的desc
值,因为handle
的作用域是一个进程中,不同进程中相同的handle
表示的不同binder_ref
,因此它对应的binder_node
也可能是不一样的。最后增加这个binder_ref
的强或者弱引用计数。 - 如果类型是
BINDER_TYPE_FD
,说明这个flat_binder_object
传递的是一个文件描述符,这时flat_binder_object
的handle
域存放的就是要传递的文件描述符。首先根据handle
即fd
的值用fget
获取到相应的struct file
,接着调用task_get_unused_fd_flags
在目标进程获取一个还没被使用的fd,然后使用task_fd_install
将目标中的fd和第一个步骤获取到的struct file
建立关联,最后将flat_binder_object
中的handle
修改为目标进程的fd,这样目标进程在用户态操作这个fd就作用在了与发送进程相同的struct file
上。所以文件描述符传递本质上是共享struct file
。
- 如果类型是
- 第四步:先处理事务栈,如果本次处理的是
BC_REPLY
命令,说明前一次的BC_TRANSACTION
命令已经处理完毕了,因此将目标线程的事务栈顶元素出栈;如果本次处理的是一个BC_TRANSACTION
命令,且是不带有ONE_WAY
标识(需要对方reply),则将第二步创建的事务t
入栈;如果本次处理的是一个BC_TRANSACTION
命令,但是带有ONE_WAY
标识(不需要对方reply),则根据对方线程是否已经有异步的binder_transaction
决定是否将target_list
修改为目标线程的async_todo
队列,还是仅仅设立has_async_transaction
标志位,target_list
依旧为目标线程的todo
队列。然后将事物t
的binder_work
类型设置为BINDER_WORK_TRANSACTION
,通过work.entry
插入到target_list
(目标线程/进程的todo
或者async_todo
)中。同时也把第二步创建的tcomplete
的work类型设置为BINDER_WORK_TRANSACTION_COMPLETE
, 链入到本地线程的todo
队列的队尾,用于通知进程本次TRANSACTION请求或REPLY已发出。最后,如果根据target_wait
是否为空,决定是否需要唤醒等待在target_thread
/target_proc
的wait
队列上的进程。
- 第一步:确定
-
binder_transaction_data
中的offset
区(即data.ptr.offset
域指向的缓冲区),存储的是每一个flat_binder_object
在data
区(即data.ptr.buffer
域指向的缓冲区)的偏移量。
分析完binder_thread_write
中最核心的两个命令BC_TRANSACTION
,BC_REPLY
我们接着来看剩余的命令
Binder线程状态相关的三个命令: BC_REGISTER_LOOPER
, BC_ENTER_LOOPER
BC_EXIT_LOOPER
case BC_REGISTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
proc->pid, thread->pid);
} else if (proc->requested_threads == 0) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
proc->pid, thread->pid);
} else {
proc->requested_threads--;
proc->requested_threads_started++;
}
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
break;
case BC_ENTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_ENTER_LOOPER\n",
proc->pid, thread->pid);
if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
}
thread->looper |= BINDER_LOOPER_STATE_ENTERED;
break;
case BC_EXIT_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_EXIT_LOOPER\n",
proc->pid, thread->pid);
thread->looper |= BINDER_LOOPER_STATE_EXITED;
break;
-
BC_REGISTER_LOOPER
是进程的非主线程调用IPCThreadState::joinThreadPool
时向Binder驱动发送的命令,表示该线程将进入的binder循环状态,即不断读取并执行binder的请求。IPCThreadState::joinThreadPool
的一个经典的调用地方就是BinderServer
(framework/native/include/binder/BinderService.h
)的publishAndJoinThreadPool
, 用于一个binder service向ServiceManager注册后进入Binder循环状态,不断的处理客户端发过来的请求。它与BC_ENTER_LOOPER
的主要区别是:从进程用户态角度看,它是非主线程调用IPCThreadState::joinThreadPool
发出的命令,而BC_ENTER_LOOPER
则是主线程发出的;从内核驱动的角度看,则是收到该命令时,proc->requested_threads
需不为0,而proc->requested_threads
的++
操作是发生在binder_thread_read
向进程用户态发出BR_SPAWN_LOOPER
时发生的。因此BC_REGISTER_LOOPER
是进程用户态在处理完驱动发给它的BR_SPAWN_LOOPER
命令后发给内核Binder驱动的 。处理BR_SPAWN_LOOPER
用户进程会创建新线程,调用IPCThreadState::joinThreadPool
。至于内核Binder驱动的binder_thread_read
何时会发出BR_SPAWN_LOOPER
命令,我们等到本篇后面章节中分析binder_thread_read
的时候再说明。 -
BC_ENTER_LOOPER
,该命令在进程用户态的两个接口中会发出,一个是主线程调用IPCThreadState::joinThreadPool
;另一个是IPCThreadState::setupPolling
(如system/core/healthd/healthd_mode_android.cpp
)。 -
BC_EXIT_LOOPER
: 退出binder循环状态。在进程用户态的IPCThreadState::joinThreadPool
函数中退出while
循环处理binder请求时发出。
所以这三个命令就是跟Binder驱动同步线程状态的命令,Binder驱动获知相应状态后,将其更新到thread->looper
中。
Binder"死亡"相关的几个命令:BC_REQUEST_DEATH_NOTIFICATION
,BC_CLEAR_DEATH_NOTIFICATION
,BC_DEAD_BINDER_DONE
case BC_REQUEST_DEATH_NOTIFICATION:
case BC_CLEAR_DEATH_NOTIFICATION: {
/*这两个命令是Binder客户端(BpBinder)才会发出的*/
uint32_t target;
binder_uintptr_t cookie;
struct binder_ref *ref;
struct binder_ref_death *death;
/*读取客户端的句柄(handle),以便查找到对应的binder_ref*/
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
/*读取BpBinder对象的地址*/
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
/*根据读取到handle在proc->refs_by_desc红黑树中查找对应的binder_ref*/
ref = binder_get_ref(proc, target);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
target);
break;
}
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
"%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
proc->pid, thread->pid,
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
(u64)cookie, ref->debug_id, ref->desc,
ref->strong, ref->weak, ref->node->debug_id);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
/*之前已经注册过了*/
if (ref->death) {
binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
proc->pid, thread->pid);
break;
}
/*创建一个binder_ref_death对象*/
death = kzalloc(sizeof(*death), GFP_KERNEL);
if (death == NULL) {
thread->return_error = BR_ERROR;
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
proc->pid, thread->pid);
break;
}
binder_stats_created(BINDER_STAT_DEATH);
INIT_LIST_HEAD(&death->work.entry);
death->cookie = cookie; /*保存要通知的BpBinder*/
ref->death = death;
/* binder_deferred_func —> binder_deferred_release->binder_node_release函数中会将proc设为NULL*/
if (ref->node->proc == NULL) {
/* 如果binder service所在的进程已经不在了,说明对应的binder service已经挂啦
*/
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
/*线程已经进入binder循环,则将binder_ref_death通过work域加入线程的todo队列*/
list_add_tail(&ref->death->work.entry, &thread->todo);
} else {
/*否则,将binder_ref_death通过work域加入进程的todo队列*/
list_add_tail(&ref->death->work.entry, &proc->todo);
/*唤醒等待在这个wait队列上的进程*/
wake_up_interruptible(&proc->wait);
}
}
} else {/* BC_CLEAR_DEATH_NOTIFICATION */
if (ref->death == NULL) {/*该binder_ref没有注册binder service死亡通知*/
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
proc->pid, thread->pid);
break;
}
death = ref->death;
/*每个BpBinder只能清除自己注册的死亡通知,不能清除别人注册的*/
if (death->cookie != cookie) {
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
(u64)death->cookie,
(u64)cookie);
break;
}
ref->death = NULL;
if (list_empty(&death->work.entry)) {/*对应的binder service没有死亡*/
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
list_add_tail(&death->work.entry, &thread->todo);
} else {
list_add_tail(&death->work.entry, &proc->todo);
wake_up_interruptible(&proc->wait);
}
} else {/*对应的binder service已经死亡了,此时death通过work链接在proc->delivered_death队列*/
BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);/*确保对应的binder service已经挂了*/
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
}
}
} break;
case BC_DEAD_BINDER_DONE: {
struct binder_work *w;
binder_uintptr_t cookie;
struct binder_ref_death *death = NULL;
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
list_for_each_entry(w, &proc->delivered_death, entry) {
struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
if (tmp_death->cookie == cookie) {
death = tmp_death;
break;
}
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
proc->pid, thread->pid, (u64)cookie,
death);
if (death == NULL) {
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
proc->pid, thread->pid, (u64)cookie);
break;
}
list_del_init(&death->work.entry);/*从proc->delivered_death链表中取出,然后重新初始化即pre,next都指向自己*/
/*之前客户端给驱动发送了BC_CLEAR_DEATH_NOTIFICATION命令清除死亡通知*/
if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;/*修改type*/
/*将death从proc->delivered_death队列移动到thread->todo或者proc->todo队列*/
if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
list_add_tail(&death->work.entry, &thread->todo);
} else {
list_add_tail(&death->work.entry, &proc->todo);
wake_up_interruptible(&proc->wait);
}
}
} break;
/*其他命令,直接打印一条错误消息,返回EINVAL*/
default:
pr_err("%d:%d unknown command %d\n",
proc->pid, thread->pid, cmd);
return -EINVAL;
}//end of switch
*consumed = ptr - buffer;
}// end of while
return 0;
}//end of binder_thread_write
- 前两个命令分别是客户端(BpBinder)用于注册和取消服务端(BBinder)死亡通知的。当一个服务端(BBinder或binder service)因正常或者异常情况退出时,Binder驱动会发送
BR_DEAD_BINDER
命令告知客户端,以便客户端发“讣告”(sendObituary),执行一些资源清理相关的工作。在准备发送“讣告”前,客户端会调用clearDeathNotification
,发送BC_CLEAR_DEATH_NOTIFICATION
命令给驱动,以注销死亡通知。具体过程代码基本已经注明了,就不再赘述了。有两点需要注意的这里简单说明一下:- 在注册死亡通知,即处理
BC_REQUEST_DEATH_NOTIFICATION
命令时,要先判断服务端是否就已经死亡(通过ref->node->proc == NULL
来判断)。如果服务端已经死亡了,就将death->work.type
设为BINDER_WORK_DEAD_BINDER
,然后看当前线程是否已经进入binder循环,来决定是将该work加入到线程的todo
队列还是进程的todo
队列。 - 在取消死亡通知,即处理
BC_CLEAR_DEATH_NOTIFICATION
命令时,需判断服务端是否已经死亡,这时的判断方法是查看death->work.entry
队列是否为空,因为如果服务端已经死亡,death->work
会加入线程的todo
队列。如果death->work.entry
为空,则将work
的类型设为BINDER_WORK_CLEAR_DEATH_NOTIFICATION
,然后加入线程或者进程的todo
队列;如果death->work.entry
不为空,则说明服务端已经死亡,work
已经在todo
队列,不需要加入队列的操作了,但类型需修改为BINDER_WORK_DEAD_BINDER_AND_CLEAR
。
- 在注册死亡通知,即处理
-
BC_DEAD_BINDER_DONE
命令是binder_thread_write
中处理的最后一个命令,它是客户端(BpBinder)在收到Binder驱动发的BR_DEAD_BINDER
命令之后,回复给Binder驱动的命令,用于告知驱动客户端已经处理完服务端死亡相关操作(如:发送“讣告”)。如前所述:在发送“讣告”前,客户端还会先调用clearDeathNotification
发送一个BC_CLEAR_DEATH_NOTIFICATION
命令给Binder驱动,用于清除服务端(binder service)的死亡通知。 -
BC_DEAD_BINDER_DONE
命令在驱动中的主要操作就是将death.work.type
修改为BINDER_WORK_CLEAR_DEATH_NOTIFICATION
,并将death从proc->delivered_death
队列移动到thread->todo
或者proc->todo
队列,以便在之后的binder_thread_read
处理todo
队列时,释放death
、回复客户端等操作,具体详见3.2.2.2小节。
下面这个时序图是binder service死亡时,death.work类型及所在队列的流转图:
图中proc
和thread
都是客户端的proc
和thread
。但binder_deferred_func->binder_deferred_release->binder_node_release
,这个调用是发生在服务端的内核线程中的。
到这里binder_thread_write
处理涉及的所有命令我们就分析完啦,其他无法识别的命令全都走default
分支,输出一条错误消息后,直接返回-EINVAL
。