The Binder driver is the core of the entire Binder framework: application-level Binder clients, Binder servers, and even the Service Manager are all clients of the driver.
The Binder driver source is located at:
static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
.compat_ioctl = binder_ioctl,
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
};
Like any other character driver, the binder driver implements a set of file_operations; the sections below analyze these functions one by one.
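As a quick orientation first, here is a minimal user-space sketch (hypothetical; error handling omitted, and the uapi header path varies across kernel/Android versions) showing how these entry points are reached: open() lands in binder_open, mmap() in binder_mmap, and ioctl() in binder_ioctl.
/* Hypothetical sketch of a binder client bootstrapping itself.
 * Assumes a uapi header defining struct binder_write_read and
 * BINDER_WRITE_READ, e.g. <linux/android/binder.h>. */
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

int main(void)
{
	int fd = open("/dev/binder", O_RDWR);           /* -> binder_open  */
	void *map = mmap(NULL, 128 * 1024, PROT_READ,   /* -> binder_mmap  */
			 MAP_PRIVATE, fd, 0);
	struct binder_write_read bwr = {0};             /* empty request   */
	ioctl(fd, BINDER_WRITE_READ, &bwr);             /* -> binder_ioctl */
	(void)map;
	return 0;
}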
binder_ioctl carries out almost all of the data exchange. Since it covers many operations (both reads and writes), the analysis runs fairly long.
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data; // the binder_proc object created during the open() call
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd); // payload size encoded in the cmd magic, i.e. the length of the valid data behind arg
void __user *ubuf = (void __user *)arg; // __user: the address belongs to the calling process's user address space (low addresses; kernel space occupies the high range)
binder_lock(__func__);
thread = binder_get_thread(proc); // get a binder_thread for the calling thread (created on first use); it represents the thread performing this operation
// dispatch on the command type
switch (cmd) {
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
if (ret)
goto err;
break;
case BINDER_SET_MAX_THREADS:
break;
case BINDER_SET_CONTEXT_MGR:
break;
case BINDER_THREAD_EXIT:
break;
case BINDER_VERSION: {
break;
}
default:
	ret = -EINVAL;
	goto err;
}
ret = 0;
err:
if (thread)
thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
binder_unlock(__func__);
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
trace_binder_ioctl_done(ret);
return ret;
}
Note the call to wait_event_interruptible: it blocks the current task (the user-space process/thread) until the given condition holds. Synchronization inside the binder driver is built on this mechanism.
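As a minimal sketch of this mechanism (illustrative kernel-style code, not taken from the binder driver), a consumer sleeps on a wait queue until a producer satisfies the condition and wakes it:
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static int demo_ready;

/* consumer: sleep until demo_ready is non-zero,
 * or return -ERESTARTSYS if interrupted by a signal */
static int demo_consume(void)
{
	return wait_event_interruptible(demo_wait, demo_ready != 0);
}

/* producer: make the condition true, then wake the sleepers */
static void demo_produce(void)
{
	demo_ready = 1;
	wake_up_interruptible(&demo_wait);
}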
A binder_thread object corresponds to a thread in the application; one is created per calling thread.
The composition of a binder command number is interesting: the magic encodes several fields: dir | type | nr | size.
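For example, BINDER_WRITE_READ is built with the standard _IOWR macro, and the driver recovers each field with the _IOC_* helpers; a sketch (bit layout per the common asm-generic encoding):
/* Sketch: decoding the components of a binder ioctl number.
 * The binder uapi header defines, for instance:
 *   #define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
 * The asm-generic layout packs, from low to high bits:
 *   nr (8 bits) | type (8 bits) | size (14 bits) | dir (2 bits) */
#include <linux/ioctl.h>

static void decode_cmd(unsigned int cmd)
{
	unsigned int dir  = _IOC_DIR(cmd);  /* _IOC_READ | _IOC_WRITE */
	unsigned int type = _IOC_TYPE(cmd); /* 'b' */
	unsigned int nr   = _IOC_NR(cmd);   /* 1 for BINDER_WRITE_READ */
	unsigned int size = _IOC_SIZE(cmd); /* sizeof(struct binder_write_read) */
	(void)dir; (void)type; (void)nr; (void)size;
}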
The BINDER_WRITE_READ command is delegated to binder_ioctl_write_read. Note its parameters: filp, the cmd and arg passed in from user space, and the freshly obtained binder_thread object representing the thread that issued the ioctl.
static int binder_ioctl_write_read(struct file *filp, unsigned int cmd, unsigned long arg, struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
unsigned int size = _IOC_SIZE(cmd); // extract the data size from the cmd magic (the length of the data behind arg)
void __user *ubuf = (void __user *)arg; // arg points into the calling process's address space; the size field of the cmd magic gives the length of what it points to
struct binder_write_read bwr; // user space packs its request into a struct binder_write_read before calling ioctl
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto out;
}
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d write %lld at %016llx, read %lld at %016llx\n",
proc->pid, thread->pid,
(u64)bwr.write_size, (u64)bwr.write_buffer,
(u64)bwr.read_size, (u64)bwr.read_buffer);
// binder_ioctl_write_read performs the write and/or read work depending on write_size and read_size in struct binder_write_read
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);
trace_binder_write_done(ret);
// on failure, report the result of binder_thread_write back to user space
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait); // work is still pending on proc->todo: wake other threads waiting on this process (a reader blocks inside binder_thread_read itself)
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
out:
return ret;
}
The read and write work of binder_ioctl is done by binder_ioctl_write_read; that function inspects the struct binder_write_read passed from user space as arg to decide whether a write, a read, or both are requested, and calls binder_thread_write / binder_thread_read accordingly.
struct binder_write_read {
binder_size_t write_size; /* bytes to write */
binder_size_t write_consumed; /* bytes consumed by driver */
binder_uintptr_t write_buffer;
binder_size_t read_size; /* bytes to read */
binder_size_t read_consumed; /* bytes consumed by driver */
binder_uintptr_t read_buffer;
};
Note that this structure has a fixed length; the write and read halves are each described by a size / consumed / buffer triple. From this one can infer that write_buffer and read_buffer are __user values, i.e. pointers into the application's address space.
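A hedged user-space sketch of filling this structure: the simplest write is a single parameter-less command such as BC_ENTER_LOOPER, with the read half left empty (function name is illustrative):
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* assumed uapi header path */

static int enter_looper(int binder_fd)
{
	uint32_t cmd = BC_ENTER_LOOPER;         /* 4-byte command, no payload */
	struct binder_write_read bwr = {0};

	bwr.write_buffer   = (binder_uintptr_t)(uintptr_t)&cmd;
	bwr.write_size     = sizeof(cmd);
	bwr.write_consumed = 0;
	bwr.read_size      = 0;                 /* nothing to read this time */

	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}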
Based on the results of binder_thread_read / binder_thread_write, the driver again updates the contents behind arg and hands them back to user space via copy_to_user.
Note that a reading thread blocks on the binder_thread_read path invoked from binder_ioctl_write_read.
Note the parameters of binder_thread_write: the binder_proc for the current process, the binder_thread for the current thread, and three values from the struct binder_write_read that arg was converted into: binder_write_read.write_buffer, binder_write_read.write_size, and binder_write_read.write_consumed. (The non-blocking flag, filp->f_flags & O_NONBLOCK, is passed only to binder_thread_read.)
Since the function is quite long, parts that do not affect this analysis have been removed.
static int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, binder_size_t *consumed) {
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer; // as expected, binder_buffer corresponds to struct binder_write_read.write_buffer
void __user *ptr = buffer + *consumed; // skip to the correct position: where this write begins
void __user *end = buffer + size; // end marks the final position of the write
// so the data to write spans ptr to end, and all of these pointers point into the application's address space
while (ptr < end && thread->return_error == BR_OK) {
// technically, get_user fetches sizeof(uint32_t) bytes from the application's address space and stores them in cmd
// semantically, the first four bytes of the buffer bounded by ptr and end carry a cmd (distinct from the cmd used by binder_ioctl, though built with the same magic-number scheme)
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
// _IOC_NR is a macro that extracts the sequence number from the cmd magic; the maximum sequence number equals the number of commands
// note that these commands are all accounted for in binder_stats structures:
// the global binder_stats, the per-binder_proc stats, and the per-binder_thread stats count the same categories
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
binder_stats.bc[_IOC_NR(cmd)]++;
proc->stats.bc[_IOC_NR(cmd)]++;
thread->stats.bc[_IOC_NR(cmd)]++;
}
// the case handling below shows that these commands fall into classes
switch (cmd) {
// class 1
case BC_INCREFS:
case BC_ACQUIRE:
case BC_RELEASE:
case BC_DECREFS: {
uint32_t target;
struct binder_ref *ref;
const char *debug_string;
// in the buffer bounded by ptr and end, the four bytes following cmd are target, identifying the peer of the communication
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
// target is a handle; 0 is a special handle referring to the ServiceManager, officially called the context manager
// its role is essentially namespace translation: hand it a string such as "com.mediaplayer" describing a service, and it returns a handle representing that server
// from such a handle the binder driver locates the service's binder_proc in the tree it maintains
if (target == 0 && binder_context_mgr_node &&
(cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
ref = binder_get_ref_for_node(proc, binder_context_mgr_node);
if (ref->desc != target) {
binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
proc->pid, thread->pid,
ref->desc);
}
} else
ref = binder_get_ref(proc, target);
// at this point ref holds the binder_ref corresponding to target (a service or a service proxy)
// the BC_INCREFS/BC_ACQUIRE/BC_RELEASE/BC_DECREFS operations are straightforward: they only adjust reference counts
switch (cmd) {
case BC_INCREFS:
debug_string = "IncRefs";
binder_inc_ref(ref, 0, NULL);
break;
case BC_ACQUIRE:
debug_string = "Acquire";
binder_inc_ref(ref, 1, NULL);
break;
case BC_RELEASE:
debug_string = "Release";
binder_dec_ref(ref, 1);
break;
case BC_DECREFS:
default:
debug_string = "DecRefs";
binder_dec_ref(ref, 0);
break;
}
break;
} // end of class 1
// class 2
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
// so in the memory bounded by ptr and end, the content immediately following cmd is a struct binder_transaction_data
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break;
} // end of class 2
default:
	pr_err("%d:%d unknown command %d\n", proc->pid, thread->pid, cmd);
	return -EINVAL;
} // end of switch
*consumed = ptr - buffer;
} // end of while
return 0;
}
When cmd is BC_TRANSACTION/BC_REPLY (i.e. cross-process data transfer), the function about to be called is binder_transaction.
The content behind struct binder_write_read (the arg passed in from user space) is organized as follows (shown for a BC_TRANSACTION entry; class-1 commands instead carry a 4-byte target right after cmd):
struct binder_write_read
---------------------------- write_buffer
| consumed entry 0
| ....
| consumed entry n
---------------------------- write_buffer + write_consumed (ptr)
| cmd (4 bytes)
| struct binder_transaction_data (sizeof(struct binder_transaction_data))
| ....
| N bytes
---------------------------- write_buffer + write_size (end)
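A hedged sketch of how user space would pack one such entry (a real client builds this through libbinder's Parcel; names here are illustrative):
#include <string.h>
#include <stdint.h>
#include <linux/android/binder.h>

/* Pack one BC_TRANSACTION entry: the 4-byte cmd immediately followed
 * by a struct binder_transaction_data describing the payload. */
static size_t pack_transaction(uint8_t *out, uint32_t handle,
			       uint32_t code, const void *data, size_t len)
{
	uint32_t cmd = BC_TRANSACTION;
	struct binder_transaction_data tr;

	memset(&tr, 0, sizeof(tr));
	tr.target.handle   = handle;  /* 0 = context manager (ServiceManager) */
	tr.code            = code;    /* which method to invoke */
	tr.data_size       = len;
	tr.offsets_size    = 0;       /* no embedded binder objects here */
	tr.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)data;

	memcpy(out, &cmd, sizeof(cmd));
	memcpy(out + sizeof(cmd), &tr, sizeof(tr));
	return sizeof(cmd) + sizeof(tr);
}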
struct binder_transaction_data {
/* The first two are only used for bcTRANSACTION and brTRANSACTION,
* identifying the target and contents of the transaction.
*/
union {
/* target descriptor of command transaction */
__u32 handle;
/* target descriptor of return transaction */
binder_uintptr_t ptr;
} target;
binder_uintptr_t cookie; /* target object cookie */
__u32 code; /* transaction command */
/* General information about the transaction. */
__u32 flags;
pid_t sender_pid;
uid_t sender_euid;
binder_size_t data_size; /* number of bytes of data */
binder_size_t offsets_size; /* number of bytes of offsets */
/* If this transaction is inline, the data immediately
* follows here; otherwise, it ends with a pointer to
* the data buffer.
*/
union {
struct {
/* transaction data */
binder_uintptr_t buffer;
/* offsets from buffer to flat_binder_object structs */
binder_uintptr_t offsets;
} ptr;
__u8 buf[8];
} data;
};
This structure shows that the transaction references further content beyond what ptr and end bound: that content lives in user space and is specified by binder_transaction_data.data.ptr.buffer and binder_transaction_data.data.ptr.offsets; exactly how the two are used becomes clear in binder_transaction.
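A hedged sketch of the buffer/offsets pairing: for every flat_binder_object embedded in the data buffer, user space records its byte offset in a parallel offsets array, and the two arrays become data.ptr.buffer and data.ptr.offsets (all names illustrative):
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

struct demo_parcel {
	uint8_t       data[256];     /* becomes tr.data.ptr.buffer  */
	binder_size_t offsets[4];    /* becomes tr.data.ptr.offsets */
	size_t        data_pos;
	size_t        n_objects;
};

/* Append one flat_binder_object and remember where it sits, so the
 * driver can later find it via buffer + offsets[i]. */
static void parcel_put_object(struct demo_parcel *p,
			      const struct flat_binder_object *obj)
{
	p->offsets[p->n_objects++] = p->data_pos;
	memcpy(p->data + p->data_pos, obj, sizeof(*obj));
	p->data_pos += sizeof(*obj);
	/* then: tr.data_size    = p->data_pos,
	 *       tr.offsets_size = p->n_objects * sizeof(binder_size_t) */
}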
Before reading this function, its parameters deserve emphasis: the binder_proc / binder_thread, the struct binder_transaction_data tr just copied from user space with copy_from_user, and whether this is a BC_REPLY.
This function is also huge; for readability, parts that do not affect this analysis have been removed (the reply case is not covered this time).
static void binder_transaction(struct binder_proc *proc,struct binder_thread *thread, struct binder_transaction_data *tr, int reply) {
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end;
binder_size_t off_min;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
uint32_t return_error;
if (reply) {
.....
} else {
if (tr->target.handle) { // handle identifies the communication target; from it the binder driver can locate the target's binder_proc
struct binder_ref *ref;
// look up the target's binder_ref (effectively obtaining its binder_proc)
ref = binder_get_ref(proc, tr->target.handle);
if (ref == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_invalid_target_handle;
}
// binder_node, binder_proc, and binder_ref correspond almost one-to-one; from any one of them the other two can be reached
target_node = ref->node;
}
target_proc = target_node->proc;
// this call uses the LSM module to check whether the communication is permitted
if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
goto err_invalid_target_handle;
}
// find the target's binder_thread and store it in target_thread (the statements below run only when a reply is expected)
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
goto err_bad_call_stack;
}
while (tmp) {
if (tmp->from && tmp->from->proc == target_proc)
target_thread = tmp->from;
tmp = tmp->from_parent;
}
}
}//end of if
if (target_thread) {
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else {
target_list = &target_proc->todo;
target_wait = &target_proc->wait;
}
// t is a pointer to struct binder_transaction
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
// tcomplete is of type struct binder_work
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
t->debug_id = ++binder_last_id;
// the debug messages below show the payload lives in data.ptr.buffer of the incoming struct binder_transaction_data, confirming the earlier guess
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
(u64)tr->data.ptr.buffer,
(u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
(u64)tr->data.ptr.buffer,
(u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size);
// here the data to transfer gets wrapped into a struct binder_transaction; the statements below fill in its fields
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
// struct binder_transaction.buffer will hold the data passed in by the application (the data to relay)
// crucially, the buffer is allocated from target_proc's address space, i.e. it is backed by the target's physical page frames
// so this copy itself is the cross-process data transfer
t->buffer = binder_alloc_buf(target_proc, tr->data_size,tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
offp = (binder_size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
// the payload splits into a data part (data_size bytes) and an offsets part, copied in two steps
// copy step 1: the data part referenced by tr->data.ptr.buffer (cmd and struct binder_transaction_data themselves were already consumed from the write buffer)
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
// copy step 2; note that tr->data.ptr.offsets and tr->data.ptr.buffer are distinct pointers
if (copy_from_user(offp, (const void __user *)(uintptr_t)tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n", proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
// the for loop below walks the offsets part: offp points at the start, off_end at the end
off_end = (void *)offp + tr->offsets_size;
off_min = 0;
// reading the code below shows that tr->data.ptr.buffer and tr->data.ptr.offsets work together:
// buffer + offsets[i] locates one struct flat_binder_object
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
off_min = *offp + sizeof(struct flat_binder_object);
switch (fp->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct binder_ref *ref;
struct binder_node *node = binder_get_node(proc, fp->binder);
if (fp->cookie != node->cookie) {
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
(u64)fp->binder, node->debug_id,
(u64)fp->cookie, (u64)node->cookie);
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
ref = binder_get_ref_for_node(target_proc, node);
if (ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
if (fp->type == BINDER_TYPE_BINDER)
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
fp->handle = ref->desc;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
trace_binder_transaction_node_to_ref(t, node, ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx -> ref %d desc %d\n",
node->debug_id, (u64)node->ptr,
ref->debug_id, ref->desc);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_failed;
}
if (ref->node->proc == target_proc) {
if (fp->type == BINDER_TYPE_HANDLE)
fp->type = BINDER_TYPE_BINDER;
else
fp->type = BINDER_TYPE_WEAK_BINDER;
fp->binder = ref->node->ptr;
fp->cookie = ref->node->cookie;
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
trace_binder_transaction_ref_to_node(t, ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%016llx\n",
ref->debug_id, ref->desc, ref->node->debug_id,
(u64)ref->node->ptr);
} else {
struct binder_ref *new_ref;
new_ref = binder_get_ref_for_node(target_proc, ref->node);
if (new_ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
fp->handle = new_ref->desc;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
trace_binder_transaction_ref_to_ref(t, ref,
new_ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> ref %d desc %d (node %d)\n",
ref->debug_id, ref->desc, new_ref->debug_id,
new_ref->desc, ref->node->debug_id);
}
} break;
case BINDER_TYPE_FD: {
int target_fd;
struct file *file;
if (reply) {
if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
}
} else if (!target_node->accept_fds) {
binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
}
file = fget(fp->handle);
if (file == NULL) {
binder_user_error("%d:%d got transaction with invalid fd, %d\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fget_failed;
}
if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) {
fput(file);
return_error = BR_FAILED_REPLY;
goto err_get_unused_fd_failed;
}
target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
if (target_fd < 0) {
fput(file);
return_error = BR_FAILED_REPLY;
goto err_get_unused_fd_failed;
}
task_fd_install(target_proc, target_fd, file);
trace_binder_transaction_fd(t, fp->handle, target_fd);
binder_debug(BINDER_DEBUG_TRANSACTION,
" fd %d -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
fp->handle = target_fd;
} break;
default:
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
proc->pid, thread->pid, fp->type);
return_error = BR_FAILED_REPLY;
goto err_bad_object_type;
}//end of switch case
} //end of for
// at this point all struct flat_binder_object entries passed down from the application's address space have been processed
if (reply) {
....
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
t->need_reply = 1;
// push this struct binder_transaction onto the current thread's transaction_stack
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
if (target_node->has_async_transaction) {
target_list = &target_node->async_todo;
target_wait = NULL;
} else
target_node->has_async_transaction = 1;
}
// queue this struct binder_transaction on the target's target_list for the target to process
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
// wake up the target's wait queue so it can process the transaction (with its freshly translated flat_binder_object entries)
if (target_wait)
wake_up_interruptible(target_wait);
return;
}
At this point the analysis is roughly 90% complete: we now know how content is transferred across tasks; how that content is processed further remains to be analyzed.
Those extra N bytes of content amount to a sequence of flat_binder_object entries awaiting processing, each located at data.ptr.buffer plus an entry of data.ptr.offsets.
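For reference, the flat_binder_object layout from the binder uapi header of the same era:
struct flat_binder_object {
	/* 8 bytes for large_flat_header. */
	__u32 type;   /* BINDER_TYPE_BINDER / _HANDLE / _FD / weak variants */
	__u32 flags;

	/* 8 bytes of data. */
	union {
		binder_uintptr_t binder;  /* local object */
		__u32 handle;             /* remote object */
	};

	/* extra data associated with local object */
	binder_uintptr_t cookie;
};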