This afternoon I went out to play chess for the first time in ages. I'd lost my feel for the game, but I still kept winning, because my opponents aren't at my level yet, haha. Spent the weekend at home having lunch and chatting with my parents, and it felt like the most down-to-earth kind of life; a pity my wife and our little one weren't around, or the whole family could have gone out together. Alright, enough small talk, on to the main topic.
In the previous article we traced a Binder call from the Application layer through JNI into the native layer, arriving at the doorstep of the kernel: IPCThreadState and ProcessState, Binder's two user-space adapters, have prepared everything for us, and the next step is to enter the kernel through the ioctl system call. Picking up where we left off: the waitForResponse method in IPCThreadState first calls talkWithDriver to interact with the Binder driver. talkWithDriver is also implemented in the IPCThreadState file, at frameworks\native\libs\binder\IPCThreadState.cpp, and its source is as follows:
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}
binder_write_read bwr;
// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
// We don't want to write anything if we are still reading
// from data left in the input buffer and the caller
// has requested to read the next data.
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
// This is what we'll read.
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
IF_LOG_COMMANDS() {
TextOutput::Bundle _b(alog);
if (outAvail != 0) {
alog << "Sending commands to driver: " << indent;
const void* cmds = (const void*)bwr.write_buffer;
const void* end = ((const uint8_t*)cmds)+bwr.write_size;
alog << HexDump(cmds, bwr.write_size) << endl;
while (cmds < end) cmds = printCommand(alog, cmds);
alog << dedent;
}
alog << "Size of receive buffer: " << bwr.read_size
<< ", needRead: " << needRead << ", doReceive: " << doReceive << endl;
}
// Return immediately if there is nothing to do.
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
IF_LOG_COMMANDS() {
alog << "About to read/write, write size = " << mOut.dataSize() << endl;
}
#if defined(__ANDROID__)
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
if (mProcess->mDriverFD <= 0) {
err = -EBADF;
}
IF_LOG_COMMANDS() {
alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
}
} while (err == -EINTR);
IF_LOG_COMMANDS() {
alog << "Our err: " << (void*)(intptr_t)err << ", write consumed: "
<< bwr.write_consumed << " (of " << mOut.dataSize()
<< "), read consumed: " << bwr.read_consumed << endl;
}
if (err >= NO_ERROR) {
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
else
mOut.setDataSize(0);
}
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
IF_LOG_COMMANDS() {
TextOutput::Bundle _b(alog);
alog << "Remaining data size: " << mOut.dataSize() << endl;
alog << "Received commands from driver: " << indent;
const void* cmds = mIn.data();
const void* end = mIn.data() + mIn.dataSize();
alog << HexDump(cmds, mIn.dataSize()) << endl;
while (cmds < end) cmds = printReturnCommand(alog, cmds);
alog << dedent;
}
return NO_ERROR;
}
return err;
}
In our current scenario we are calling into AMS to start an Activity, and doReceive here should be true, meaning we also need to receive data; 老罗's blog says the same. I originally couldn't see where the true came from, since waitForResponse passes no argument, but the answer is in the declaration: IPCThreadState.h declares talkWithDriver(bool doReceive=true), so the default argument supplies the true. Now let's walk through the method. The first statement, if (mProcess->mDriverFD <= 0), is a sanity check on the parameters. mProcess is a member of type ProcessState, set in IPCThreadState's constructor; when our application process starts, ProcessState's constructor calls open_driver, passing the const char *driver constant pointer "/dev/binder", and the file descriptor obtained from successfully opening the binder device node is stored in ProcessState's member mDriverFD. Once the check passes, a binder_write_read struct bwr is declared; it is the carrier for the data we hand into the kernel, and it is defined in the binder.h header. The statement bwr.write_buffer = (uintptr_t)mOut.data() points the write buffer at the commands we want to send (note that bwr.read_buffer = (uintptr_t)mIn.data() only sets up the receive buffer; the outgoing parameters live in mOut, not mIn). With bwr filled in, the method checks whether bwr.write_size and bwr.read_size are both 0; if so there is nothing to do and it returns NO_ERROR directly, otherwise it calls ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) to start communicating with the kernel.
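For reference, here is binder_write_read as declared in the older binder.h that matches the driver code quoted in this article; the signed/unsigned long fields are what the driver's %ld/%08lx debug format strings below assume (newer kernels use fixed-width binder_size_t/binder_uintptr_t fields instead):
/* binder_write_read from the (older) binder.h matching this driver version;
 * this is the same struct that IPCThreadState::talkWithDriver fills in above. */
struct binder_write_read {
    signed long   write_size;     /* bytes of commands to consume from write_buffer */
    signed long   write_consumed; /* bytes actually consumed, filled in by the driver */
    unsigned long write_buffer;   /* user-space address of mOut's data */
    signed long   read_size;      /* capacity of read_buffer */
    signed long   read_consumed;  /* bytes of return commands written by the driver */
    unsigned long read_buffer;    /* user-space address of mIn's data */
};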
From here, the ioctl is handled by the binder driver inside the kernel. The driver's operation table, struct file_operations binder_fops, is defined in binder.c as follows:
static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
};
The function we need to follow is binder_ioctl, whose implementation is:
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
/*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
return ret;
mutex_lock(&binder_lock);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
goto err;
}
switch (cmd) {
case BINDER_WRITE_READ: {
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto err;
}
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
bwr.read_size, bwr.read_buffer);
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
bwr.read_consumed, bwr.read_size);
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
break;
}
case BINDER_SET_MAX_THREADS:
if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
ret = -EINVAL;
goto err;
}
break;
case BINDER_SET_CONTEXT_MGR:
if (binder_context_mgr_node != NULL) {
printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
goto err;
}
if (binder_context_mgr_uid != -1) {
if (binder_context_mgr_uid != current->cred->euid) {
printk(KERN_ERR "binder: BINDER_SET_"
"CONTEXT_MGR bad uid %d != %d\n",
current->cred->euid,
binder_context_mgr_uid);
ret = -EPERM;
goto err;
}
} else
binder_context_mgr_uid = current->cred->euid;
binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
if (binder_context_mgr_node == NULL) {
ret = -ENOMEM;
goto err;
}
binder_context_mgr_node->local_weak_refs++;
binder_context_mgr_node->local_strong_refs++;
binder_context_mgr_node->has_strong_ref = 1;
binder_context_mgr_node->has_weak_ref = 1;
break;
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
proc->pid, thread->pid);
binder_free_thread(proc, thread);
thread = NULL;
break;
case BINDER_VERSION:
if (size != sizeof(struct binder_version)) {
ret = -EINVAL;
goto err;
}
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
ret = -EINVAL;
goto err;
}
break;
default:
ret = -EINVAL;
goto err;
}
ret = 0;
err:
if (thread)
thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
mutex_unlock(&binder_lock);
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
return ret;
}
binder_proc describes the process that initiated the call, and cmd is the binder command protocol we passed down when running talkWithDriver in IPCThreadState, here BINDER_WRITE_READ. A great many structs are involved from this point on; if you want them explained thoroughly, the post Android 8.0系统源码分析--开篇 links to the e-book 《Android系统源代码情景分析【罗升阳著】》, which covers them in detail. wait_event_interruptible is a kernel function: while its condition is false, it sets the current task's state to TASK_INTERRUPTIBLE and calls schedule(), which removes a TASK_INTERRUPTIBLE task from the runqueue; off the runqueue, the task no longer participates in scheduling, and when the wake-up condition is later satisfied, wake_up() puts it back on the runqueue so it can be scheduled again. Our command protocol is BINDER_WRITE_READ, so we enter the first case branch, where the copy_from_user kernel function copies the data user space passed in into the local variable bwr defined in that branch; if the copy fails, we bail out. In our current scenario all the call parameters were packed on the write side, so bwr.write_size is greater than 0, and we focus next on the if (bwr.write_size > 0) branch, which calls binder_thread_write to continue. (On this first pass bwr.read_size is nonzero as well, since talkWithDriver set up mIn as a receive buffer; binder_thread_read will matter to us shortly.)
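As a side note, BINDER_WRITE_READ and the sibling commands handled by the switch above are ordinary ioctl numbers declared in binder.h. Because BINDER_WRITE_READ is an _IOWR code carrying a struct binder_write_read, the _IOC_SIZE(cmd) check at the top of the case is simply validating that user space passed a correctly sized argument. The values below are from the same older binder.h and may differ slightly in newer kernels:
/* ioctl numbers from the older binder.h; _IOC_SIZE(BINDER_WRITE_READ)
 * equals sizeof(struct binder_write_read), which is exactly what
 * binder_ioctl checks before calling copy_from_user. */
#define BINDER_WRITE_READ      _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_MAX_THREADS  _IOW('b', 5, size_t)
#define BINDER_SET_CONTEXT_MGR  _IOW('b', 7, int)
#define BINDER_THREAD_EXIT      _IOW('b', 8, int)
#define BINDER_VERSION         _IOWR('b', 9, struct binder_version)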
binder_thread_write is also implemented in binder.c; it is one long switch whose case branches handle the different binder command protocols. When IPCThreadState packed the parameters in writeTransactionData, the last thing it did was call mOut.writeInt32(cmd) to write the command into the output buffer, and the command written was BC_TRANSACTION, meaning we want to perform a binder transaction. So we only need to look at the BC_TRANSACTION branch, and its implementation is very clear: it first copies the user data with copy_from_user, then calls binder_transaction for further processing.
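Abridged, the branch we care about looks like this; note that the reply flag handed to binder_transaction is just cmd == BC_REPLY, which matters for the discussion of reply below:
/* Abridged from binder_thread_write in binder.c: BC_TRANSACTION and
 * BC_REPLY share one branch; 'reply' is derived from the command code. */
case BC_TRANSACTION:
case BC_REPLY: {
    struct binder_transaction_data tr;
    if (copy_from_user(&tr, ptr, sizeof(tr)))   /* copy the payload descriptor */
        return -EFAULT;
    ptr += sizeof(tr);
    binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
    break;
}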
binder_transaction, also implemented in binder.c, is the single most important function in binder communication. Its full source is:
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
size_t *offp, *off_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
e = binder_transaction_log_add(&binder_transaction_log);
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
if (reply) {
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
binder_user_error("binder: %d:%d got reply transaction "
"with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_empty_call_stack;
}
binder_set_nice(in_reply_to->saved_priority);
if (in_reply_to->to_thread != thread) {
binder_user_error("binder: %d:%d got reply transaction "
"with bad transaction stack,"
" transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
goto err_bad_call_stack;
}
thread->transaction_stack = in_reply_to->to_parent;
target_thread = in_reply_to->from;
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
binder_user_error("binder: %d:%d got reply transaction "
"with bad target transaction stack %d, "
"expected %d\n",
proc->pid, thread->pid,
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
target_proc = target_thread->proc;
} else {
if (tr->target.handle) {
struct binder_ref *ref;
ref = binder_get_ref(proc, tr->target.handle);
if (ref == NULL) {
binder_user_error("binder: %d:%d got "
"transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_invalid_target_handle;
}
target_node = ref->node;
} else {
target_node = binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
goto err_no_context_mgr_node;
}
}
e->to_node = target_node->debug_id;
target_proc = target_node->proc;
if (target_proc == NULL) {
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
binder_user_error("binder: %d:%d got new "
"transaction with bad transaction stack"
", transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
goto err_bad_call_stack;
}
while (tmp) {
if (tmp->from && tmp->from->proc == target_proc)
target_thread = tmp->from;
tmp = tmp->from_parent;
}
}
}
if (target_thread) {
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else {
target_list = &target_proc->todo;
target_wait = &target_proc->wait;
}
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
t->debug_id = ++binder_last_id;
e->debug_id = t->debug_id;
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
"binder: %d:%d BC_REPLY %d -> %d:%d, "
"data %p-%p size %zd-%zd\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
tr->data.ptr.buffer, tr->data.ptr.offsets,
tr->data_size, tr->offsets_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
"binder: %d:%d BC_TRANSACTION %d -> "
"%d - node %d, data %p-%p size %zd-%zd\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
tr->data.ptr.buffer, tr->data.ptr.offsets,
tr->data_size, tr->offsets_size);
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = proc->tsk->cred->euid;
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("binder: %d:%d got transaction with invalid "
"data ptr\n", proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("binder: %d:%d got transaction with invalid "
"offsets ptr\n", proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
binder_user_error("binder: %d:%d got transaction with "
"invalid offsets size, %zd\n",
proc->pid, thread->pid, tr->offsets_size);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
off_end = (void *)offp + tr->offsets_size;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
if (*offp > t->buffer->data_size - sizeof(*fp) ||
t->buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(void *))) {
binder_user_error("binder: %d:%d got transaction with "
"invalid offset, %zd\n",
proc->pid, thread->pid, *offp);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
switch (fp->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct binder_ref *ref;
struct binder_node *node = binder_get_node(proc, fp->binder);
if (node == NULL) {
node = binder_new_node(proc, fp->binder, fp->cookie);
if (node == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_new_node_failed;
}
node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
if (fp->cookie != node->cookie) {
binder_user_error("binder: %d:%d sending u%p "
"node %d, cookie mismatch %p != %p\n",
proc->pid, thread->pid,
fp->binder, node->debug_id,
fp->cookie, node->cookie);
goto err_binder_get_ref_for_node_failed;
}
ref = binder_get_ref_for_node(target_proc, node);
if (ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
if (fp->type == BINDER_TYPE_BINDER)
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
fp->handle = ref->desc;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%p -> ref %d desc %d\n",
node->debug_id, node->ptr, ref->debug_id,
ref->desc);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
binder_user_error("binder: %d:%d got "
"transaction with invalid "
"handle, %ld\n", proc->pid,
thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_failed;
}
if (ref->node->proc == target_proc) {
if (fp->type == BINDER_TYPE_HANDLE)
fp->type = BINDER_TYPE_BINDER;
else
fp->type = BINDER_TYPE_WEAK_BINDER;
fp->binder = ref->node->ptr;
fp->cookie = ref->node->cookie;
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%p\n",
ref->debug_id, ref->desc, ref->node->debug_id,
ref->node->ptr);
} else {
struct binder_ref *new_ref;
new_ref = binder_get_ref_for_node(target_proc, ref->node);
if (new_ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
fp->handle = new_ref->desc;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> ref %d desc %d (node %d)\n",
ref->debug_id, ref->desc, new_ref->debug_id,
new_ref->desc, ref->node->debug_id);
}
} break;
case BINDER_TYPE_FD: {
int target_fd;
struct file *file;
if (reply) {
if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
}
} else if (!target_node->accept_fds) {
binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
}
file = fget(fp->handle);
if (file == NULL) {
binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fget_failed;
}
target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
if (target_fd < 0) {
fput(file);
return_error = BR_FAILED_REPLY;
goto err_get_unused_fd_failed;
}
task_fd_install(target_proc, target_fd, file);
binder_debug(BINDER_DEBUG_TRANSACTION,
" fd %ld -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
fp->handle = target_fd;
} break;
default:
binder_user_error("binder: %d:%d got transactio"
"n with invalid object type, %lx\n",
proc->pid, thread->pid, fp->type);
return_error = BR_FAILED_REPLY;
goto err_bad_object_type;
}
}
if (reply) {
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction(target_thread, in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
if (target_node->has_async_transaction) {
target_list = &target_node->async_todo;
target_wait = NULL;
} else
target_node->has_async_transaction = 1;
}
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait);
return;
err_get_unused_fd_failed:
err_fget_failed:
err_fd_not_allowed:
err_binder_get_ref_for_node_failed:
err_binder_get_ref_failed:
err_binder_new_node_failed:
err_bad_object_type:
err_bad_offset:
err_copy_data_failed:
binder_transaction_buffer_release(target_proc, t->buffer, offp);
t->buffer->transaction = NULL;
binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"binder: %d:%d transaction failed %d, size %zd-%zd\n",
proc->pid, thread->pid, return_error,
tr->data_size, tr->offsets_size);
{
struct binder_transaction_log_entry *fe;
fe = binder_transaction_log_add(&binder_transaction_log_failed);
*fe = *e;
}
BUG_ON(thread->return_error != BR_OK);
if (in_reply_to) {
thread->return_error = BR_TRANSACTION_COMPLETE;
binder_send_failed_reply(in_reply_to, return_error);
} else
thread->return_error = return_error;
}
As you can see, the function opens by declaring a pile of local structs, all preparation for the work ahead. At this point we are still running on a thread of our own application process, so pause and ask: what are we trying to do? We want to find a binder-handling thread of the process hosting AMS, package a pending transaction t and a pending-completion work item tcomplete, and queue them for that process's binder threads; if the target binder thread is awake it will service our request, and if it is asleep, we wake it up, because there is work to do. With the goal clear, let's keep going. target_proc here is the target process we are looking for, namely the SystemServer process, and target_thread is the thread that will receive and handle this binder transaction.
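The items that get queued on those todo lists are binder_work nodes; both t->work (embedded in the pending transaction) and *tcomplete are of this type, and the type field is what binder_thread_read later switches on. From the same driver version:
/* From binder.c: a work item queued on a thread's or process's todo list. */
struct binder_work {
    struct list_head entry;   /* links the item into a todo list */
    enum {
        BINDER_WORK_TRANSACTION = 1,
        BINDER_WORK_TRANSACTION_COMPLETE,
        BINDER_WORK_NODE,
        BINDER_WORK_DEAD_BINDER,
        BINDER_WORK_DEAD_BINDER_AND_CLEAR,
        BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
    } type;
};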
First the local variable e is filled in; its type, binder_transaction_log_entry, records binder communication logs. In the following if/else, reply is the value of cmd == BC_REPLY computed in binder_thread_write's BC_TRANSACTION branch when it called this function. In the current scenario the command we executed is BC_TRANSACTION, so reply is false: we are still on the way down, sending our data to the binder driver. Only after the data is delivered, when the Server sends its result back, will this function run with reply true. So we enter the else branch, which splits again on the value of tr->target.handle. The inner else is clearly for obtaining the binder entity of ServiceManager, binder_context_mgr_node; this is ServiceManager's special position as the context manager of binder IPC, with a handle value of 0. In our current scenario the target is the entity that IActivityManager.Stub registered with the binder driver, so its handle is nonzero and we take the if branch, which declares a binder_ref, the reference object for a binder entity. To really pin down what these structs mean, again go to Android 8.0系统源码分析--开篇 and download 老罗's book; understanding these structs is the key to understanding binder communication. Having found the binder reference that ActivityManagerService registered in the binder driver, ref->node yields the binder entity: binder_ref's member node points to a binder entity, and the struct is defined as follows:
struct binder_ref {
/* Lookups needed: */
/* node + proc => ref (transaction) */
/* desc + proc => ref (transaction, inc/dec ref) */
/* node => refs + procs (proc exit) */
int debug_id;
struct rb_node rb_node_desc;
struct rb_node rb_node_node;
struct hlist_node node_entry;
struct binder_proc *proc;
struct binder_node *node;
uint32_t desc;
int strong;
int weak;
struct binder_ref_death *death;
};
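And the binder_node (binder entity) that binder_ref::node points at is declared in the same driver version as:
/* From binder.c: a binder entity living in the process that published it.
 * ptr/cookie are the user-space addresses the owning process registered. */
struct binder_node {
    int debug_id;
    struct binder_work work;
    union {
        struct rb_node rb_node;       /* while the owning proc is alive */
        struct hlist_node dead_node;  /* after the owning proc has died */
    };
    struct binder_proc *proc;         /* owning process */
    struct hlist_head refs;           /* all binder_refs pointing here */
    int internal_strong_refs;
    int local_weak_refs;
    int local_strong_refs;
    void __user *ptr;                 /* weak ref address in user space */
    void __user *cookie;              /* the local object address in user space */
    unsigned has_strong_ref:1;
    unsigned pending_strong_ref:1;
    unsigned has_weak_ref:1;
    unsigned pending_weak_ref:1;
    unsigned has_async_transaction:1;
    unsigned accept_fds:1;
    unsigned min_priority:8;
    struct list_head async_todo;
};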
With the binder entity in hand, we also have the binder entity's target process target_proc. The following if (target_thread) assigns target_list and target_wait based on whether the search above found an optimal thread for this binder transaction: if a specific thread was found, the work is directed at that thread; otherwise the transaction will be inserted into the todo list of the whole process that owns the binder entity. Next, memory is allocated for our pending transaction t and pending-completion item tcomplete, and after the allocation succeeds, t's members are filled in one by one. t->buffer is a binder kernel buffer, carved out of the area set up when the target process opened and mapped the binder driver, and grown on demand. Then binder_inc_node bumps the reference count of the target binder entity so that it cannot be destroyed while the transaction is in flight.
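One step the narrative above glosses over: after copying the Parcel data and its offsets array into the kernel buffer, binder_transaction walks every offset and patches the flat_binder_object found there; that is how a BINDER_TYPE_BINDER sent by the owning process turns into a BINDER_TYPE_HANDLE in the receiving process (see the for (; offp < off_end; offp++) loop above). The object layout, again from the older binder.h:
/* From the older binder.h: the flattened representation of a binder object
 * embedded in a Parcel; the offsets array tells the driver where these sit. */
struct flat_binder_object {
    unsigned long type;     /* BINDER_TYPE_(WEAK_)BINDER / _(WEAK_)HANDLE / _FD */
    unsigned long flags;    /* FLAT_BINDER_FLAG_PRIORITY_MASK, _ACCEPTS_FDS */
    union {
        void *binder;       /* local object address, valid in the sender */
        signed long handle; /* reference descriptor, valid in the receiver */
    };
    void *cookie;           /* extra data associated with the local object */
};
After the object loop, the closing lines of binder_transaction (quoted again below) queue the work items: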
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait);
These last few lines assign the type members of our pending transaction t and pending-completion work item, BINDER_WORK_TRANSACTION and BINDER_WORK_TRANSACTION_COMPLETE respectively, queue t on target_list and tcomplete on the calling thread's own todo list, and finally check target_wait. target_wait is a wait_queue_head_t pointer set earlier to the target thread's or target process's wait queue (it is left NULL only for an async transaction whose node already has one pending); if it is non-NULL, the wake_up_interruptible kernel function is called to wake the target so it can process the new work.
From here the logic forks into two paths: the caller, our application process acting as the Client, and the target process acting as the Server, namely SystemServer. After handing its data to the binder driver for delivery to the target, the Client returns to the talkWithDriver function in the IPCThreadState file and keeps exchanging data with the driver to see whether any more binder IPC work needs handling; the Server, once woken, must process the transactions on its queues. Let's follow the Client's logic first and then come back to the Server's.
After sending its data, the Client returns to talkWithDriver in IPCThreadState; the return value is NO_ERROR, the while condition fails and the loop exits, and the following if block clears the consumed data out of the members mOut and mIn. Control then returns up to waitForResponse. When the binder driver accepted the process's request, it wrote a BR_TRANSACTION_COMPLETE return command into mIn, so waitForResponse handles that, breaks out of the switch, goes around the while loop, and calls talkWithDriver to interact with the driver again. This time bwr.write_size is 0 but bwr.read_size is not, because we are waiting for the Server's reply; so in the first case BINDER_WRITE_READ branch of binder_ioctl in binder.c, binder_thread_write is no longer executed and binder_thread_read runs instead.
binder_thread_read is likewise implemented in binder.c; its source is:
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
void __user *buffer, int size,
signed long *consumed, int non_block)
{
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
int ret = 0;
int wait_for_proc_work;
if (*consumed == 0) {
if (put_user(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
}
retry:
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo);
if (thread->return_error != BR_OK && ptr < end) {
if (thread->return_error2 != BR_OK) {
if (put_user(thread->return_error2, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (ptr == end)
goto done;
thread->return_error2 = BR_OK;
}
if (put_user(thread->return_error, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
thread->return_error = BR_OK;
goto done;
}
thread->looper |= BINDER_LOOPER_STATE_WAITING;
if (wait_for_proc_work)
proc->ready_threads++;
mutex_unlock(&binder_lock);
if (wait_for_proc_work) {
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
binder_user_error("binder: %d:%d ERROR: Thread waiting "
"for process work before calling BC_REGISTER_"
"LOOPER or BC_ENTER_LOOPER (state %x)\n",
proc->pid, thread->pid, thread->looper);
wait_event_interruptible(binder_user_error_wait,
binder_stop_on_user_error < 2);
}
binder_set_nice(proc->default_priority);
if (non_block) {
if (!binder_has_proc_work(proc, thread))
ret = -EAGAIN;
} else
ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
} else {
if (non_block) {
if (!binder_has_thread_work(thread))
ret = -EAGAIN;
} else
ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
}
mutex_lock(&binder_lock);
if (wait_for_proc_work)
proc->ready_threads--;
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
if (ret)
return ret;
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
if (!list_empty(&thread->todo))
w = list_first_entry(&thread->todo, struct binder_work, entry);
else if (!list_empty(&proc->todo) && wait_for_proc_work)
w = list_first_entry(&proc->todo, struct binder_work, entry);
else {
if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
goto retry;
break;
}
if (end - ptr < sizeof(tr) + 4)
break;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
} break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
cmd = BR_TRANSACTION_COMPLETE;
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
"binder: %d:%d BR_TRANSACTION_COMPLETE\n",
proc->pid, thread->pid);
list_del(&w->entry);
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
case BINDER_WORK_NODE: {
struct binder_node *node = container_of(w, struct binder_node, work);
uint32_t cmd = BR_NOOP;
const char *cmd_name;
int strong = node->internal_strong_refs || node->local_strong_refs;
int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
if (weak && !node->has_weak_ref) {
cmd = BR_INCREFS;
cmd_name = "BR_INCREFS";
node->has_weak_ref = 1;
node->pending_weak_ref = 1;
node->local_weak_refs++;
} else if (strong && !node->has_strong_ref) {
cmd = BR_ACQUIRE;
cmd_name = "BR_ACQUIRE";
node->has_strong_ref = 1;
node->pending_strong_ref = 1;
node->local_strong_refs++;
} else if (!strong && node->has_strong_ref) {
cmd = BR_RELEASE;
cmd_name = "BR_RELEASE";
node->has_strong_ref = 0;
} else if (!weak && node->has_weak_ref) {
cmd = BR_DECREFS;
cmd_name = "BR_DECREFS";
node->has_weak_ref = 0;
}
if (cmd != BR_NOOP) {
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (put_user(node->ptr, (void * __user *)ptr))
return -EFAULT;
ptr += sizeof(void *);
if (put_user(node->cookie, (void * __user *)ptr))
return -EFAULT;
ptr += sizeof(void *);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_USER_REFS,
"binder: %d:%d %s %d u%p c%p\n",
proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
} else {
list_del_init(&w->entry);
if (!weak && !strong) {
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"binder: %d:%d node %d u%p c%p deleted\n",
proc->pid, thread->pid, node->debug_id,
node->ptr, node->cookie);
rb_erase(&node->rb_node, &proc->nodes);
kfree(node);
binder_stats_deleted(BINDER_STAT_NODE);
} else {
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"binder: %d:%d node %d u%p c%p state unchanged\n",
proc->pid, thread->pid, node->debug_id, node->ptr,
node->cookie);
}
}
} break;
case BINDER_WORK_DEAD_BINDER:
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
struct binder_ref_death *death;
uint32_t cmd;
death = container_of(w, struct binder_ref_death, work);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else
cmd = BR_DEAD_BINDER;
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (put_user(death->cookie, (void * __user *)ptr))
return -EFAULT;
ptr += sizeof(void *);
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
"binder: %d:%d %s %p\n",
proc->pid, thread->pid,
cmd == BR_DEAD_BINDER ?
"BR_DEAD_BINDER" :
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
death->cookie);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
list_del(&w->entry);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
} else
list_move(&w->entry, &proc->delivered_death);
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
}
if (!t)
continue;
BUG_ON(t->buffer == NULL);
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
t->saved_priority = task_nice(current);
if (t->priority < target_node->min_priority &&
!(t->flags & TF_ONE_WAY))
binder_set_nice(t->priority);
else if (!(t->flags & TF_ONE_WAY) ||
t->saved_priority > target_node->min_priority)
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = NULL;
tr.cookie = NULL;
cmd = BR_REPLY;
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = t->sender_euid;
if (t->from) {
struct task_struct *sender = t->from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
current->nsproxy->pid_ns);
} else {
tr.sender_pid = 0;
}
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (void *)t->buffer->data +
proc->user_buffer_offset;
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
"binder: %d:%d %s %d %d:%d, cmd %d"
"size %zd-%zd ptr %p-%p\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
t->debug_id, t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
tr.data.ptr.buffer, tr.data.ptr.offsets);
list_del(&t->work.entry);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
} else {
t->buffer->transaction = NULL;
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
break;
}
done:
*consumed = ptr - buffer;
if (proc->requested_threads + proc->ready_threads == 0 &&
proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
/*spawn a new thread if we leave this out */) {
proc->requested_threads++;
binder_debug(BINDER_DEBUG_THREADS,
"binder: %d:%d BR_SPAWN_LOOPER\n",
proc->pid, thread->pid);
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
}
return 0;
}
Since the current consumed count is 0, a BR_NOOP command is first written into the buffer ptr points at, i.e. the bwr.read_buffer the user passed in. thread stands for the Client-side thread performing this binder call. In the current scenario thread->transaction_stack is not NULL: binder_transaction pushed our outstanding synchronous transaction onto this thread's stack, and we are still waiting for its result. So wait_for_proc_work is false, meaning the thread has unfinished business of its own and must not go off and pick up general work from its process's proc queue. We therefore enter the else branch. Here non_block indicates whether the file descriptor was opened in non-blocking mode; the descriptor is opened in the open_driver method of ProcessState.cpp, which lives at frameworks\native\libs\binder\ProcessState.cpp, and whose source is as follows:
static int open_driver(const char *driver)
{
int fd = open(driver, O_RDWR | O_CLOEXEC);
if (fd >= 0) {
int vers = 0;
status_t result = ioctl(fd, BINDER_VERSION, &vers);
if (result == -1) {
ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
close(fd);
fd = -1;
}
if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
ALOGE("Binder driver protocol(%d) does not match user space protocol(%d)! ioctl() return value: %d",
vers, BINDER_CURRENT_PROTOCOL_VERSION, result);
close(fd);
fd = -1;
}
size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
if (result == -1) {
ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
}
} else {
ALOGW("Opening '%s' failed: %s\n", driver, strerror(errno));
}
return fd;
}
As you can see, non-blocking mode is not requested, so non_block is false, and we end up at ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread)) in the else branch: the current thread goes to sleep right here, waiting to be woken when the Server has finished processing and sent the result back.
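The wake-up condition the thread sleeps on, binder_has_thread_work, is short enough to quote in full from the same driver version; the reply landing on thread->todo is exactly what will make it true later:
/* From binder.c: the condition the client thread sleeps on. It becomes
 * true once the server's reply (or an error) is queued on this thread. */
static int binder_has_thread_work(struct binder_thread *thread)
{
    return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
        (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}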
Now let's go back and look at the Server process after it is woken. The binder driver, having built the pending transaction t and pending-completion item tcomplete in binder_transaction and added them to the target's task queues, finally woke the target with wake_up_interruptible. Assume our target process was asleep in the wait_event_interruptible_exclusive call in binder_thread_read; once the kernel wakes it, it resumes execution, checks its task queues, and takes the pending transactions off them for processing.
As we saw above, the pending transaction t that the Client queued on the Server has work type BINDER_WORK_TRANSACTION, so inside the while loop, w = list_first_entry(&thread->todo, struct binder_work, entry) takes the first element of the current thread's pending todo list into the binder_work local pointer w. container_of is then used to recover the pointer address of the pending transaction t. container_of is a Linux kernel macro with three parameters: ptr is a pointer to a member variable, type is the type of the enclosing struct, and member is the member's name. Given a member's name, pointer, and enclosing struct type, container_of computes the struct's pointer, i.e. its start address; the method is simply to subtract the member's offset within the struct from the member's address. Next, the data of the pending transaction t is transferred into the binder_transaction_data local variable tr. 老罗's blog post Android系统进程间通信(IPC)机制Binder中的Server启动过程源代码分析 rates this spot very highly: the two lines tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset and the offsets assignment right after it are the very essence of Binder IPC, because the same physical pages are mapped into both the kernel and the receiver's user space, so the receiver reads the payload in place with no further copy.
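For reference, the classic kernel definition of container_of (GNU C, from include/linux/kernel.h):
/* Given a pointer to 'member' inside an instance of 'type', recover the
 * pointer to the enclosing instance by subtracting the member's offset. */
#define container_of(ptr, type, member) ({                    \
    const typeof(((type *)0)->member) *__mptr = (ptr);        \
    (type *)((char *)__mptr - offsetof(type, member)); })
So t = container_of(w, struct binder_transaction, work) converts the binder_work pointer taken off the todo list back into the binder_transaction that embeds it.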
I have read this part over and over and still feel I haven't grasped its full depth; my kung fu is clearly still far behind 老罗's!!!
The logic that follows copies tr's data out to the buffer the user passed in. Since the pending transaction t has now been handled, list_del(&t->work.entry) removes it from the queue. cmd was assigned BR_TRANSACTION at the end of the if branch above, and (t->flags & TF_ONE_WAY) is false, i.e. this is a synchronous call, so the if condition holds and thread->transaction_stack = t pushes the pending transaction t onto the head of the current thread's transaction stack (it must stay alive until the reply is sent); finally break exits the while loop.
Back in binder_ioctl, if (!list_empty(&proc->todo)) checks whether the current process's todo queue is non-empty and wakes the process's wait queue if so, and finally the copy_to_user system function copies bwr's data back out to the user-space buffer.
Back in the Server side's IPCThreadState::talkWithDriver, the result is again NO_ERROR; the do/while loop exits, bwr's data is read into the member mIn, and control returns up to waitForResponse. From the analysis above, the cmd command that binder_thread_read finally wrote for our pending transaction is BR_TRANSACTION, so waitForResponse falls through to the last, default branch and calls executeCommand to continue responding to the Client's request.
executeCommand is implemented in IPCThreadState.cpp, at frameworks\native\libs\binder\IPCThreadState.cpp; its source is:
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch ((uint32_t)cmd) {
case BR_ERROR:
result = mIn.readInt32();
break;
case BR_OK:
break;
case BR_ACQUIRE:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
ALOG_ASSERT(refs->refBase() == obj,
"BR_ACQUIRE: object %p does not match cookie %p (expected %p)",
refs, obj, refs->refBase());
obj->incStrong(mProcess.get());
IF_LOG_REMOTEREFS() {
LOG_REMOTEREFS("BR_ACQUIRE from driver on %p", obj);
obj->printRefs();
}
mOut.writeInt32(BC_ACQUIRE_DONE);
mOut.writePointer((uintptr_t)refs);
mOut.writePointer((uintptr_t)obj);
break;
case BR_RELEASE:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
ALOG_ASSERT(refs->refBase() == obj,
"BR_RELEASE: object %p does not match cookie %p (expected %p)",
refs, obj, refs->refBase());
IF_LOG_REMOTEREFS() {
LOG_REMOTEREFS("BR_RELEASE from driver on %p", obj);
obj->printRefs();
}
mPendingStrongDerefs.push(obj);
break;
case BR_INCREFS:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
refs->incWeak(mProcess.get());
mOut.writeInt32(BC_INCREFS_DONE);
mOut.writePointer((uintptr_t)refs);
mOut.writePointer((uintptr_t)obj);
break;
case BR_DECREFS:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
// NOTE: This assertion is not valid, because the object may no
// longer exist (thus the (BBinder*)cast above resulting in a different
// memory address).
//ALOG_ASSERT(refs->refBase() == obj,
// "BR_DECREFS: object %p does not match cookie %p (expected %p)",
// refs, obj, refs->refBase());
mPendingWeakDerefs.push(refs);
break;
case BR_ATTEMPT_ACQUIRE:
refs = (RefBase::weakref_type*)mIn.readPointer();
obj = (BBinder*)mIn.readPointer();
{
const bool success = refs->attemptIncStrong(mProcess.get());
ALOG_ASSERT(success && refs->refBase() == obj,
"BR_ATTEMPT_ACQUIRE: object %p does not match cookie %p (expected %p)",
refs, obj, refs->refBase());
mOut.writeInt32(BC_ACQUIRE_RESULT);
mOut.writeInt32((int32_t)success);
}
break;
case BR_TRANSACTION:
{
binder_transaction_data tr;
result = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(result == NO_ERROR,
"Not enough command data for brTRANSACTION");
if (result != NO_ERROR) break;
Parcel buffer;
buffer.ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
const pid_t origPid = mCallingPid;
const uid_t origUid = mCallingUid;
const int32_t origStrictModePolicy = mStrictModePolicy;
const int32_t origTransactionBinderFlags = mLastTransactionBinderFlags;
mCallingPid = tr.sender_pid;
mCallingUid = tr.sender_euid;
mLastTransactionBinderFlags = tr.flags;
//ALOGI(">>>> TRANSACT from pid %d uid %d\n", mCallingPid, mCallingUid);
Parcel reply;
status_t error;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BR_TRANSACTION thr " << (void*)pthread_self()
<< " / obj " << tr.target.ptr << " / code "
<< TypeCode(tr.code) << ": " << indent << buffer
<< dedent << endl
<< "Data addr = "
<< reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer)
<< ", offsets addr="
<< reinterpret_cast<const size_t*>(tr.data.ptr.offsets) << endl;
}
if (tr.target.ptr) {
// We only have a weak reference on the target object, so we must first try to
// safely acquire a strong reference before doing anything else with it.
if (reinterpret_cast<RefBase::weakref_type*>(
tr.target.ptr)->attemptIncStrong(this)) {
error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
&reply, tr.flags);
reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
} else {
error = UNKNOWN_TRANSACTION;
}
} else {
error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
}
//ALOGI("<<<< TRANSACT from pid %d restore pid %d uid %d\n",
// mCallingPid, origPid, origUid);
if ((tr.flags & TF_ONE_WAY) == 0) {
LOG_ONEWAY("Sending reply to %d!", mCallingPid);
if (error < NO_ERROR) reply.setError(error);
sendReply(reply, 0);
} else {
LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
}
mCallingPid = origPid;
mCallingUid = origUid;
mStrictModePolicy = origStrictModePolicy;
mLastTransactionBinderFlags = origTransactionBinderFlags;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BC_REPLY thr " << (void*)pthread_self() << " / obj "
<< tr.target.ptr << ": " << indent << reply << dedent << endl;
}
}
break;
case BR_DEAD_BINDER:
{
BpBinder *proxy = (BpBinder*)mIn.readPointer();
proxy->sendObituary();
mOut.writeInt32(BC_DEAD_BINDER_DONE);
mOut.writePointer((uintptr_t)proxy);
} break;
case BR_CLEAR_DEATH_NOTIFICATION_DONE:
{
BpBinder *proxy = (BpBinder*)mIn.readPointer();
proxy->getWeakRefs()->decWeak(proxy);
} break;
case BR_FINISHED:
result = TIMED_OUT;
break;
case BR_NOOP:
break;
case BR_SPAWN_LOOPER:
mProcess->spawnPooledThread(false);
break;
default:
ALOGE("*** BAD COMMAND %d received from Binder driver\n", cmd);
result = UNKNOWN_ERROR;
break;
}
if (result != NO_ERROR) {
mLastError = result;
}
return result;
}
Let's look at the BR_TRANSACTION branch: a binder_transaction_data local variable tr is declared and the data is read out of the member mIn. The if (tr.target.ptr) branch holds, because our target here is a real service object, ActivityManagerService, rather than ServiceManager; the code first safely acquires a strong reference through the weakref stored in tr.target.ptr, then casts tr.cookie to a BBinder object and calls its transact method for further processing. When that returns, if ((tr.flags & TF_ONE_WAY) == 0) checks whether the current call is synchronous; if it is, sendReply returns the result to the client, and if not, nothing more is done.
BBinder lives at frameworks\native\libs\binder\Binder.cpp, and its transact method's source is:
status_t BBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
data.setDataPosition(0);
status_t err = NO_ERROR;
switch (code) {
case PING_TRANSACTION:
reply->writeInt32(pingBinder());
break;
default:
err = onTransact(code, data, reply, flags);
break;
}
if (reply != NULL) {
reply->setDataPosition(0);
}
return err;
}
The logic of this method is very simple: it delegates straight to onTransact for further processing. Note carefully here that the Server side's BBinder is actually a JavaBBinder object, constructed when the service process started, during the call to the JavaBBinderHolder class's get method. JavaBBinder is defined in frameworks\base\core\jni\android_util_Binder.cpp, and its onTransact source is:
virtual status_t onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags = 0)
{
JNIEnv* env = javavm_to_jnienv(mVM);
ALOGV("onTransact() on %p calling object %p in env %p vm %p\n", this, mObject, env, mVM);
IPCThreadState* thread_state = IPCThreadState::self();
const int32_t strict_policy_before = thread_state->getStrictModePolicy();
//printf("Transact from %p to Java code sending: ", this);
//data.print();
//printf("\n");
jboolean res = env->CallBooleanMethod(mObject, gBinderOffsets.mExecTransact,
code, reinterpret_cast<jlong>(&data), reinterpret_cast<jlong>(&reply), flags);
if (env->ExceptionCheck()) {
jthrowable excep = env->ExceptionOccurred();
report_exception(env, excep,
"*** Uncaught remote exception! "
"(Exceptions are not yet supported across processes.)");
res = JNI_FALSE;
/* clean up JNI local ref -- we don't return to Java code */
env->DeleteLocalRef(excep);
}
// Check if the strict mode state changed while processing the
// call. The Binder state will be restored by the underlying
// Binder system in IPCThreadState, however we need to take care
// of the parallel Java state as well.
if (thread_state->getStrictModePolicy() != strict_policy_before) {
set_dalvik_blockguard_policy(env, strict_policy_before);
}
if (env->ExceptionCheck()) {
jthrowable excep = env->ExceptionOccurred();
report_exception(env, excep,
"*** Uncaught exception in onBinderStrictModePolicyChange");
/* clean up JNI local ref -- we don't return to Java code */
env->DeleteLocalRef(excep);
}
// Need to always call through the native implementation of
// SYSPROPS_TRANSACTION.
if (code == SYSPROPS_TRANSACTION) {
BBinder::onTransact(code, data, reply, flags);
}
//aout << "onTransact to Java code; result=" << res << endl
// << "Transact from " << this << " to Java code returning "
// << reply << ": " << *reply << endl;
return res != JNI_FALSE ? NO_ERROR : UNKNOWN_TRANSACTION;
}
The most important part of this method is the call env->CallBooleanMethod(mObject, gBinderOffsets.mExecTransact, code, reinterpret_cast<jlong>(&data), reinterpret_cast<jlong>(&reply), flags), which invokes the execTransact method of the Java Binder object held in mObject. The method ID gBinderOffsets.mExecTransact is cached when the Java Binder class is registered with JNI, via GetMethodIDOrDie:
static inline jmethodID GetMethodIDOrDie(JNIEnv* env, jclass clazz, const char* method_name,
const char* method_signature) {
jmethodID res = env->GetMethodID(clazz, method_name, method_signature);
LOG_ALWAYS_FATAL_IF(res == NULL, "Unable to find method %s", method_name);
return res;
}
With the method ID of the actual Server object's execTransact in hand, env is used to invoke that method on the object; this is JNI calling up from the native layer into the Java layer. execTransact is implemented in Binder.java, at frameworks\base\core\java\android\os\Binder.java, and its source is:
// Entry point from android_util_Binder.cpp's onTransact
private boolean execTransact(int code, long dataObj, long replyObj,
int flags) {
Parcel data = Parcel.obtain(dataObj);
Parcel reply = Parcel.obtain(replyObj);
// theoretically, we should call transact, which will call onTransact,
// but all that does is rewind it, and we just got these from an IPC,
// so we'll just call it directly.
boolean res;
// Log any exceptions as warnings, don't silently suppress them.
// If the call was FLAG_ONEWAY then these exceptions disappear into the ether.
final boolean tracingEnabled = Binder.isTracingEnabled();
try {
if (tracingEnabled) {
Trace.traceBegin(Trace.TRACE_TAG_ALWAYS, getClass().getName() + ":" + code);
}
res = onTransact(code, data, reply, flags);
} catch (RemoteException|RuntimeException e) {
if (LOG_RUNTIME_EXCEPTION) {
Log.w(TAG, "Caught a RuntimeException from the binder stub implementation.", e);
}
if ((flags & FLAG_ONEWAY) != 0) {
if (e instanceof RemoteException) {
Log.w(TAG, "Binder call failed.", e);
} else {
Log.w(TAG, "Caught a RuntimeException from the binder stub implementation.", e);
}
} else {
reply.setDataPosition(0);
reply.writeException(e);
}
res = true;
} catch (OutOfMemoryError e) {
// Unconditionally log this, since this is generally unrecoverable.
Log.e(TAG, "Caught an OutOfMemoryError from the binder stub implementation.", e);
RuntimeException re = new RuntimeException("Out of memory", e);
reply.setDataPosition(0);
reply.writeException(re);
res = true;
} finally {
if (tracingEnabled) {
Trace.traceEnd(Trace.TRACE_TAG_ALWAYS);
}
}
checkParcel(this, code, reply, "Unreasonably large binder reply buffer");
reply.recycle();
data.recycle();
// Just in case -- we are done with the IPC, so there should be no more strict
// mode violations that have gathered for this thread. Either they have been
// parceled and are now in transport off to the caller, or we are returning back
// to the main transaction loop to wait for another incoming transaction. Either
// way, strict mode begone!
StrictMode.clearGatheredViolations();
return res;
}
As the comment at the top of the method shows, it is not meant to be called from inside this class; it is the entry point up from the native layer. It first wraps the data handed up from the binder driver into Java-layer Parcel objects, then calls onTransact to continue. onTransact is the method overridden by the Stub class generated from the aidl file; sticking with the ICameraService.aidl example from the previous article, the full source of the generated ICameraService.Stub class is:
public static abstract class Stub extends android.os.Binder implements com.huawei.cameraservice.ICameraService {
private static final java.lang.String DESCRIPTOR = "com.huawei.cameraservice.ICameraService";
/**
* Construct the stub at attach it to the interface.
*/
public Stub() {
this.attachInterface(this, DESCRIPTOR);
}
/**
* Cast an IBinder object into an com.huawei.cameraservice.ICameraService interface,
* generating a proxy if needed.
*/
public static com.huawei.cameraservice.ICameraService asInterface(android.os.IBinder obj) {
if ((obj == null)) {
return null;
}
android.os.IInterface iin = obj.queryLocalInterface(DESCRIPTOR);
if (((iin != null) && (iin instanceof com.huawei.cameraservice.ICameraService))) {
return ((com.huawei.cameraservice.ICameraService) iin);
}
return new com.huawei.cameraservice.ICameraService.Stub.Proxy(obj);
}
@Override
public android.os.IBinder asBinder() {
return this;
}
@Override
public boolean onTransact(int code, android.os.Parcel data, android.os.Parcel reply, int flags) throws android.os.RemoteException {
switch (code) {
case INTERFACE_TRANSACTION: {
reply.writeString(DESCRIPTOR);
return true;
}
case TRANSACTION_getMainCameraId: {
data.enforceInterface(DESCRIPTOR);
int _result = this.getMainCameraId();
reply.writeNoException();
reply.writeInt(_result);
return true;
}
case TRANSACTION_openCamera: {
data.enforceInterface(DESCRIPTOR);
int _arg0;
_arg0 = data.readInt();
int _result = this.openCamera(_arg0);
reply.writeNoException();
reply.writeInt(_result);
return true;
}
}
return super.onTransact(code, data, reply, flags);
}
private static class Proxy implements com.huawei.cameraservice.ICameraService {
private android.os.IBinder mRemote;
Proxy(android.os.IBinder remote) {
mRemote = remote;
}
@Override
public android.os.IBinder asBinder() {
return mRemote;
}
public java.lang.String getInterfaceDescriptor() {
return DESCRIPTOR;
}
@Override
public int getMainCameraId() throws android.os.RemoteException {
android.os.Parcel _data = android.os.Parcel.obtain();
android.os.Parcel _reply = android.os.Parcel.obtain();
int _result;
try {
_data.writeInterfaceToken(DESCRIPTOR);
mRemote.transact(Stub.TRANSACTION_getMainCameraId, _data, _reply, 0);
_reply.readException();
_result = _reply.readInt();
} finally {
_reply.recycle();
_data.recycle();
}
return _result;
}
@Override
public int openCamera(int id) throws android.os.RemoteException {
android.os.Parcel _data = android.os.Parcel.obtain();
android.os.Parcel _reply = android.os.Parcel.obtain();
int _result;
try {
_data.writeInterfaceToken(DESCRIPTOR);
_data.writeInt(id);
mRemote.transact(Stub.TRANSACTION_openCamera, _data, _reply, 0);
_reply.readException();
_result = _reply.readInt();
} finally {
_reply.recycle();
_data.recycle();
}
return _result;
}
}
static final int TRANSACTION_getMainCameraId = (android.os.IBinder.FIRST_CALL_TRANSACTION + 0);
static final int TRANSACTION_openCamera = (android.os.IBinder.FIRST_CALL_TRANSACTION + 1);
}
The code value here is the one we passed in on the Client side before starting the binder call; it is assigned starting from the base android.os.IBinder.FIRST_CALL_TRANSACTION and increments in the order the methods are declared in the aidl file. In our current scenario, the implementation that actually runs is the startActivity method of the ActivityManagerService class.
After this long detour, we have finally arrived at our Server side.
Having read this much source code, going back to 老罗's blog the gap still feels enormous; plenty of logic hasn't been touched here, such as exactly how the Client's data is packaged and carried along the way, and the address-translation step 老罗 calls the essence of binder communication. Clearly more work is needed!!!
It's late, so that's it for today. Next time we'll keep following 老罗's lead and keep learning!!!