The addService flow involves three modules: the process hosting the service (here mediaserver, which hosts MediaPlayerService), the binder driver, and servicemanager.
Most write-ups online use MediaPlayerService as the example, and this post does the same.
frameworks/av/media/mediaserver/main_mediaserver.cpp
int main(int argc __unused, char **argv __unused)
{
signal(SIGPIPE, SIG_IGN);
sp<ProcessState> proc(ProcessState::self()); -------------1
sp<IServiceManager> sm(defaultServiceManager()); -------------2
ALOGI("ServiceManager: %p", sm.get());
InitializeIcuOrDie();
MediaPlayerService::instantiate(); -------------3
ResourceManagerService::instantiate();
registerExtensions();
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
}
frameworks/av/media/libmediaplayerservice/MediaPlayerService.cpp
void MediaPlayerService::instantiate() {
defaultServiceManager()->addService(
String16("media.player"), new MediaPlayerService()); //这行代码信息量很大
}
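The comment above is not an exaggeration: defaultServiceManager() alone does quite a bit. Roughly (abridged from IServiceManager.cpp; locking and retry details vary slightly across Android versions), it boils down to wrapping the well-known handle 0 in a BpBinder and that in a BpServiceManager:
// Abridged defaultServiceManager() (IServiceManager.cpp); details vary by version.
sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
    {
        AutoMutex _l(gDefaultServiceManagerLock);
        while (gDefaultServiceManager == NULL) {
            // getContextObject(NULL) -> getStrongProxyForHandle(0) -> new BpBinder(0)
            gDefaultServiceManager = interface_cast<IServiceManager>(
                    ProcessState::self()->getContextObject(NULL));
            if (gDefaultServiceManager == NULL)
                sleep(1); // servicemanager may not be up yet
        }
    }
    // interface_cast<IServiceManager> ends up as new BpServiceManager(new BpBinder(0))
    return gDefaultServiceManager;
}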
The call lands in BpServiceManager::addService:
virtual status_t addService(const String16& name, const sp& service,
bool allowIsolated)
{
Parcel data, reply;
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor()); //"android.os.IServiceManager"
data.writeString16(name); //"media.player"
data.writeStrongBinder(service); // MediaPlayerService
data.writeInt32(allowIsolated ? 1 : 0);
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply); //code: ADD_SERVICE_TRANSACTION
return err == NO_ERROR ? reply.readExceptionCode() : err;
}
One thing to note is BpServiceManager's inheritance chain:
                                                   --> IServiceManager --> IInterface
BpServiceManager --> BpInterface<IServiceManager>
                                                   --> BpRefBase: mRemote (a BpBinder)
This layering is quite clever. On one side it makes BpServiceManager an interface, through which remote calls can be issued; on the other side it gives BpServiceManager the remote binder that stands for servicemanager, i.e. the BpBinder held in BpRefBase::mRemote. Those two together are what make the RPC work. Likewise:
MediaPlayerService, the object passed to addService as its second argument, has a similarly clever inheritance chain:
                                                                       --> IMediaPlayerService
MediaPlayerService --> BnMediaPlayerService --> BnInterface<IMediaPlayerService>
                                                                       --> BBinder
MediaPlayerService thus combines the Binder data structure (BBinder) with the interface it exposes (IMediaPlayerService).
To sum up the pattern BBinder and BpBinder follow:
Both BBinder and BpBinder derive from IBinder; the former is the local object, the latter the remote one.
In one sentence: remote or local, you always need both the IBinder data structure (to find the object) and the interface (to use it).
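A minimal, self-contained sketch of that pattern (class names are simplified stand-ins, not the real libbinder types): the proxy side pairs the interface with a handle-holding remote object, while the native side would pair the same interface with a local BBinder that actually implements it.
#include <cstdint>
#include <cstdio>

// Toy mirror of the libbinder layering; every name here is a made-up stand-in.
struct IBinder { virtual ~IBinder() {} };           // common base
struct BBinder : IBinder { };                       // local object, lives in the server
struct BpBinder : IBinder {                         // remote object, identified by a handle
    explicit BpBinder(int32_t h) : handle(h) {}
    int32_t handle;
};

struct IServiceManagerLike {                        // the "interface" half
    virtual void addService(const char* name) = 0;
    virtual ~IServiceManagerLike() {}
};

// Proxy = interface + remote IBinder (what BpInterface/BpRefBase glue together)
struct BpServiceManagerLike : IServiceManagerLike {
    explicit BpServiceManagerLike(BpBinder* r) : remote(r) {}
    void addService(const char* name) override {
        printf("transact ADD_SERVICE(%s) through handle %d\n", name, remote->handle);
    }
    BpBinder* remote;
};

int main() {
    BpBinder sm(0);                 // servicemanager's handle is always 0
    BpServiceManagerLike proxy(&sm);
    proxy.addService("media.player");
}
The real BpInterface<INTERFACE> template does this pairing generically, and BpRefBase is where mRemote actually lives.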
Back to addService. It mainly does two things: pack the arguments into a Parcel, then hand the Parcel to remote()->transact(). For the analysis of Parcel itself, see binder_context_mgr_node. The data written here is:
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor()); //"android.os.IServiceManager"
data.writeString16(name); //"media.player"
data.writeStrongBinder(service); // MediaPlayerService
data.writeInt32(allowIsolated ? 1 : 0); // 0
The data falls into two kinds: plain values (the interface token, the name string, the int), which go straight into the Parcel's data area, and Binder objects, which writeStrongBinder flattens into a flat_binder_object:
status_t Parcel::writeStrongBinder(const sp& val)
{
return flatten_binder(ProcessState::self(), val, this);
}
status_t flatten_binder(const sp& /*proc*/,
const sp& binder, Parcel* out)
{
flat_binder_object obj; ------------------ 1
if (IPCThreadState::self()->backgroundSchedulingDisabled()) {
/* minimum priority for all nodes is nice 0 */
obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
} else {
/* minimum priority for all nodes is MAX_NICE(19) */
obj.flags = 0x13 | FLAT_BINDER_FLAG_ACCEPTS_FDS;
}
if (binder != NULL) {
IBinder *local = binder->localBinder(); ------------------- 2
if (!local) {
......
} else {
obj.type = BINDER_TYPE_BINDER; -------------------3
obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
obj.cookie = reinterpret_cast<uintptr_t>(local);
}
} else {
......
}
return finish_flatten_binder(binder, obj, out); --------------------4
}
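For reference, the carrier structure looks roughly like this (a sketch with standard integer types substituted for the kernel's __u32/binder_uintptr_t; newer kernels also wrap type in a binder_object_header):
#include <cstdint>

// Simplified mirror of flat_binder_object from the binder uapi header.
struct flat_binder_object_sketch {
    uint32_t type;         // BINDER_TYPE_BINDER when a local object is being passed
    uint32_t flags;        // min priority / sched policy | FLAT_BINDER_FLAG_ACCEPTS_FDS
    union {
        uintptr_t binder;  // local object: weak-ref pointer of the BBinder
        uint32_t  handle;  // remote object: driver-assigned handle
    };
    uintptr_t cookie;      // local object: the BBinder pointer itself
};
finish_flatten_binder then appends this object to the Parcel and records its offset, which is how the driver later knows where the binder objects sit inside the payload.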
165 status_t status = IPCThreadState::self()->transact(
166 mHandle, code, data, reply, flags);
mHandle的值为0,是BpBinder在被new的时候的初始化参数,因为是ServiceManager对应的BpBinder,而SM的默认handle就是0。默认flags的值为0,非ONEWAY通信。
接下来调用了IPCThreadState::self()->transact
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
flags |= TF_ACCEPT_FDS;
if (err == NO_ERROR) {
LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
(flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL); ----------------------1
}
if (err != NO_ERROR) {
if (reply) reply->setError(err);
return (mLastError = err);
}
if ((flags & TF_ONE_WAY) == 0) {
if (reply) {
err = waitForResponse(reply); ------------------------2
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
} else {
err = waitForResponse(NULL, NULL);
}
return err;
}
IPCThreadState::transact is the last userspace step before entering the driver, so this is where the Binder Command (the BC_-prefixed macros) is attached: BC_TRANSACTION.
writeTransactionData builds a binder_transaction_data, fills in the transaction metadata together with pointers to the Parcel data prepared earlier, and then writes BC_TRANSACTION followed by that binder_transaction_data into IPCThreadState's mOut member, itself a Parcel initialized in the IPCThreadState constructor. After this step mOut holds [BC_TRANSACTION][binder_transaction_data], with the latter only pointing at the original Parcel payload rather than copying it.
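An abridged writeTransactionData for reference (IPCThreadState.cpp; error paths omitted):
// Abridged writeTransactionData (IPCThreadState.cpp); error handling dropped.
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.ptr = 0;
    tr.target.handle = handle;      // 0, i.e. servicemanager
    tr.code = code;                 // ADD_SERVICE_TRANSACTION
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    // only pointers and sizes of the Parcel are recorded; the payload itself is
    // copied later, by the driver, in binder_transaction()
    tr.data_size = data.ipcDataSize();
    tr.data.ptr.buffer = data.ipcData();
    tr.offsets_size = data.ipcObjectsCount() * sizeof(binder_size_t);
    tr.data.ptr.offsets = data.ipcObjects();

    mOut.writeInt32(cmd);           // BC_TRANSACTION
    mOut.write(&tr, sizeof(tr));    // followed by the descriptor itself
    return NO_ERROR;
}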
waitForResponse then drives talkWithDriver, which is best read in two parts: handing mOut to the driver, and consuming whatever the driver wrote back into mIn:
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
binder_write_read bwr;
// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
// We don't want to write anything if we are still reading
// from data left in the input buffer and the caller
// has requested to read the next data.
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
if (doReceive && needRead) { // also hand the driver a read buffer, i.e. mIn
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0) --------------2
err = NO_ERROR;
else
err = -errno;
} while (err == -EINTR);
if (err >= NO_ERROR) { ------------------3
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
else
mOut.setDataSize(0);
}
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
return NO_ERROR;
}
return err;
}
Yet another structure, binder_write_read, wraps the communication data one more time; it is the unit the BINDER_WRITE_READ ioctl exchanges with the driver.
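Its layout, sketched with standard integer types in place of the kernel's binder_size_t/binder_uintptr_t:
#include <cstdint>

// Sketch of struct binder_write_read: one write buffer and one read buffer per
// ioctl, with the driver reporting progress through the *_consumed fields.
struct binder_write_read_sketch {
    uint64_t write_size;      // bytes available in write_buffer (mOut)
    uint64_t write_consumed;  // bytes the driver actually processed
    uint64_t write_buffer;    // userspace address of mOut.data()
    uint64_t read_size;       // capacity of read_buffer (mIn)
    uint64_t read_consumed;   // bytes the driver filled in
    uint64_t read_buffer;     // userspace address of mIn.data()
};
The ioctl then lands in the driver's binder_ioctl: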
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
thread = binder_get_thread(proc); ---------------1
switch (cmd) {
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, cmd, arg, &thread); ----------------2
if (ret)
goto err;
break;
This function handles both the write and the read side of the binder driver:
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread **threadp)
{
int ret = 0;
int thread_pid = (*threadp)->pid;
struct binder_proc *proc = filp->private_data;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg; // arg is the userspace binder_write_read
struct binder_write_read bwr;
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { // copy it into a kernel-space binder_write_read
ret = -EFAULT;
goto out;
}
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, *threadp,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);
trace_binder_write_done(ret);
}
...
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
out:
return ret;
}
Here bwr.write_buffer points at the userspace IPCThreadState::mOut. binder_thread_read is omitted for now. The final copy_to_user only matters for the consumed fields of binder_write_read (nothing else in it changes), so it effectively just updates the userspace consumed counters. Straight to binder_thread_write:
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
struct binder_context *context = proc->context;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer; // corresponds to the userspace IPCThreadState::mOut
void __user *ptr = buffer + *consumed; // start of the not-yet-processed data
void __user *end = buffer + size;
// mOut may hold several CMD + binder_transaction_data pairs, hence the while loop
while (ptr < end && thread->return_error.cmd == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr)) // cmd is BC_TRANSACTION here
return -EFAULT;
ptr += sizeof(uint32_t); // advance the data pointer
trace_binder_command(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
}
switch (cmd) {
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr))) //copy完CMD,再copy binder_transaction_data
return -EFAULT;
ptr += sizeof(tr); //再次更新数据指针
binder_transaction(proc, thread, &tr,
cmd == BC_REPLY, 0);
break;
}
}
*consumed = ptr - buffer; // ptr minus the buffer start = how much has been consumed/processed
}
return 0;
}
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
binder_size_t extra_buffers_size)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end, *off_start;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct binder_ref *target_ref = NULL;
struct binder_context *context = proc->context;
if (reply) {
...
} else {
if (tr->target.handle) {
...
} else {
target_node = context->binder_context_mgr_node;
}
target_proc = target_node->proc;
addService talks to servicemanager, so the handle is 0 and the driver goes straight to binder_context_mgr_node; from that binder_node it then obtains the corresponding binder_proc.
binder_proc_lock(thread->proc, __LINE__);
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
binder_user_error(...);
...
}
while (tmp) {
if (tmp->from && tmp->from->proc == target_proc)
target_thread = tmp->from;
tmp = tmp->from_parent;
}
}
binder_proc_unlock(thread->proc, __LINE__);
}
This block is well worth studying: it implements binder thread reuse, so that when binder calls bounce back and forth between processes no unnecessary extra threads are tied up. A detailed analysis is left for TODO; the toy sketch below shows the idea.
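As a toy illustration (plain userspace C++, made-up types, not kernel code): when process A calls into B and B, while handling that call, calls back into A, the walk above finds A's original thread on the stack and routes the nested transaction back to it instead of occupying a fresh thread.
#include <cstdio>

// Toy model of the transaction_stack walk in binder_transaction.
struct Proc;
struct Thread;

struct Transaction {
    Thread*      from;         // thread that sent this transaction
    Transaction* from_parent;  // previous transaction on the sender's stack
};

struct Thread {
    Proc*        proc;
    Transaction* transaction_stack;
};

struct Proc { const char* name; };

// Mirrors the while (tmp) loop: pick the most recent sending thread that lives
// in target_proc, so nested calls keep bouncing between the same two threads.
Thread* pick_target_thread(Thread* self, Proc* target_proc) {
    Thread* target = nullptr;
    for (Transaction* tmp = self->transaction_stack; tmp; tmp = tmp->from_parent)
        if (tmp->from && tmp->from->proc == target_proc)
            target = tmp->from;
    return target;
}

int main() {
    Proc a{"A"}, b{"B"};
    Thread ta{&a, nullptr}, tb{&b, nullptr};
    Transaction t1{&ta, nullptr};   // A called into B; B's thread has t1 on its stack
    tb.transaction_stack = &t1;
    Thread* target = pick_target_thread(&tb, &a);   // B now calls back into A
    printf("reuse A's original thread: %s\n", target == &ta ? "yes" : "no");
}
Back in binder_transaction, the transaction itself and a "transaction complete" work item are allocated next: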
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL); ----------1
binder_stats_created(BINDER_STAT_TRANSACTION);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); -----------2
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
t->debug_id = ++binder_last_id;
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread; -------------3
else
t->from = NULL;
t->sender_euid = task_euid(proc->tsk); ----------------4
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
if (!(t->flags & TF_ONE_WAY)) {
t->priority.sched_policy = current->policy;
t->priority.prio = current->normal_prio;
} else {
/* Oneway transactions run at default priority of the target */
t->priority = target_proc->default_priority;
}
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, --------5
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY));
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
if (target_node) {
binder_inc_node(target_node, 1, 0, NULL); ------------------6
if (target_ref)
binder_put_ref(target_ref);
target_ref = NULL;
}
off_start = (binder_size_t *)(t->buffer->data + ----------1
ALIGN(tr->data_size, sizeof(void *)));
offp = off_start;
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) -----------2
tr->data.ptr.buffer, tr->data_size)) {
...
}
if (copy_from_user(offp, (const void __user *)(uintptr_t) ---------3
tr->data.ptr.offsets, tr->offsets_size)) {
...
}
off_end = (void *)off_start + tr->offsets_size; ----------3
sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
sg_buf_end = sg_bufp + extra_buffers_size;
off_min = 0;
Now look back at mark 1: off_start is the start address of the offsets array, meaning that array sits right after the payload, at t->buffer->data + ALIGN(tr->data_size, sizeof(void *)).
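A toy layout demo (userspace C++, made-up sizes) of that arithmetic:
#include <cstdint>
#include <cstdio>

static size_t align_up(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }

int main() {
    const size_t data_size    = 100;                // Parcel payload bytes (made up)
    const size_t offsets_size = 1 * sizeof(size_t); // one flattened binder object

    alignas(sizeof(void*)) uint8_t buffer[256] = {};  // stands in for t->buffer->data
    size_t* off_start = reinterpret_cast<size_t*>(
            buffer + align_up(data_size, sizeof(void*)));
    size_t* off_end = reinterpret_cast<size_t*>(
            reinterpret_cast<uint8_t*>(off_start) + offsets_size);

    off_start[0] = 64;   // the flat_binder_object sits 64 bytes into the payload
    for (size_t* offp = off_start; offp < off_end; offp++)
        printf("flat_binder_object at data + %zu\n", *offp);
}
The driver walks exactly this offsets array, validating and translating each object it finds: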
for (; offp < off_end; offp++) {
struct binder_object_header *hdr;
size_t object_size = binder_validate_object(t->buffer, *offp);
if (object_size == 0 || *offp < off_min) {
binder_user_error(...);
}
hdr = (struct binder_object_header *)(t->buffer->data + *offp); -------------1
off_min = *offp + object_size;
switch (hdr->type) {
switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct flat_binder_object *fp;
fp = to_flat_binder_object(hdr); ---------------------1
ret = binder_translate_binder(fp, t, thread); ---------------2
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
...
} break;
case BINDER_TYPE_FD: {
...
} break;
......
default:
...
}
static int binder_translate_binder(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
{
struct binder_node *node;
struct binder_ref *ref;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
node = binder_get_node(proc, fp->binder); ------------1
if (!node) {
s8 priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
int sched_policy =
(fp->flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
node = binder_new_node(proc, fp->binder, fp->cookie); ------------2
if (!node)
return -ENOMEM;
binder_proc_lock(node->proc, __LINE__);
node->sched_policy = sched_policy;
node->min_priority = to_kernel_prio(sched_policy, priority);
node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS)
binder_proc_unlock(node->proc, __LINE__);
}
ref = binder_get_ref_for_node(target_proc, node, &thread->todo); ------------3
if (!ref) {
binder_put_node(node);
return -EINVAL;
}
if (fp->hdr.type == BINDER_TYPE_BINDER) -------------------4
fp->hdr.type = BINDER_TYPE_HANDLE;
else
fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
fp->binder = 0; -----------------------5
fp->handle = ref->desc;
fp->cookie = 0;
binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
trace_binder_transaction_node_to_ref(t, node, ref);
binder_put_ref(ref);
binder_put_node(node);
return 0;
}
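Conceptually, the translation can be pictured with a couple of toy tables (plain C++, made-up types, handle numbering simplified; in the real driver the context manager always owns desc 0): the sending process owns a binder_node keyed by the BBinder's address, and the target process gets a binder_ref whose desc becomes the handle the receiver sees.
#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

struct Node { uintptr_t binder; uintptr_t cookie; };   // owned by the sending proc

struct Proc {
    std::map<uintptr_t, Node> nodes;   // binder_get_node / binder_new_node
    std::vector<const Node*> refs;     // binder_get_ref_for_node; index acts as desc/handle
};

// Toy counterpart of binder_translate_binder: pointer in, handle out.
uint32_t translate_binder(Proc& sender, Proc& target, uintptr_t binder, uintptr_t cookie) {
    Node& node = sender.nodes.try_emplace(binder, Node{binder, cookie}).first->second;
    for (size_t i = 0; i < target.refs.size(); i++)    // reuse an existing ref if present
        if (target.refs[i] == &node)
            return static_cast<uint32_t>(i);
    target.refs.push_back(&node);                      // otherwise create a new one
    return static_cast<uint32_t>(target.refs.size() - 1);
}

int main() {
    Proc mediaserver, servicemanager;
    uint32_t handle = translate_binder(mediaserver, servicemanager,
                                       /*binder=*/0x1234, /*cookie=*/0x5678);
    printf("servicemanager sees handle %u instead of the raw pointers\n", handle);
}
Back in binder_transaction, the work items are queued and the target is woken up: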
t->work.type = BINDER_WORK_TRANSACTION; ----------1
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; ----------2
binder_enqueue_work(tcomplete, &thread->todo, __LINE__); --------3
oneway = !!(t->flags & TF_ONE_WAY);
if (reply) {
...
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
binder_proc_lock(thread->proc, __LINE__);
t->need_reply = 1; ---------------4
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
binder_proc_unlock(thread->proc, __LINE__);
binder_proc_lock(target_proc, __LINE__);
binder_proc_transaction(t, target_proc, target_thread);
binder_proc_unlock(target_proc, __LINE__);
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
binder_proc_lock(target_node->proc, __LINE__);
/*
* Test/set of has_async_transaction
* must be atomic with enqueue on
* async_todo
*/
if (target_node->has_async_transaction) {
binder_enqueue_work(&t->work, &target_node->async_todo,
__LINE__);
} else {
target_node->has_async_transaction = 1;
binder_proc_transaction(t, target_proc, NULL);
}
binder_proc_unlock(target_node->proc, __LINE__);
}
return;
}
static void binder_proc_transaction(struct binder_transaction *t,
struct binder_proc *proc,
struct binder_thread *thread)
{
struct binder_worklist *target_list = NULL;
wait_queue_head_t *target_wait = NULL;
bool oneway = !!(t->flags & TF_ONE_WAY);
if (!thread) {
thread = binder_select_thread(proc); -----------------1
}
if (thread) { ------------2
target_list = &thread->todo;
target_wait = &thread->wait;
binder_transaction_priority(thread->task, t,
t->buffer->target_node);
} else {
target_list = &proc->todo;
}
binder_enqueue_work(&t->work, target_list, __LINE__); -------3
binder_wakeup_thread(proc, thread, !oneway /* sync */); --------4
}
Once servicemanager starts it enters binder_loop: an endless loop of ioctl(BINDER_WRITE_READ) calls that read data from the driver and hand it to binder_parse. The thread may sleep inside the driver during this and is woken up when a client has work for it. There are two parts to look at: the kernel side (binder_thread_read) and the userspace side (binder_parse).
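For reference, an abridged binder_loop (frameworks/native/cmds/servicemanager/binder.c, error handling trimmed): it registers the thread with BC_ENTER_LOOPER, then keeps issuing BINDER_WRITE_READ with an empty write buffer and a small read buffer:
// Abridged binder_loop from servicemanager's binder.c; error handling trimmed.
void binder_loop(struct binder_state *bs, binder_handler func)
{
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;               // tell the driver this thread is a looper
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        ioctl(bs->fd, BINDER_WRITE_READ, &bwr); // may sleep inside binder_thread_read
        binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
    }
}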
Just as before, binder_ioctl_write_read first copies the userspace binder_write_read into kernel space. The non_block parameter is false here; see the flags ProcessState::open_driver passes when opening /dev/binder.
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread **threadp,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
struct binder_thread *thread = *threadp;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
struct binder_worklist *wlist = NULL;
int ret = 0;
bool wait_for_proc_work;
if (*consumed == 0) { ----------------1
if (put_user(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
}
retry:
binder_proc_lock(proc, __LINE__);
wait_for_proc_work = binder_available_for_proc_work(thread); --------2
binder_proc_unlock(proc, __LINE__);
if (wait_for_proc_work)
atomic_inc(&proc->ready_threads);
trace_binder_wait_for_work(wait_for_proc_work,
!!thread->transaction_stack,
!binder_worklist_empty(&thread->todo));
thread->looper |= BINDER_LOOPER_STATE_WAITING;
if (wait_for_proc_work) {
BUG_ON(!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED)));
binder_set_priority(current, proc->default_priority,
true /* restore */);
}
if (non_block) {
if (!binder_has_work(thread, wait_for_proc_work))
ret = -EAGAIN;
} else {
ret = binder_wait_for_work(thread, wait_for_proc_work); --------------3
}
if (wait_for_proc_work)
atomic_dec(&proc->ready_threads);
if (ret)
return ret;
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_transaction *t = NULL;
struct binder_work *w = NULL;
wlist = NULL;
binder_proc_lock(thread->proc, __LINE__);
spin_lock(&thread->todo.lock);
if (!_binder_worklist_empty(&thread->todo)) { -------------1
w = list_first_entry(&thread->todo.list,
struct binder_work,
entry);
wlist = &thread->todo;
binder_freeze_worklist(wlist);
}
spin_unlock(&thread->todo.lock);
if (!w) {
spin_lock(&proc->todo.lock);
if (!_binder_worklist_empty(&proc->todo) &&
wait_for_proc_work) {
w = list_first_entry(&proc->todo.list,
struct binder_work,
entry);
wlist = &proc->todo;
binder_freeze_worklist(wlist);
}
spin_unlock(&proc->todo.lock);
if (!w) {
binder_proc_unlock(thread->proc, __LINE__);
/* no data added */
if (ptr - buffer == 4 &&
!READ_ONCE(thread->looper_need_return))
goto retry;
break;
}
}
binder_proc_unlock(thread->proc, __LINE__);
if (end - ptr < sizeof(tr) + 4) {
if (wlist)
binder_unfreeze_worklist(wlist);
break;
}
switch (w->type) {
case BINDER_WORK_TRANSACTION: { -----------------2
t = container_of(w, struct binder_transaction, work);
} break;
...
}
if (t->buffer->target_node) { -----------------------1
struct binder_node *target_node = t->buffer->target_node;
tr.target.ptr = target_node->ptr; ------------------2
tr.cookie = target_node->cookie;
/* Don't need a lock to check set_priority_called, since
* the lock was held when pulling t off the workqueue,
* and it hasn't changed since then
*/
if (!t->set_priority_called)
binder_transaction_priority(current, t,
target_node);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = 0;
tr.cookie = 0;
cmd = BR_REPLY;
}
tr.code = t->code; //ADD_SERVICE_TRANSACTION
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
if (t->from) {
struct task_struct *sender = t->from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
} else {
tr.sender_pid = 0;
}
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)
((uintptr_t)t->buffer->data +
binder_alloc_get_user_buffer_offset(&proc->alloc));
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
if (put_user(cmd, (uint32_t __user *)ptr)) { ---------------3
binder_unfreeze_worklist(wlist);
return -EFAULT;
}
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr))) { ----------------4
binder_unfreeze_worklist(wlist);
return -EFAULT;
}
ptr += sizeof(tr);
...
break;
}
done:
*consumed = ptr - buffer; // update consumed so userspace knows how much data is valid
binder_proc_lock(thread->proc, __LINE__);
if (proc->requested_threads + -----------------------1
atomic_read(&proc->ready_threads) == 0 &&
proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
/* the user-space code fails to */
/* spawn a new thread if we leave this out */) {
proc->requested_threads++;
binder_proc_unlock(thread->proc, __LINE__);
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BR_SPAWN_LOOPER\n",
proc->pid, thread->pid);
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) ------------2
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
} else
binder_proc_unlock(thread->proc, __LINE__);
return 0;
}
1. The thread-related fields of binder_proc used above:
ready_threads: threads of this process currently idle in binder_wait_for_work, waiting for proc work;
requested_threads: BR_SPAWN_LOOPER requests already sent to userspace that have not yet produced a registered thread;
requested_threads_started: threads that were spawned on request and registered back with BC_REGISTER_LOOPER;
max_threads: the cap the process sets via the BINDER_SET_MAX_THREADS ioctl.
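A toy restatement of the spawn decision at the end of binder_thread_read (field names mirror binder_proc; plain userspace code, not the kernel's):
#include <cstdio>

struct BinderProcCounters {
    int requested_threads;          // spawn requests sent, not yet registered
    int ready_threads;              // threads currently idle waiting for proc work
    int requested_threads_started;  // threads spawned on request so far
    int max_threads;                // cap set via BINDER_SET_MAX_THREADS
};

bool should_spawn_looper(const BinderProcCounters& p, bool is_binder_thread) {
    return p.requested_threads + p.ready_threads == 0 &&   // nobody free, no spawn pending
           p.requested_threads_started < p.max_threads &&  // still under the cap
           is_binder_thread;                               // only ask registered/entered loopers
}

int main() {
    BinderProcCounters p{0, 0, 2, 15};
    printf("spawn another binder thread? %s\n", should_spawn_looper(p, true) ? "yes" : "no");
}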
Back in userspace, binder_parse parses the data read from the driver:
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func) // func == svcmgr_handler
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) { ------------1
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
switch(cmd) {
...
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn); ----------2
res = func(bs, txn, &msg, &reply); ----------3
if (txn->flags & TF_ONE_WAY) {
binder_free_buffer(bs, txn->data.ptr.buffer);
} else {
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res); ----------4
}
}
ptr += sizeof(*txn);
break;
}
...
return r;
void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
{
bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer; //Parcel data的地址
bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets; // flat_binder_object在Parcel data中的偏移
bio->data_avail = txn->data_size; //Parcel data大小
bio->offs_avail = txn->offsets_size / sizeof(size_t); //offsets_size应该是flat_binder_object的数量,除以sizeof(size_t)没看懂
bio->flags = BIO_F_SHARED;
}
s = bio_get_string16(msg, &len); //"android.os.IServiceManager"
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
...
case SVC_MGR_ADD_SERVICE: // corresponds to code ADD_SERVICE_TRANSACTION
s = bio_get_string16(msg, &len); // "media.player"
if (s == NULL) {
return -1;
}
handle = bio_get_ref(msg); // the handle that now refers to MediaPlayerService (the driver replaced the flattened BBinder with it)
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))
return -1;
break;
case SVC_MGR_LIST_SERVICES: {
...
bio_put_uint32(reply, 0);
return 0;
}
int do_add_service(struct binder_state *bs,
const uint16_t *s, size_t len,
uint32_t handle, uid_t uid, int allow_isolated,
pid_t spid)
{
struct svcinfo *si;
if (!svc_can_register(s, len, spid, uid)) { -------------------1
ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
str8(s, len), handle, uid);
return -1;
}
si = find_svc(s, len); -----------------2
if (si) {
if (si->handle) {
ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
str8(s, len), handle, uid);
svcinfo_death(bs, si);
}
si->handle = handle;
} else {
si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
if (!si) {
ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
str8(s, len), handle, uid);
return -1;
}
si->handle = handle;
si->len = len;
memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
si->name[len] = '\0';
si->death.func = (void*) svcinfo_death;
si->death.ptr = si;
si->allow_isolated = allow_isolated;
si->next = svclist;
svclist = si;
}
binder_acquire(bs, handle);
binder_link_to_death(bs, handle, &si->death); ----------------3
return 0;
}
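For completeness, the per-service record that do_add_service stores on svclist is roughly this (struct svcinfo from service_manager.c; fields may differ slightly by version):
// Trimmed struct svcinfo from service_manager.c: what do_add_service links into svclist.
struct svcinfo
{
    struct svcinfo *next;        // singly linked list, head is svclist
    uint32_t handle;             // the driver-assigned handle for the service's node
    struct binder_death death;   // death notification func/ptr
    int allow_isolated;
    size_t len;
    uint16_t name[0];            // UTF-16 service name, e.g. "media.player"
};
A later getService/checkService for "media.player" walks svclist, finds this entry and returns the stored handle, which the driver then materializes as a binder_ref in the requesting process.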
That is as far as the code walk-through goes; for an end-to-end recap of addService, see TODO.