Part 5 Binder (5): Service lookup with getService
1. Entry point
IMediaDeathNotifier.cpp
IMediaDeathNotifier::getMediaPlayerService()
{
Mutex::Autolock _l(sServiceLock);
if (sMediaPlayerService == 0) {
//obtain the ServiceManager; this returns a BpServiceManager
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder;
do {
//call sm->getService, which returns a binder object
binder = sm->getService(String16("media.player"));
if (binder != 0) {
break;
}
usleep(500000); // 0.5 s
} while (true); //if the lookup fails, try again 0.5 s later
if (sDeathNotifier == NULL) {
sDeathNotifier = new DeathNotifier();
}
binder->linkToDeath(sDeathNotifier);
//turn the binder into an IMediaPlayerService proxy
sMediaPlayerService = interface_cast<IMediaPlayerService>(binder);
}
return sMediaPlayerService;
}
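The same lookup pattern applies to any native client: get the ServiceManager proxy, ask for the service by name, then cast the returned binder to the interface proxy. A minimal sketch of that pattern (a hypothetical standalone caller, assuming the libbinder/libmedia headers; it is not part of the IMediaDeathNotifier code above):
#include <binder/IServiceManager.h>
#include <media/IMediaPlayerService.h>
using namespace android;
// Hypothetical helper showing the generic getService pattern from step 1.
sp<IMediaPlayerService> lookupMediaPlayerService()
{
    sp<IServiceManager> sm = defaultServiceManager();            // BpServiceManager
    sp<IBinder> binder = sm->getService(String16("media.player"));
    if (binder == NULL) return NULL;                              // service not registered yet
    // wrap the BpBinder in a BpMediaPlayerService proxy
    return interface_cast<IMediaPlayerService>(binder);
}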
2. sm->getService
virtual sp<IBinder> getService(const String16& name) const
{
unsigned n;
for (n = 0; n < 5; n++){
//call checkService(name)
sp<IBinder> svc = checkService(name);
if (svc != NULL) return svc;
sleep(1);
}
return NULL;
}
3. checkService(name)
virtual sp<IBinder> checkService(const String16& name) const
{
Parcel data, reply;
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
data.writeString16(name);
remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
return reply.readStrongBinder();
}
Here remote() returns a BpBinder.
4. BpBinder::transact
transact(CHECK_SERVICE_TRANSACTION, data, &reply);
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
if (mAlive) {
status_t status = IPCThreadState::self()->transact(
mHandle, code, data, reply, flags);
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
return DEAD_OBJECT;
}
5. IPCThreadState::self()
The IPCThreadState object is created here; its constructor registers it in thread-local storage and sets up the mIn and mOut Parcels.
IPCThreadState* IPCThreadState::self()
{
if (gHaveTLS) {
restart:
const pthread_key_t k = gTLS;
IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
if (st) return st;
return new IPCThreadState;
}
if (gShutdown) return NULL;
pthread_mutex_lock(&gTLSMutex);
if (!gHaveTLS) {
if (pthread_key_create(&gTLS, threadDestructor) != 0) {
pthread_mutex_unlock(&gTLSMutex);
return NULL;
}
gHaveTLS = true;
}
pthread_mutex_unlock(&gTLSMutex);
goto restart;
}
IPCThreadState::IPCThreadState()
: mProcess(ProcessState::self()),
mMyThreadId(androidGetTid()),
mStrictModePolicy(0),
mLastTransactionBinderFlags(0)
{
pthread_setspecific(gTLS, this);
clearCaller();
mIn.setDataCapacity(256);
mOut.setDataCapacity(256);
}
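Every binder call made on a given thread funnels through this one per-thread object: commands to send accumulate in mOut and replies come back in mIn, so no locking between threads is needed. A small usage sketch (hypothetical helper, assuming the libbinder headers):
#include <binder/IBinder.h>
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
using namespace android;
// Hypothetical helper: ping a remote binder from the current thread.
// binder->transact() lands in IPCThreadState::self()->transact(), i.e. the
// exact path traced in steps 4-9, using this thread's private mIn/mOut pair.
status_t sendPing(const sp<IBinder>& binder)
{
    Parcel data, reply;
    return binder->transact(IBinder::PING_TRANSACTION, data, &reply);
}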
6. IPCThreadState::transact
The arguments at this point are:
code: CHECK_SERVICE_TRANSACTION
data: the Parcel filled in step 3
handle: 0, the ServiceManager's handle
The flow inside transact:
- errorCheck() //sanity-check the data
- writeTransactionData() //package the data to send
- waitForResponse() //wait for the response
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
flags |= TF_ACCEPT_FDS;
if (err == NO_ERROR) {
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
if ((flags & TF_ONE_WAY) == 0) {//flags is 0, so this branch is taken
if (reply) {//wait for the reply
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
} else {//one-way calls that need no reply go here
err = waitForResponse(NULL, NULL);
}
return err;
}
7. writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
writeTransactionData(BC_TRANSACTION, 0, 0, CHECK_SERVICE_TRANSACTION, data, NULL)
Packs code, data and the other arguments into a binder_transaction_data tr, then writes the cmd followed by tr into mOut.
mOut is declared in IPCThreadState.h as: Parcel mOut;
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(size_t);
tr.data.ptr.offsets = data.ipcObjects();
} else if (statusBuffer) {
tr.flags |= TF_STATUS_CODE;
*statusBuffer = err;
tr.data_size = sizeof(status_t);
tr.data.ptr.buffer = statusBuffer;
tr.offsets_size = 0;
tr.data.ptr.offsets = NULL;
} else {
return (mLastError = err);
}
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
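After this call mOut holds the command word followed immediately by the transaction descriptor. The sketch below shows roughly what that descriptor carries for our request; the field types are approximations of the kernel's uapi struct, not the exact definition of any one version:
#include <stdint.h>
#include <stddef.h>
#include <sys/types.h>
// Approximate sketch of one BC_TRANSACTION entry queued in mOut by
// writeTransactionData(); layout: [ BC_TRANSACTION ][ binder_transaction_data ]
struct binder_transaction_data_sketch {
    union { uint32_t handle; void *ptr; } target;  // 0 here: the ServiceManager
    void     *cookie;        // 0
    uint32_t code;           // CHECK_SERVICE_TRANSACTION
    uint32_t flags;          // TF_ACCEPT_FDS
    pid_t    sender_pid;     // 0 from user space; the driver fills it in
    uid_t    sender_euid;    // 0 from user space; the driver fills it in
    size_t   data_size;      // data.ipcDataSize()
    size_t   offsets_size;   // ipcObjectsCount() * sizeof(size_t)
    struct { const void *buffer; const void *offsets; } data; // -> the Parcel's memory
};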
8. waitForResponse
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
while (1) {
//talkWithDriver exchanges data with the Binder driver; the returned data ends up in mIn
if ((err=talkWithDriver()) < NO_ERROR) break;//talk to the driver
err = mIn.errorCheck();//the returned data is now in mIn
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
//the code below parses the data in mIn; skipped here, picked up again in step 25
}
}
9. talkWithDriver
talkWithDriver() packs the data into a binder_write_read variable bwr and issues the BINDER_WRITE_READ ioctl to the Binder driver.
On this call the write area of bwr carries the BC_TRANSACTION from mOut, and the read area points at mIn's buffer so the reply can be received.
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}
binder_write_read bwr;
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (long unsigned int)mOut.data();//the commands queued in mOut
if (doReceive && needRead) {
//fill in the receive buffer info; doReceive defaults to true and needRead is true here,
//so whatever the driver returns lands directly in mIn
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (long unsigned int)mIn.data();
} else {//not taken on this path
bwr.read_size = 0;
bwr.read_buffer = 0;
}
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
#if defined(HAVE_ANDROID_OS)//on Android, hand bwr to the Binder driver with the BINDER_WRITE_READ ioctl
//kept in a loop so the call is retried if it is interrupted by a signal
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
} while (err == -EINTR);
if (err >= NO_ERROR) {
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < (ssize_t)mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
else
mOut.setDataSize(0);
}
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
return NO_ERROR;
}
return err;
}
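The only argument handed to the BINDER_WRITE_READ ioctl is this small descriptor; the driver consumes the write side first (binder_thread_write) and then fills the read side (binder_thread_read). A simplified sketch of the struct, with field widths approximated:
#include <stdint.h>
#include <stddef.h>
// Approximate sketch of the binder_write_read descriptor used by talkWithDriver().
struct binder_write_read_sketch {
    size_t    write_size;     // bytes available in write_buffer (from mOut)
    size_t    write_consumed; // bytes the driver actually processed
    uintptr_t write_buffer;   // -> mOut.data(): BC_* commands from this thread
    size_t    read_size;      // capacity of read_buffer (mIn.dataCapacity())
    size_t    read_consumed;  // bytes the driver wrote back
    uintptr_t read_buffer;    // -> mIn.data(): BR_* commands for this thread
};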
10. ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr);
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg; //the bwr data passed down from user space
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
mutex_lock(&binder_lock);
thread = binder_get_thread(proc);
switch (cmd) {
case BINDER_WRITE_READ: {
//kernel-space copy of bwr, plus a size check
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
}//copy the user-space bwr into the kernel copy
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
}
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
if (bwr.read_size > 0) {//the read side; covered in steps 13 and 24
ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
}
break;
}
}
ret = 0;
}
11. binder_thread_write
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
void __user *buffer, int size, signed long *consumed)
{
uint32_t cmd;
//ptr and end point to the start and end of the data
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))//read the command word; here cmd is BC_TRANSACTION
return -EFAULT;
ptr += sizeof(uint32_t);//advance past the command just consumed
switch (cmd) {
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
//copy the user-space binder_transaction_data into kernel space
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);//advance past the payload just consumed
//hand off to binder_transaction
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break;
}
}
*consumed = ptr - buffer;
}
return 0;
}
12. binder_transaction
Find the target process and target thread.
A binder_transaction object t is created to describe this transaction, and the user-space tr data is copied into it. Because the memory t->buffer points at is shared with the target process, one copy is enough to deliver the data to the target. t is then queued on the target's todo list, a pending-completion item tcomplete is queued on the calling thread, and the target is woken up.
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
size_t *offp, *off_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
if (reply) {
//the reply path; analyzed in step 22
} else {
//writeTransactionData set tr.target.handle = handle, and handle is 0, so the else branch is taken
if (tr->target.handle) {
} else {
//target_node is the target binder node; handle 0 means the ServiceManager's node
target_node = binder_context_mgr_node;
}
//target_proc is the target process
target_proc = target_node->proc;
if (target_proc == NULL) {
}
}
if (target_thread) {
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else {
target_list = &target_proc->todo;
target_wait = &target_proc->wait;
}
//allocate a binder_transaction object t describing this transaction
//t will end up on the target's todo list; when the target wakes up it takes its work from that list
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
}
binder_stats_created(BINDER_STAT_TRANSACTION);
//the calling thread has a pending transaction; tcomplete will be queued on its own todo list
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
t->debug_id = ++binder_last_id;
e->debug_id = t->debug_id;
//t->from records which thread initiated the transaction
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = proc->tsk->cred->euid;
//t->to_proc and t->to_thread identify the target process and thread
t->to_proc = target_proc;
t->to_thread = target_thread;
//the request code for the ServiceManager; in this trace it is CHECK_SERVICE_TRANSACTION
t->code = tr->code;//the code passed down from user space
t->flags = tr->flags;
t->priority = task_nice(current);
//t->buffer is memory allocated for this transaction from the area managed by the target's mmap
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
}
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
//with t->buffer allocated, copy the user-space data into it; since the memory t->buffer points at is shared with the target process, this single copy delivers the data to the target
if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
}
if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
}
off_end = (void *)offp + tr->offsets_size;
for (; offp < off_end; offp++) {
//handling of the flat binder objects, skipped for now
}
if (reply) {//this is not a reply, so this branch is skipped
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction(target_thread, in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
//TF_ONE_WAY was not set, so this branch is taken
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
} else {
}
//set work.type; the ServiceManager side checks this
t->work.type = BINDER_WORK_TRANSACTION;
//queue this transaction on the target's todo list
list_add_tail(&t->work.entry, target_list);
//set the type of the pending-completion item; used later
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
//queue the pending-completion item on the calling thread's todo list
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)//wake up the target
wake_up_interruptible(target_wait);
return;
}
13. What the ServiceManager does after it is woken up
Note where the ServiceManager was sleeping: inside its ioctl, in binder_thread_read. The code below is what binder_thread_read does once it wakes up; when the ioctl returns to user space, the next step is binder_parse (step 14).
After waking up, it takes the work item off the todo list, copies the kernel-space data up to the ServiceManager's user space, and sets cmd to BR_TRANSACTION.
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
//take the binder_work that binder_transaction queued on the todo list
if (!list_empty(&thread->todo))
w = list_first_entry(&thread->todo, struct binder_work, entry);
else if (!list_empty(&proc->todo) && wait_for_proc_work)
w = list_first_entry(&proc->todo, struct binder_work, entry);
else {
}
if (end - ptr < sizeof(tr) + 4)
break;
switch (w->type) {//the type queued earlier was BINDER_WORK_TRANSACTION
case BINDER_WORK_TRANSACTION: {
//recover the enclosing binder_transaction object t
t = container_of(w, struct binder_transaction, work);
} break;
}
if (!t)
continue;
BUG_ON(t->buffer == NULL);
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
t->saved_priority = task_nice(current);
//set cmd to BR_TRANSACTION
cmd = BR_TRANSACTION;
} else {
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = t->sender_euid;
if (t->from) {
struct task_struct *sender = t->from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
current->nsproxy->pid_ns);
} else {
}
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (void *)t->buffer->data +
proc->user_buffer_offset;
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_stat_br(proc, thread, cmd);
list_del(&t->work.entry);//remove the handled work item from the todo list
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
} else {
t->buffer->transaction = NULL;
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
break;
}
14. Return from ioctl and run binder_parse
1. Initialize reply.
2. Call func to handle the request and put the result into reply.
3. Send reply back to the Binder driver.
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uint32_t *ptr, uint32_t size, binder_handler func)
{
int r = 1;
uint32_t *end = ptr + (size / 4);
while (ptr < end) {
uint32_t cmd = *ptr++;
switch(cmd) {
case BR_TRANSACTION: {
struct binder_txn *txn = (void *) ptr;
if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
return -1;
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
//initialize reply
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
//handle the request
res = func(bs, txn, &msg, &reply);
//send the reply back to the Binder driver
binder_send_reply(bs, &reply, txn->data, res);
}
ptr += sizeof(*txn) / sizeof(uint32_t);
break;
}
}
}
return r;
}
15. bio_init initializes reply
First, the binder_io structure:
struct binder_io
{
char *data; /* current position in the data area */
binder_size_t *offs; /* current position in the offsets area */
size_t data_avail; /* space remaining in the data area */
size_t offs_avail; /* remaining offset entries */
char *data0; /* start of the data area */
binder_size_t *offs0; /* start of the offsets area */
uint32_t flags;
uint32_t unused;
};
unsigned rdata[256/4];
bio_init(&reply, rdata, sizeof(rdata), 4);
void bio_init(struct binder_io *bio, void *data,
uint32_t maxdata, uint32_t maxoffs)
{
uint32_t n = maxoffs * sizeof(uint32_t);
if (n > maxdata) {
bio->flags = BIO_F_OVERFLOW;
bio->data_avail = 0;
bio->offs_avail = 0;
return;
}
bio->data = bio->data0 = data + n;
bio->offs = bio->offs0 = data;
bio->data_avail = maxdata - n;
bio->offs_avail = maxoffs;
bio->flags = 0;
}
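With the arguments used in step 14 (a 256-byte rdata buffer and maxoffs = 4), the first 16 bytes become the offsets area and the remaining 240 bytes the data area. A worked illustration of the resulting binder_io fields:
// bio_init(&reply, rdata, sizeof(rdata), 4) with sizeof(rdata) == 256:
//
//   rdata: [ offsets area, 4 * 4 = 16 bytes ][ data area, 240 bytes ]
//           ^ offs = offs0                    ^ data = data0
//
//   offs_avail = 4 entries, data_avail = 240 bytes, flags = 0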
16. bio_init_from_txn initializes msg
void bio_init_from_txn(struct binder_io *bio, struct binder_txn *txn)
{
bio->data = bio->data0 = txn->data;
bio->offs = bio->offs0 = txn->offs;
bio->data_avail = txn->data_size;
bio->offs_avail = txn->offs_size / 4;
bio->flags = BIO_F_SHARED;
}
17. res = func(bs, txn, &msg, &reply), which calls svcmgr_handler
Look up the requested service.
Put the matching service into reply.
int svcmgr_handler(struct binder_state *bs,
struct binder_txn *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
unsigned len;
void *ptr;
uint32_t strict_policy;
if (txn->target != svcmgr_handle)
return -1;
strict_policy = bio_get_uint32(msg);
//the string read here is s = "android.os.IServiceManager"
s = bio_get_string16(msg, &len);
if ((len != (sizeof(svcmgr_id) / 2)) ||
memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
fprintf(stderr,"invalid id %s\n", str8(s));
return -1;
}
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
//the string read here is s = "media.player"
s = bio_get_string16(msg, &len);
ptr = do_find_service(bs, s, len);//look up the matching service
if (!ptr)
break;
bio_put_ref(reply, ptr);//put the matching service into reply
return 0;
}
bio_put_uint32(reply, 0);
return 0;
}
18. do_find_service looks up the matching svcinfo
void *do_find_service(struct binder_state *bs, uint16_t *s, unsigned len)
{
struct svcinfo *si;
si = find_svc(s, len);
if (si && si->ptr) {
return si->ptr;
} else {
return 0;
}
}
struct svcinfo
{
struct svcinfo *next;
void *ptr;
struct binder_death death;
unsigned len;
uint16_t name[0];
};
struct svcinfo *find_svc(uint16_t *s16, unsigned len)
{
struct svcinfo *si;
for (si = svclist; si; si = si->next) {
if ((len == si->len) &&
!memcmp(s16, si->name, len * sizeof(uint16_t))) {
return si;
}
}
return 0;
}
19. bio_put_ref puts the result into reply
Allocates a binder_object obj inside the bio, stores the ptr found by do_find_service in obj->pointer, and sets obj's type to BINDER_TYPE_HANDLE.
void bio_put_ref(struct binder_io *bio, void *ptr)
{
//allocate and initialize space for a binder_object
struct binder_object *obj;
if (ptr)
obj = bio_alloc_obj(bio);//carve a binder_object out of the bio and record its offset
else
obj = bio_alloc(bio, sizeof(*obj));
if (!obj)
return;
obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
obj->type = BINDER_TYPE_HANDLE;//the type is BINDER_TYPE_HANDLE
obj->pointer = ptr;//attach the ptr found by do_find_service
obj->cookie = 0;
}
bio_alloc_obj allocates a new binder_object obj out of the bio's data area, records its offset in the offsets area, and returns its address.
static struct binder_object *bio_alloc_obj(struct binder_io *bio)
{
struct binder_object *obj;
//carve sizeof(*obj) bytes out of the bio's data area
obj = bio_alloc(bio, sizeof(*obj));
if (obj && bio->offs_avail) {
bio->offs_avail--;
*bio->offs++ = ((char*) obj) - ((char*) bio->data0);
return obj;
}
bio->flags |= BIO_F_OVERFLOW;
return 0;
}
bio_alloc(bio, size) reserves size bytes (rounded up to a multiple of 4) in the bio and returns the start address.
static void *bio_alloc(struct binder_io *bio, uint32_t size)
{
size = (size + 3) & (~3);
if (size > bio->data_avail) {
bio->flags |= BIO_F_OVERFLOW;
return 0;
} else {
void *ptr = bio->data;
bio->data += size;
bio->data_avail -= size;
return ptr;
}
}
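The (size + 3) & (~3) step rounds each request up to the next multiple of 4 so every object placed in the data area stays 4-byte aligned, for example:
// (size + 3) & ~3 examples:
//   size = 1 -> (1 + 3) & ~3 = 4
//   size = 4 -> (4 + 3) & ~3 = 4   (already aligned, unchanged)
//   size = 5 -> (5 + 3) & ~3 = 8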
20. binder_send_reply(bs, &reply, txn->data, res);
Packs everything into a local struct data: data.txn carries the reply's payload and bookkeeping, and data.cmd_reply = BC_REPLY.
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
void *buffer_to_free,
int status)
{
struct {
uint32_t cmd_free;
void *buffer;
uint32_t cmd_reply;
struct binder_txn txn;
} __attribute__((packed)) data;
data.cmd_free = BC_FREE_BUFFER;
data.buffer = buffer_to_free;
data.cmd_reply = BC_REPLY;//note this command: BC_REPLY
data.txn.target = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) {
//the error path, not discussed here
} else {
data.txn.flags = 0;
//current position minus start address = number of bytes written
data.txn.data_size = reply->data - reply->data0;
data.txn.offs_size = ((char*) reply->offs) - ((char*) reply->offs0);
data.txn.data = reply->data0;//data area
data.txn.offs = reply->offs0;//offsets area
}
binder_write(bs, &data, sizeof(data));
}
21. binder_write
Wraps data in a binder_write_read and sends it to the Binder driver via ioctl.
int binder_write(struct binder_state *bs, void *data, unsigned len)
{
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (unsigned) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
}
return res;
}
22. Sending the reply through the driver
The reply again follows binder_ioctl -> binder_thread_write -> binder_transaction; this time we look at the reply branch of binder_transaction.
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
size_t *offp, *off_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
if (reply) {
//find the target process and thread from the transaction being replied to
in_reply_to = thread->transaction_stack;
binder_set_nice(in_reply_to->saved_priority);
thread->transaction_stack = in_reply_to->to_parent;
target_thread = in_reply_to->from;
target_proc = target_thread->proc;
} else {
//the non-reply case, covered in step 12
}
if (target_thread) {
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else {
target_list = &target_proc->todo;
target_wait = &target_proc->wait;
}
e->to_proc = target_proc->pid;
//allocate a binder_transaction object t describing the work to do
t = kzalloc(sizeof(*t), GFP_KERNEL);
//allocate tcomplete, the ServiceManager's pending-completion item
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
//fill in t
t->sender_euid = proc->tsk->cred->euid;
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
//t->buffer points into the region mapped by the target process's mmap
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
}
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
//copy the data from the ServiceManager's user space into the kernel; because t->buffer is backed by the target's mmap'ed region, this also delivers it to the target process
if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
}
if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
}
off_end = (void *)offp + tr->offsets_size;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
if (*offp > t->buffer->data_size - sizeof(*fp) ||
t->buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(void *))) {
binder_user_error("binder: %d:%d got transaction with "
"invalid offset, %zd\n",
proc->pid, thread->pid, *offp);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
switch (fp->type) {//the ServiceManager set the type to BINDER_TYPE_HANDLE
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref->node->proc == target_proc) {//same process: the binder address can be passed directly
if (fp->type == BINDER_TYPE_HANDLE)
fp->type = BINDER_TYPE_BINDER;
else
fp->type = BINDER_TYPE_WEAK_BINDER;
fp->binder = ref->node->ptr;
fp->cookie = ref->node->cookie;
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
} else {//different process: a new handle is created so the target talks through it
struct binder_ref *new_ref;
new_ref = binder_get_ref_for_node(target_proc, ref->node);
fp->handle = new_ref->desc;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
}
} break;
}
}
if (reply) {
binder_pop_transaction(target_thread, in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
} else {
}
//set the work type
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
//tcomplete is queued on the ServiceManager thread's todo list
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait);//wake up the target (the getService caller)
return;
}
23. The ServiceManager goes back to sleep
binder_transaction queued a BINDER_WORK_TRANSACTION_COMPLETE item on the ServiceManager thread's todo list; binder_thread_read turns it into a BR_TRANSACTION_COMPLETE for user space and deletes it. With nothing left on the todo list, the next binder_thread_read puts the ServiceManager back to sleep.
case BINDER_WORK_TRANSACTION_COMPLETE: {
cmd = BR_TRANSACTION_COMPLETE;
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, cmd);
list_del(&w->entry);//remove the work item
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
24. The getService caller is woken up
The getService caller went to sleep via waitForResponse -> talkWithDriver -> binder_ioctl -> binder_thread_read; now that its request has a reply, it is woken up.
24.1 Going to sleep
The process that called getService went to sleep around the time the ServiceManager was woken up: its binder_thread_write returned and binder_thread_read then blocked.
24.2 Being woken up
It takes out the data the ServiceManager filled in and copies it to the getService caller's user space.
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
void __user *buffer, int size,
signed long *consumed, int non_block)
{
//after being woken up
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
//take the binder_work that the reply transaction queued on this thread's todo list
if (!list_empty(&thread->todo))
w = list_first_entry(&thread->todo, struct binder_work, entry);
else if (!list_empty(&proc->todo) && wait_for_proc_work)
w = list_first_entry(&proc->todo, struct binder_work, entry);
else {
if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
goto retry;
break;
}
if (end - ptr < sizeof(tr) + 4)
break;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
//recover the binder_transaction from the binder_work
t = container_of(w, struct binder_transaction, work);
} break;
}
if (!t)
continue;
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
t->saved_priority = task_nice(current);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = NULL;
tr.cookie = NULL;
cmd = BR_REPLY;//this time the command becomes BR_REPLY
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = t->sender_euid;
if (t->from) {
struct task_struct *sender = t->from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
current->nsproxy->pid_ns);
} else {
tr.sender_pid = 0;
}
//prepare the data for user space
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (void *)t->buffer->data +
proc->user_buffer_offset;
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
//copy the data to user space
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_stat_br(proc, thread, cmd);
//handled; remove the work item from the todo list
list_del(&t->work.entry);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
} else {
t->buffer->transaction = NULL;
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
break;
}
return 0;
}
25. binder_thread_read finishes and control returns through binder_ioctl and talkWithDriver to waitForResponse
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
int32_t cmd;
int32_t err;
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
cmd = mIn.readInt32();
switch (cmd) {
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(size_t),
freeBuffer, this);
} else {
}
} else {
}
}
goto finish;
default:
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}
}
finish:
return err;
}
26. ipcSetDataReference
Hands the reply data over to the Parcel.
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
const size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
{
freeDataNoInit();
mError = NO_ERROR;
mData = const_cast<uint8_t*>(data);
mDataSize = mDataCapacity = dataSize;
//ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)\n", this, mDataSize, getpid());
mDataPos = 0;
ALOGV("setDataReference Setting data pos of %p to %d\n", this, mDataPos);
mObjects = const_cast<size_t*>(objects);
mObjectsSize = mObjectsCapacity = objectsCount;
mNextObjectHint = 0;
mOwner = relFunc;
mOwnerCookie = relCookie;
scanForFds();
}
27. Back in the caller: the Java-side ServiceManagerProxy.getService looks like this (the native checkService in step 3 likewise ends with reply.readStrongBinder())
public IBinder getService(String name) throws RemoteException {
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
data.writeInterfaceToken(IServiceManager.descriptor);
data.writeString(name);
//perform the transact
mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);
//read the result out of reply
IBinder binder = reply.readStrongBinder();
reply.recycle();
data.recycle();
return binder;
}
28. readStrongBinder
sp<IBinder> Parcel::readStrongBinder() const
{
sp<IBinder> val;
unflatten_binder(ProcessState::self(), *this, &val);
return val;
}
29. unflatten_binder(ProcessState::self(), *this, &val)
status_t unflatten_binder(const sp<ProcessState>& proc,
const Parcel& in, sp<IBinder>* out)
{
const flat_binder_object* flat = in.readObject(false);
if (flat) {
switch (flat->type) {
case BINDER_TYPE_BINDER:
*out = reinterpret_cast<IBinder*>(flat->cookie);
return finish_unflatten_binder(NULL, *flat, in);
case BINDER_TYPE_HANDLE:
*out = proc->getStrongProxyForHandle(flat->handle);
return finish_unflatten_binder(
static_cast<BpBinder*>(out->get()), *flat, in);
}
}
return BAD_TYPE;
}
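The object readObject() returns here is the flat_binder_object that the driver rewrote in step 22. Since the ServiceManager and the getService caller are different processes, its type is BINDER_TYPE_HANDLE and the handle field carries the reference created by binder_get_ref_for_node. A simplified sketch of the struct (field widths approximate):
#include <stdint.h>
// Approximate sketch of flat_binder_object as the caller sees it.
struct flat_binder_object_sketch {
    uint32_t type;       // BINDER_TYPE_HANDLE after the driver's rewrite
    uint32_t flags;      // 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS, set in bio_put_ref
    union {
        void    *binder; // used for BINDER_TYPE_BINDER (same-process case)
        uint32_t handle; // used for BINDER_TYPE_HANDLE: this process's new reference
    };
    void *cookie;
};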
30. getStrongProxyForHandle
For the handle, create a new BpBinder (or look up an existing one) and return it as an sp<IBinder>.
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
IBinder* b = e->binder;
if (b == NULL || !e->refs->attemptIncWeak(this)) {
if (handle == 0) {
Parcel data;
status_t status = IPCThreadState::self()->transact(
0, IBinder::PING_TRANSACTION, data, NULL, 0);
if (status == DEAD_OBJECT)
return NULL;
}
b = new BpBinder(handle);
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
result.force_set(b);
e->refs->decWeak(this);
}
}
return result;
}
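The BpBinder returned here is what step 1 finally passes to interface_cast<IMediaPlayerService>(). A simplified sketch of what that cast does (the real code comes from the IMPLEMENT_META_INTERFACE macro; this is an illustration rather than the exact expansion):
// Simplified sketch of interface_cast<> at the end of step 1.
template<typename INTERFACE>
sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    return INTERFACE::asInterface(obj);
}
// For a remote binder such as our new BpBinder, asInterface() roughly does:
//   - obj->queryLocalInterface(descriptor) returns NULL (not in this process),
//   - so it wraps the BpBinder: return new BpMediaPlayerService(obj);
// From then on, every IMediaPlayerService method call goes through
// remote()->transact(), i.e. the same path traced above, but using the new
// handle instead of handle 0.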