Main application-layer logic
//1. Open the driver and mmap the shared buffer
binder_open
//2. Build a binder_write_read, exchange it with the driver via ioctl, and look up the server's handle by name
ioctl(bs->fd, BINDER_WRITE_READ, &bwr)
//3. Read back the result
handle = bio_get_ref(&reply);
- Open the driver and mmap the buffer via binder_open, the same flow as the servicemanager startup (a sketch of binder_open follows this list).
- Build a binder_write_read and exchange it with the driver via ioctl, asking servicemanager for the target service's handle: svcmgr_lookup, binder_call
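For reference, binder_open (frameworks/native/cmds/servicemanager/binder.c) just opens /dev/binder, checks the protocol version, and mmaps the read-only buffer the driver copies transactions into; roughly, with the error handling trimmed:
struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs = malloc(sizeof(*bs));
    struct binder_version vers;

    // open the binder device
    bs->fd = open("/dev/binder", O_RDWR);

    // make sure user space and the driver speak the same protocol
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        /* ... version mismatch, bail out ... */
    }

    // map the buffer the driver will copy transaction data into
    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    return bs;
    // (the real code checks every step and cleans up on failure)
}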
uint32_t svcmgr_lookup(struct binder_state *bs, uint32_t target, const char *name)
{
uint32_t handle;
unsigned iodata[512/4];
struct binder_io msg, reply;
//initialize the binder_io over a local buffer
bio_init(&msg, iodata, sizeof(iodata), 4);
//header
bio_put_uint32(&msg, 0); // strict mode header
// arguments: interface descriptor and service name
bio_put_string16_x(&msg, SVC_MGR_NAME);
bio_put_string16_x(&msg, name);
if (binder_call(bs, &msg, &reply, target, SVC_MGR_CHECK_SERVICE))
return 0;
//the handle comes back in the reply
handle = bio_get_ref(&reply);
if (handle)
//send BC_ACQUIRE for the new handle
binder_acquire(bs, handle);
//finish the call
binder_done(bs, &msg, &reply);
return handle;
}
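svcmgr_lookup delegates the actual transaction to binder_call, which packs the binder_io into a BC_TRANSACTION, issues BINDER_WRITE_READ, and keeps reading until binder_parse has consumed the BR_REPLY; roughly, abridged from the same binder.c (overflow check and error logging trimmed):
int binder_call(struct binder_state *bs,
                struct binder_io *msg, struct binder_io *reply,
                uint32_t target, uint32_t code)
{
    int res;
    struct binder_write_read bwr;
    struct {
        uint32_t cmd;
        struct binder_transaction_data txn;
    } __attribute__((packed)) writebuf;
    unsigned readbuf[32];

    // one BC_TRANSACTION command followed by the transaction data
    writebuf.cmd = BC_TRANSACTION;
    writebuf.txn.target.handle = target;      // 0 = servicemanager
    writebuf.txn.code = code;                 // e.g. SVC_MGR_CHECK_SERVICE
    writebuf.txn.flags = 0;
    writebuf.txn.data_size = msg->data - msg->data0;
    writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0);
    writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0;
    writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0;

    bwr.write_size = sizeof(writebuf);
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) &writebuf;

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        // blocks in the driver until the reply arrives
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0)
            goto fail;

        // binder_parse returns 0 once it has copied the BR_REPLY into *reply
        res = binder_parse(bs, reply, (uintptr_t) readbuf, bwr.read_consumed, 0);
        if (res == 0) return 0;
        if (res < 0) goto fail;
    }

fail:
    memset(reply, 0, sizeof(*reply));
    reply->flags |= BIO_F_IOERROR;
    return -1;
}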
The binder_call flow is largely the same as for server registration; the request ends up in servicemanager's user-space handler, svcmgr_handler:
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
//...
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
if (!handle)
break;
//put the handle into the reply --> see 2.1
bio_put_ref(reply, handle);
return 0;
}
//...
}
//find the handle of the requested service
uint32_t do_find_service(struct binder_state *bs, const uint16_t *s, size_t len, uid_t uid, pid_t spid)
{
struct svcinfo *si;
si = find_svc(s, len);
//ALOGI("check_service('%s') handle = %x\n", str8(s, len), si ? si->handle : 0);
if (si && si->handle) {
//...
return si->handle;
} else {
return 0;
}
}
//look up the svcinfo matching the name
struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
struct svcinfo *si;
for (si = svclist; si; si = si->next) {
if ((len == si->len) &&
!memcmp(s16, si->name, len * sizeof(uint16_t))) {
return si;
}
}
return NULL;
}
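For context, svclist is the singly linked list of svcinfo entries that servicemanager builds up as services register; the struct (from service_manager.c) looks roughly like this:
struct svcinfo
{
    struct svcinfo *next;        // next entry in svclist
    uint32_t handle;             // handle of the service's binder_ref in servicemanager
    struct binder_death death;   // death notification callback
    int allow_isolated;
    size_t len;                  // name length, in uint16_t units
    uint16_t name[0];            // UTF-16 service name, compared by find_svc
};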
2.1 bio_put_ref writes the handle found for the requested name into reply:
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
struct flat_binder_object *obj;
if (handle)
obj = bio_alloc_obj(bio);
else
obj = bio_alloc(bio, sizeof(*obj));
if (!obj)
return;
obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
//the object is a handle reference
obj->type = BINDER_TYPE_HANDLE;
//the handle found by do_find_service
obj->handle = handle;
obj->cookie = 0;
}
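On the client side, bio_get_ref (also in binder.c) is the counterpart: it pulls the flat_binder_object back out of the reply and returns its handle field, roughly:
uint32_t bio_get_ref(struct binder_io *bio)
{
    struct flat_binder_object *obj;

    // read the next object recorded in the offsets area
    obj = _bio_get_obj(bio);
    if (!obj)
        return 0;

    // by the time the reply reaches the client, the driver has rewritten
    // the handle to a value valid in the client's own process
    if (obj->type == BINDER_TYPE_HANDLE)
        return obj->handle;

    return 0;
}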
After servicemanager's loop (binder_loop) and parser (binder_parse) have processed the transaction, binder_send_reply is called to return the data:
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
//...
switch(cmd) {
//...
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
res = func(bs, txn, &msg, &reply);
//send the reply back, see 2.2
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
ptr += sizeof(*txn);
break;
}
}
}
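The loop that keeps feeding binder_parse is binder_loop, which servicemanager runs after registering itself as the context manager; roughly, abridged:
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    // tell the driver this thread enters the looper
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        // sleep in the driver until a transaction arrives
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0)
            break;

        // dispatch BR_TRANSACTION to func, i.e. svcmgr_handler
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res < 0)
            break;
    }
}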
2.2 binder_send_reply
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
binder_uintptr_t buffer_to_free,
int status)
{
struct {
uint32_t cmd_free;
binder_uintptr_t buffer;
uint32_t cmd_reply;
struct binder_transaction_data txn;
} __attribute__((packed)) data;
//the free-buffer command
data.cmd_free = BC_FREE_BUFFER;
data.buffer = buffer_to_free;
//the reply command
data.cmd_reply = BC_REPLY;
data.txn.target.ptr = 0;
data.txn.cookie = 0;
data.txn.code = 0;
//status == 0 means no error
if (status) {
data.txn.flags = TF_STATUS_CODE;
data.txn.data_size = sizeof(int);
data.txn.offsets_size = 0;
data.txn.data.ptr.buffer = (uintptr_t)&status;
data.txn.data.ptr.offsets = 0;
} else {
//this is the path taken here
//point the txn at the reply payload; the handle sits in reply->data0
data.txn.flags = 0;
data.txn.data_size = reply->data - reply->data0;
data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
}
//hand the packed commands to the driver
binder_write(bs, &data, sizeof(data));
}
int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
//exchange with the driver
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
fprintf(stderr,"binder_write: ioctl failed (%s)\n",
strerror(errno));
}
return res;
}
The assembled data finally enters the driver in binder_thread_write:
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
//...
switch (cmd) {
//...
case BC_FREE_BUFFER: {
//...mainly reclaims the buffer the driver allocated for the previous transaction; not covered here
break;
}
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
//see 2.3
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break;
}
//...
}
}
//...
return 0;
}
2.3 binder_transaction
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
//..
if (reply) {
//..
//use transaction_stack to find the binder_proc the reply must go back to
in_reply_to = thread->transaction_stack;
//...
//update the transaction_stack of the current thread and of the target thread
thread->transaction_stack = in_reply_to->to_parent;
target_thread = in_reply_to->from;
//...
target_proc = target_thread->proc;
}
//...
//locate the target todo queue
if (target_thread) {
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
}
// allocate a buffer in the target process and copy the data over
t = kzalloc(sizeof(*t), GFP_KERNEL);
//..
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
//..
t->buffer->transaction = t;
t->buffer->target_node = target_node;
//..
//copy data.ptr.buffer into the target process's buffer
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
//...
}
//copy the offsets array
if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
//...
}
//..
off_end = (void *)offp + tr->offsets_size;
for (; offp < off_end; offp++) {
//walk the offsets and fix up each flat_binder_object
struct flat_binder_object *fp;
//..
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
//...
switch (fp->type) {
//...
//handle types are processed here
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
//look up the binder_ref for this handle in the sender's proc, i.e. servicemanager
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
//the binder_ref leads to the service's binder_node
//check whether that node's proc is the target process: the target is the client while the node belongs to the server process, so they differ and the else branch is taken
if (ref->node->proc == target_proc) {
} else {
//this is the branch taken
struct binder_ref *new_ref;
//in the target process, i.e. the client, create a new binder_ref for this service's binder_node; its proc points at the client, so this ref belongs to the client process
new_ref = binder_get_ref_for_node(target_proc, ref->node);
fp->handle = new_ref->desc;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
//...
}
}
}
}
if (reply) {
//pop the transaction off the stack
binder_pop_transaction(target_thread, in_reply_to);
}
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
//...
if (target_wait)
//wake up the client
wake_up_interruptible(target_wait);
}
The main work in this step is handling the BC_REPLY command: starting from the handle in the service_manager process, the driver finds the corresponding binder_ref and, through it, the binder_node; it then creates a new binder_ref in the target process (the client), builds the flat_binder_object, packs it into a binder_transaction, and wakes up the target client process.
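The new handle the client receives is simply the desc assigned when that new binder_ref is inserted into the client's refs tree; an abridged sketch of binder_get_ref_for_node (older kernels; details vary by kernel version):
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
                                                  struct binder_node *node)
{
    struct rb_node *n;
    struct binder_ref *ref, *new_ref;
    //... first search proc->refs_by_node; if a ref for this node
    //    already exists in this process, just return it

    new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
    if (new_ref == NULL)
        return NULL;
    new_ref->proc = proc;       // the ref belongs to the client process
    new_ref->node = node;       // ...but points at the server's binder_node
    //... insert into proc->refs_by_node

    // pick the smallest unused descriptor; 0 is reserved for servicemanager
    new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
    for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
        ref = rb_entry(n, struct binder_ref, rb_node_desc);
        if (ref->desc > new_ref->desc)
            break;
        new_ref->desc = ref->desc + 1;
    }
    //... insert into proc->refs_by_desc, take node references, etc.
    return new_ref;
}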
When the client process issued the ioctl to look up the server it went to sleep in the driver; once the reply from the service_manager process wakes it up, it reads the data in binder_thread_read:
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
//...
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
//pull the next work item off the todo list
if (!list_empty(&thread->todo)) {
w = list_first_entry(&thread->todo, struct binder_work,
entry);
} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
w = list_first_entry(&proc->todo, struct binder_work,
entry);
}
///...
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
} break;
//...
}
if (t->buffer->target_node) {
//...
} else {
tr.target.ptr = 0;
tr.cookie = 0;
//BC_REPLY becomes BR_REPLY on the read side
cmd = BR_REPLY;
}
tr.code = t->code;
//...
//fill in the data for the reader
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)(
(uintptr_t)t->buffer->data +
proc->user_buffer_offset);
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
//copy the transaction data to user space
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
}
//..
}
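The sleeping mentioned above also happens inside binder_thread_read, just before the excerpt: the thread blocks until there is work on its todo list. A fragment from earlier in the same function (older kernels), roughly:
    // earlier in binder_thread_read, before the while (1) loop:
    if (wait_for_proc_work) {
        //...
        ret = wait_event_freezable_exclusive(proc->wait,
                        binder_has_proc_work(proc, thread));
    } else {
        //...
        // the client thread that issued BC_TRANSACTION waits here
        ret = wait_event_freezable(thread->wait,
                        binder_has_thread_work(thread));
    }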
After the kernel has copied the data to user space, the client's user-space code resumes and reaches binder_parse:
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
//...
switch(cmd) {
case BR_REPLY: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: reply too small!\n");
return -1;
}
binder_dump_txn(txn);
if (bio) {
//unpack the data into the binder_io; this binder_io is the reply
bio_init_from_txn(bio, txn);
bio = 0;
} else {
/* todo FREE BUFFER */
}
ptr += sizeof(*txn);
r = 0;
break;
}
}
//...
}
Control then returns to svcmgr_lookup (binder_call has filled in reply), and bio_get_ref extracts the handle:
uint32_t svcmgr_lookup(struct binder_state *bs, uint32_t target, const char *name)
{
uint32_t handle;
unsigned iodata[512/4];
struct binder_io msg, reply;
bio_init(&msg, iodata, sizeof(iodata), 4);
bio_put_uint32(&msg, 0); // strict mode header
bio_put_string16_x(&msg, SVC_MGR_NAME);
bio_put_string16_x(&msg, name);
//call into the driver to look up the service
if (binder_call(bs, &msg, &reply, target, SVC_MGR_CHECK_SERVICE))
return 0;
//read the handle out of the reply
handle = bio_get_ref(&reply);
if (handle)
binder_acquire(bs, handle);
binder_done(bs, &msg, &reply);
return handle;
}
Summary:
The client obtains a service by its name.
- The client first assembles a binder_transaction_data and exchanges it with the driver; the driver copies the data into the servicemanager process and queues it on its todo list.
- When servicemanager wakes up it parses the todo work; the command is copied to user space, user space looks the name up and finds the handle.
- The handle is written back down into the driver from the servicemanager process; the driver maps the handle to a binder_ref and thus to the binder_node, uses the transaction stack to find the process that must receive the reply (the client), creates a binder_ref for the service under the client's binder_proc, packs the new handle into the reply data, adds it to the client's todo queue, and wakes the client up.
- After waking up, the client reads the data, the driver copies it to user space, and user space parses it to obtain the handle.
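Putting the pieces together, a minimal client could look like the sketch below; the service name "hello" and the code HELLO_SVR_CMD_SAYHELLO are made-up placeholders for illustration, not part of the sources above:
int main(void)
{
    struct binder_state *bs;
    uint32_t handle;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    // 1. open the driver and mmap, same as the servicemanager startup
    bs = binder_open(128*1024);
    if (!bs)
        return -1;

    // 2. ask servicemanager (handle 0) for the "hello" service's handle
    handle = svcmgr_lookup(bs, 0, "hello");   // "hello" is a hypothetical service name
    if (!handle)
        return -1;

    // 3. use the returned handle as the target of further binder_call()s
    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0);                  // strict mode header
    if (binder_call(bs, &msg, &reply, handle, HELLO_SVR_CMD_SAYHELLO))  // hypothetical command code
        return -1;
    binder_done(bs, &msg, &reply);
    return 0;
}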