Binder Subsystem Driver Architecture

Binder is the primary inter-process communication (IPC) mechanism of the Android system. There is rarely any need to write new code at the driver layer, so the goal here is to understand how Binder works inside the driver. Just as mastering Linux driver development requires understanding the Linux driver model, understanding the Android system requires a solid grasp of the Binder mechanism; with that foundation, the rest of Android becomes much easier to learn.

Binder Driver Registration

Binder source file: drivers/android/binder.c

static int __init binder_init(void)
{
    ......
    ret = misc_register(&binder_miscdev);--------register the binder misc device
    ......
}

static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,----------------binder's main interaction interface
	.compat_ioctl = binder_ioctl,------------------same handler for 32-bit user space
	.mmap = binder_mmap,------------------------maps the kernel transaction buffer into user space
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

static struct miscdevice binder_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "binder",
	.fops = &binder_fops
};

Once the binder device is registered, user space can drive it through open/mmap/ioctl (a minimal user-space sketch follows the list below). The rest of this article is organized around these questions:

  • How does binder pass data between processes?
  • How is binder's data packed?
  • How does binder's memory mapping work?
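Before getting into those questions, here is a minimal user-space sketch (not taken from any Android source file; the device path, ioctl codes and struct fields are real, the mapping size and error handling are illustrative) of what open/mmap/ioctl against the binder device looks like:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* struct binder_version, BINDER_* ioctl codes */

int main(void)
{
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);   /* ends up in binder_open() */
	if (fd < 0)
		return 1;

	struct binder_version vers;
	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||          /* handled by binder_ioctl() */
	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
		close(fd);
		return 1;
	}

	/* binder_mmap() sets up the buffer that transaction data is copied into;
	 * user space only ever reads from this mapping. 128 KB is an illustrative size. */
	void *map = mmap(NULL, 128 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED) {
		close(fd);
		return 1;
	}

	printf("binder protocol version %d\n", vers.protocol_version);
	munmap(map, 128 * 1024);
	close(fd);
	return 0;
}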

Binder Communication Flow (binder_ioctl)

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    ...
    thread = binder_get_thread(proc);----look up (or create) the binder_thread for the calling thread
    ...
    switch (cmd) {
        case BINDER_WRITE_READ:
        ret = binder_ioctl_write_read(filp, cmd, arg, thread);-----read/write data
    ...
    }
    ...
}
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
    ...
	struct binder_proc *proc = filp->private_data;
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;
	
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {---------copy the binder_write_read struct from user space
		ret = -EFAULT;
		goto out;
	}

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);------------------process the write buffer (commands from user space)
	    ...
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);-----------process the read buffer (work queued for this thread)
		...
	}
	
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {--------------copy the binder_write_read struct back to user space
		ret = -EFAULT;
		goto out;
	}
	...
out:
	return ret;
}
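To make the write/read split concrete, here is a sketch of how user space drives binder_ioctl_write_read(). BC_ENTER_LOOPER and the struct fields are real protocol elements; the helper itself is only an illustration:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int binder_enter_loop(int fd)
{
	uint32_t cmd = BC_ENTER_LOOPER;   /* marks this thread as a looper thread */
	struct binder_write_read bwr;

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer   = (binder_uintptr_t)(uintptr_t)&cmd;
	bwr.write_size     = sizeof(cmd);
	bwr.write_consumed = 0;
	bwr.read_size      = 0;           /* write-only call: binder_thread_read() is skipped */

	/* The driver copies bwr in, runs binder_thread_write() over the command
	 * buffer, then copies bwr (with the consumed counts updated) back out. */
	return ioctl(fd, BINDER_WRITE_READ, &bwr);
}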
  • binder_thread_write
    We only look at the part that handles data transfer.
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;
	
    while (ptr < end && thread->return_error == BR_OK) {
        if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		...
		switch (cmd) {
		...
			case BC_TRANSACTION:
			case BC_REPLY: {
				struct binder_transaction_data tr;

				if (copy_from_user(&tr, ptr, sizeof(tr)))
					return -EFAULT;
				ptr += sizeof(tr);
				binder_transaction(proc, thread, &tr, cmd == BC_REPLY);-----the key entry point for data transfer
				break;
			}
		...
		}
    }
}
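The write buffer that binder_thread_write() walks is simply a sequence of 32-bit BC_* commands, each followed by its payload. Below is a hedged sketch of how one BC_TRANSACTION entry could be laid out; the request code and data pointers are placeholders, only the struct fields and constants are real:

#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

struct one_transaction {
	uint32_t cmd;                          /* read by get_user() above */
	struct binder_transaction_data tr;     /* read by copy_from_user() above */
} __attribute__((packed));                     /* no padding between cmd and tr */

static void fill_transaction(struct one_transaction *out,
			     const void *parcel, size_t parcel_size,
			     const binder_size_t *offsets, size_t offsets_size)
{
	memset(out, 0, sizeof(*out));
	out->cmd = BC_TRANSACTION;
	out->tr.target.handle    = 0;          /* handle 0: the service manager */
	out->tr.code             = 1;          /* placeholder request code */
	out->tr.flags            = 0;          /* synchronous, i.e. not TF_ONE_WAY */
	out->tr.data_size        = parcel_size;
	out->tr.offsets_size     = offsets_size;
	out->tr.data.ptr.buffer  = (binder_uintptr_t)(uintptr_t)parcel;
	out->tr.data.ptr.offsets = (binder_uintptr_t)(uintptr_t)offsets;
	/* The whole struct goes into bwr.write_buffer; binder_transaction()
	 * then copies *buffer and *offsets into the target's binder buffer. */
}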
  • binder_thread_read
    The receiving side: it blocks until work arrives on the thread's or process's todo list, dequeues the binder_work items, and returns them to user space as BR_* commands (for example BR_TRANSACTION for the target, BR_TRANSACTION_COMPLETE for the sender).

binder_transaction

Let's walk through a typical data transfer flow.

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	...
	if (reply) {
		...
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			ref = binder_get_ref(proc, tr->target.handle);
			...
			target_node = ref->node;--------------------a normal node (not the service manager)
		} else {
			target_node = binder_context_mgr_node;------handle 0: the service manager
			...
		}
		target_proc = target_node->proc;-----------the target process of this transaction
		...
	}
	
	if (target_thread) {
		...
	} else {
		target_list = &target_proc->todo;---------the target process's todo list
		target_wait = &target_proc->wait;
	}
	...
	t = kzalloc(sizeof(*t), GFP_KERNEL);-------------the code below fills in each field of t
	...
	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	...
	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;----------------------source thread (so the reply can find its way back)
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;------------target process
	t->to_thread = target_thread;--------target thread
	t->code = tr->code;------------------request code from user data
	t->flags = tr->flags;----------------flags from user data
	t->priority = task_nice(current);----priority of the current thread
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));-----allocate a buffer from the target process's mapped area for the user data
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	...
	offp = (binder_size_t *)(t->buffer->data +
				 ALIGN(tr->data_size, sizeof(void *)));
	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {---------copy the user's data into t->buffer->data
		...
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {-----copy the user's offsets array into offp; it marks where the binder objects sit
		...
	}
	off_end = (void *)offp + tr->offsets_size;
	off_min = 0;
	// process the embedded binder objects (BINDER_TYPE_*)
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;

		if (*offp > t->buffer->data_size - sizeof(*fp) ||
		    *offp < off_min ||
		    t->buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(u32))) {
			...
			goto err_bad_offset;
		}
		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
		off_min = *offp + sizeof(struct flat_binder_object);
		switch (fp->type) {
			case BINDER_TYPE_BINDER:
			case BINDER_TYPE_WEAK_BINDER: {
			...
			} break;
			case BINDER_TYPE_HANDLE:
			case BINDER_TYPE_WEAK_HANDLE: {
			...
			} break;
			case BINDER_TYPE_FD: {
				int target_fd;
				struct file *file;
				...
				file = fget(fp->handle);
				...
				if (security_binder_transfer_file(proc->tsk,
							  target_proc->tsk,
							  file) < 0) {
					...
				}
				target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
				...
				task_fd_install(target_proc, target_fd, file);
				...
				fp->handle = target_fd;
			} break;
			...
		}
	}
	...
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);-------------queue the transaction on the target's todo list
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);--------queue on the calling thread's todo list to signal the transfer completed
	....
}
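The BINDER_TYPE_FD branch above is what makes file-descriptor passing work: the sender flattens an fd into the parcel as a flat_binder_object, and the driver replaces fp->handle with a descriptor newly installed in the target process. A rough sketch of the sender's side, using the older flat_binder_object layout (a plain type field rather than hdr.type) that matches the driver version quoted here; the struct fd_parcel wrapper and its payload are purely illustrative:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <linux/android/binder.h>

struct fd_parcel {
	uint32_t payload;                      /* ordinary parcel data before the object */
	struct flat_binder_object obj;         /* the entry the offsets loop above looks for */
};

static void flatten_fd(struct fd_parcel *p, binder_size_t *offsets, int fd)
{
	memset(p, 0, sizeof(*p));
	p->payload    = 0;                     /* placeholder ordinary data */
	p->obj.type   = BINDER_TYPE_FD;
	p->obj.handle = fd;                    /* rewritten to target_fd by the driver */
	offsets[0]    = offsetof(struct fd_parcel, obj);   /* stored in tr.data.ptr.offsets */
}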

The relationships among the process, thread, and transaction structures in binder are shown in the figure below.
[Figure 1: relationships among binder_proc, binder_thread and binder_transaction]

Binder Data Packing

To summarize, the data format packed by the user layer looks like this:
[Figure 2: layout of the data packed by user space]
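The nesting can also be summarized in a sketch that strings together the hypothetical helpers from the earlier snippets (struct one_transaction and fill_transaction() are the illustrative pieces defined above; the ioctl, struct binder_write_read and the BC_/BR_ protocol are real):

/*
 *  ioctl(fd, BINDER_WRITE_READ, &bwr)
 *    bwr.write_buffer -->  [ BC_TRANSACTION | struct binder_transaction_data ]
 *                              tr.data.ptr.buffer  -->  parcel bytes + flat_binder_object(s)
 *                              tr.data.ptr.offsets -->  offset of each flat_binder_object
 *    bwr.read_buffer  <--  [ BR_* commands filled in by binder_thread_read() ]
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int send_one_call(int fd, const void *parcel, size_t parcel_size,
			 binder_size_t *offsets, size_t offsets_size)
{
	struct one_transaction txn;            /* hypothetical type from the sketch above */
	struct binder_write_read bwr;
	uint32_t read_buf[64];                 /* room for the BR_* return commands */

	fill_transaction(&txn, parcel, parcel_size, offsets, offsets_size);

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&txn;
	bwr.write_size   = sizeof(txn);
	bwr.read_buffer  = (binder_uintptr_t)(uintptr_t)read_buf;
	bwr.read_size    = sizeof(read_buf);

	/* One syscall both queues the transaction on the target's todo list and
	 * blocks in binder_thread_read() until BR_TRANSACTION_COMPLETE / BR_REPLY
	 * arrive on this thread's todo list. */
	return ioctl(fd, BINDER_WRITE_READ, &bwr);
}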
