[android] The Binder mechanism: servicemanager

The servicemanager source lives under /frameworks/base/cmds/servicemanager and consists of binder.c, binder.h, and service_manager.c.

The resulting servicemanager binary is installed under /system/bin/.
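
servicemanager is started very early by init. For context, a typical entry from the init.rc of this era looks roughly like the following (a sketch only; the exact lines vary by Android version):

service servicemanager /system/bin/servicemanager
    user system
    critical
    onrestart restart zygote
    onrestart restart media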

The entry point of servicemanager is the main function in service_manager.c:

int main(int argc, char **argv)
{
    struct binder_state *bs;
    void *svcmgr = BINDER_SERVICE_MANAGER;	// the binder service manager, handle 0

    bs = binder_open(128*1024);	// open /dev/binder and map 128*1024 bytes of memory

    if (binder_become_context_manager(bs)) {	// make this process the binder context manager
        LOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    svcmgr_handle = svcmgr;
    binder_loop(bs, svcmgr_handler);	// enter the binder loop
    return 0;
}
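
For reference, BINDER_SERVICE_MANAGER is the fixed handle 0 by which the service manager is addressed. In this era's service_manager.c it is defined essentially as follows (a sketch; check your own source tree):

#define BINDER_SERVICE_MANAGER ((void*) 0)	/* handle 0: the service manager itself */

void *svcmgr_handle;	/* set to BINDER_SERVICE_MANAGER in main() before the loop */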

1. Opening /dev/binder and mapping memory

struct binder_state *binder_open(unsigned mapsize)
{
    struct binder_state *bs;

    bs = malloc(sizeof(*bs));	// allocate the binder_state
    if (!bs) {
        errno = ENOMEM;
        return 0;
    }

    bs->fd = open("/dev/binder", O_RDWR);	// open the /dev/binder device file (O_NONBLOCK not set, i.e. blocking)
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open device (%s)\n",strerror(errno));
        goto fail_open;
    }

    bs->mapsize = mapsize;	// size of the region to map: 128*1024
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);	// map the device memory (read-only for this process)
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",strerror(errno));
        goto fail_map;
    }
    return bs;	// bs->fd holds the device descriptor; return bs for later ioctl calls

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return 0;
}
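
The binder_state filled in above is only a small bookkeeping struct; its definition in this era's binder.c is essentially:

struct binder_state
{
    int fd;	/* fd of the open /dev/binder */
    void *mapped;	/* start of the mmap'ed region */
    unsigned mapsize;	/* size of the mapped region, 128*1024 here */
};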

1.1 Opening the /dev/binder device file triggers the driver's open handler

static int binder_open(struct inode *nodp, struct file *filp)	// driver-side open of /dev/binder
{
	struct binder_proc *proc;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",current->group_leader->pid, current->pid);	// log the group leader pid and the thread pid

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);	// allocate a binder_proc
	if (proc == NULL)
		return -ENOMEM;
	get_task_struct(current);	// take a reference on the current task_struct
	proc->tsk = current;	// proc->tsk points at the current task_struct
	INIT_LIST_HEAD(&proc->todo);	// initialize the proc->todo list head
	init_waitqueue_head(&proc->wait);	// initialize the proc->wait wait-queue head
	proc->default_priority = task_nice(current);	// record the current nice value as proc->default_priority
	mutex_lock(&binder_lock);	// take the global binder_lock mutex
	binder_stats_created(BINDER_STAT_PROC);	// bump the binder_proc creation counter
	hlist_add_head(&proc->proc_node, &binder_procs);	// add proc->proc_node to the global binder_procs hlist
	proc->pid = current->group_leader->pid;	// proc->pid is the group leader's pid
	INIT_LIST_HEAD(&proc->delivered_death);	// initialize the proc->delivered_death list head
	filp->private_data = proc;	// stash proc in the file's private data
	mutex_unlock(&binder_lock);	// release the binder_lock mutex

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];
		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);	// format proc->pid into strbuf
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
		// create the debugfs file /binder/proc/<pid>
	}

	return 0;
}

Every open of /dev/binder allocates a binder_proc and initializes several of its members:

initialize the binder_proc->todo list

initialize the binder_proc->wait wait queue

link binder_proc->proc_node into the global binder_procs hlist

initialize the binder_proc->delivered_death list

point filp->private_data at the binder_proc (later ioctl calls recover the binder_proc through this pointer)

Finally, a debugfs file /binder/proc/$pid is created, named after the process id. The binder_proc fields touched here are sketched below.
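
A trimmed sketch of those binder_proc fields (the real struct in the driver has many more members):

struct binder_proc {
	struct hlist_node proc_node;	/* link in the global binder_procs hlist */
	struct rb_root threads;	/* binder_thread red-black tree, keyed by pid */
	struct rb_root nodes;	/* binder_node red-black tree, keyed by ptr */
	int pid;	/* group leader pid */
	struct task_struct *tsk;	/* task that opened the device */
	struct list_head todo;	/* work pending for the whole process */
	wait_queue_head_t wait;	/* threads sleep here waiting for work */
	struct list_head delivered_death;	/* death notifications in flight */
	long default_priority;	/* nice value recorded at open time */
	/* ... buffer management, refs, stats ... */
};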

2. Making this process the binder context manager

int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);	// the BINDER_SET_CONTEXT_MGR ioctl
}

2.1 The BINDER_SET_CONTEXT_MGR command tells the /dev/binder driver to install this process as the binder service manager

This enters the driver's ioctl handler; excerpting the path taken:

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)	
{
	int ret;
	struct binder_proc *proc = filp->private_data;	// recover the binder_proc stashed at open time
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);	// size of the command's payload
	void __user *ubuf = (void __user *)arg;	// user-space pointer argument

	// sleep on binder_user_error_wait until binder_stop_on_user_error < 2
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);	
	if (ret)
		return ret;

	mutex_lock(&binder_lock);	// take the global binder_lock mutex
	thread = binder_get_thread(proc);	// look up (or create) the calling binder_thread
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	...
	case BINDER_SET_CONTEXT_MGR:	// install the context manager
		if (binder_context_mgr_node != NULL) {	// is there already a manager node?
			printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
			ret = -EBUSY;
			goto err;
		}
		if (binder_context_mgr_uid != -1) {	// a manager uid was already recorded
			if (binder_context_mgr_uid != current->cred->euid) {	// and it is not the caller's effective uid
				printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",current->cred->euid,binder_context_mgr_uid);
				ret = -EPERM;
				goto err;
			}
		} 
		else
			binder_context_mgr_uid = current->cred->euid;	// record the caller's effective uid as the manager uid
		binder_context_mgr_node = binder_new_node(proc, NULL, NULL);	// create the context manager's binder node
		if (binder_context_mgr_node == NULL) {
			ret = -ENOMEM;
			goto err;
		}
		binder_context_mgr_node->local_weak_refs++;	// bump the local weak reference count
		binder_context_mgr_node->local_strong_refs++;	// bump the local strong reference count
		binder_context_mgr_node->has_strong_ref = 1;	// a strong reference is held
		binder_context_mgr_node->has_weak_ref = 1;	// a weak reference is held
		break;
	...
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	mutex_unlock(&binder_lock);	// release the binder_lock mutex
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);	// sleep again until binder_stop_on_user_error < 2
	if (ret && ret != -ERESTARTSYS)
		printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
	return ret;
}

First the binder_proc is recovered from filp->private_data.

2.2 Next, binder_get_thread is called

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;	// root of the proc's thread red-black tree

	while (*p) {	// the tree is empty on the first call, so this loop body is skipped
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			break;
	}
	
	if (*p == NULL) {	// no such thread yet: create it
		thread = kzalloc(sizeof(*thread), GFP_KERNEL);	// allocate the binder_thread
		if (thread == NULL)
			return NULL;
		binder_stats_created(BINDER_STAT_THREAD);	// bump the binder_thread creation counter
		thread->proc = proc;	// bind the thread to its binder_proc
		thread->pid = current->pid;	// record the calling thread's pid
		init_waitqueue_head(&thread->wait);	// initialize the thread's wait-queue head
		INIT_LIST_HEAD(&thread->todo);	// initialize the thread's todo list
		rb_link_node(&thread->rb_node, parent, p);
		rb_insert_color(&thread->rb_node, &proc->threads);
		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;	// must return to user space
		thread->return_error = BR_OK;	// no pending error
		thread->return_error2 = BR_OK;	// no pending error
	}
	return thread;
}

rb_node **p = &proc->threads.rb_node takes the root of the binder_proc's thread red-black tree.

The tree is searched for the calling thread; during initialization no binder_thread exists yet, so *p == NULL and the if branch is taken.

A binder_thread is created and its members are set up:

binder_thread->proc points at the owning binder_proc, i.e. the binder_thread is tied to its binder_proc

initialize the binder_thread->wait wait queue

link binder_thread->rb_node into the red-black tree and recolor it

and the new thread gets its default state (the relevant fields are sketched after this list):

binder_thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN

binder_thread->return_error = BR_OK

binder_thread->return_error2 = BR_OK
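
For reference, a trimmed sketch of binder_thread (the real struct has more members than shown):

struct binder_thread {
	struct binder_proc *proc;	/* owning process */
	struct rb_node rb_node;	/* link in proc->threads */
	int pid;	/* calling thread's pid */
	int looper;	/* BINDER_LOOPER_STATE_* bits */
	struct binder_transaction *transaction_stack;	/* in-flight transactions */
	struct list_head todo;	/* work queued for this specific thread */
	uint32_t return_error;	/* pending error to report, BR_OK if none */
	uint32_t return_error2;
	wait_queue_head_t wait;	/* sleeps here waiting for thread work */
};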

2.3 The BINDER_SET_CONTEXT_MGR branch of the switch

Check the global binder_context_mgr_node: is there already a manager node?

Check the global binder_context_mgr_uid: was a manager uid already recorded?

If not, record the caller's effective uid in binder_context_mgr_uid.

Then a binder entity (binder_node) is allocated for the global binder_context_mgr_node by calling binder_new_node; let's step into it.

2.4 binder_new_node

static struct binder_node *binder_new_node(struct binder_proc *proc,void __user *ptr,void __user *cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;	// root of the proc's node red-black tree
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);	// allocate the binder_node
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);	// bump the binder_node creation counter
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;	// bind the node to its binder_proc
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;	// work item type: node reference bookkeeping
	INIT_LIST_HEAD(&node->work.entry);	// initialize the node's work list entry
	INIT_LIST_HEAD(&node->async_todo);	// initialize the node's async todo list
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,"binder: %d:%d node %d u%p c%p created\n",
		     proc->pid, current->pid, node->debug_id,node->ptr, node->cookie);
	return node;
}

The root of the binder_proc's node red-black tree is taken and the while loop walks it; as before nothing is found, so the loop exits with *p == NULL.

A binder_node is allocated, i.e. the binder entity is created.

The binder_node is linked into the binder_proc->nodes red-black tree and recolored.

binder_node->proc points at the binder_proc, i.e. the binder_node is tied to its binder_proc.

Here both ptr and cookie are NULL.

The newly created binder_node (binder entity) gets its defaults (a struct sketch follows this list):

binder_node->work.type = BINDER_WORK_NODE

initialize the binder_node->work.entry list

initialize the binder_node->async_todo list
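
A trimmed sketch of binder_node, the kernel-side representation of a binder entity:

struct binder_node {
	int debug_id;
	struct binder_work work;	/* work.type = BINDER_WORK_NODE here */
	struct rb_node rb_node;	/* link in proc->nodes */
	struct binder_proc *proc;	/* owning process */
	void __user *ptr;	/* user-space object pointer, NULL for the manager */
	void __user *cookie;	/* user-space cookie, NULL for the manager */
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	unsigned has_strong_ref:1;
	unsigned has_weak_ref:1;
	struct list_head async_todo;	/* queued async (one-way) work */
	/* ... */
};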

2.5 Back in binder_ioctl, the reference-count fields of the context manager's binder_node (binder_context_mgr_node) are set

binder_context_mgr_node->local_weak_refs++; // bump the local weak reference count

binder_context_mgr_node->local_strong_refs++; // bump the local strong reference count

binder_context_mgr_node->has_strong_ref = 1; // a strong reference is held

binder_context_mgr_node->has_weak_ref = 1; // a weak reference is held

2.6 Falling out of the switch

Execution falls through to err::

 if (thread)
  thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;

This clears the BINDER_LOOPER_STATE_NEED_RETURN bit from binder_thread->looper. The BINDER_LOOPER_STATE_* bits used throughout the driver are listed below.
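
The looper bits come from an enum in the driver; in this era's binder.c it reads:

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,	/* BC_REGISTER_LOOPER seen */
	BINDER_LOOPER_STATE_ENTERED     = 0x02,	/* BC_ENTER_LOOPER seen */
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,	/* protocol violation */
	BINDER_LOOPER_STATE_WAITING     = 0x10,	/* sleeping in binder_thread_read */
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20	/* must return to user space */
};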

3. binder_loop(bs, svcmgr_handler)

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;
    
    readbuf[0] = BC_ENTER_LOOPER;	// tell the binder driver this thread is entering its loop
    binder_write(bs, readbuf, sizeof(unsigned));

    for (;;) {
        bwr.read_size = sizeof(readbuf);	// capacity of the read buffer
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;	// where the driver writes return commands
		// block reading /dev/binder, waiting for requests for a service
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);	// BINDER_WRITE_READ ioctl
		
        if (res < 0) {
            LOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
        if (res == 0) {
            LOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            LOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

3.1 binder_write

With readbuf[0] set to BC_ENTER_LOOPER:

int binder_write(struct binder_state *bs, void *data, unsigned len)
{
    struct binder_write_read bwr;
    int res;
    bwr.write_size = len;	// number of bytes to write
    bwr.write_consumed = 0;
    bwr.write_buffer = (unsigned) data;	// the data to write
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);	// BINDER_WRITE_READ ioctl
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",strerror(errno));
    }
    return res;
}

Here bwr.write_buffer points at the single BC_ENTER_LOOPER command and bwr.write_size is 4. The binder_write_read descriptor being filled in is sketched below.
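
binder_write_read is the descriptor both sides of the BINDER_WRITE_READ ioctl share; its definition in this era's binder.h:

struct binder_write_read {
	signed long	write_size;	/* bytes to write */
	signed long	write_consumed;	/* bytes consumed by driver */
	unsigned long	write_buffer;	/* address of the outgoing command stream */
	signed long	read_size;	/* bytes to read */
	signed long	read_consumed;	/* bytes consumed by driver */
	unsigned long	read_buffer;	/* address of the incoming return stream */
};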

3.1.1 The ioctl handler runs again; excerpting the path taken

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)	
{
	int ret;
	struct binder_proc *proc = filp->private_data;	// recover the binder_proc
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;	// user-space pointer argument

	// sleep on binder_user_error_wait until binder_stop_on_user_error < 2
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);	
	if (ret)
		return ret;

	mutex_lock(&binder_lock);	// take the global binder_lock mutex
	thread = binder_get_thread(proc);	// look up the calling binder_thread
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ: {	// write, then read
		struct binder_write_read bwr;	// the caller's read/write descriptor
		if (size != sizeof(struct binder_write_read)) {	// sanity-check the payload size
			ret = -EINVAL;
			goto err;
		}
		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {	// copy the descriptor from user space
			ret = -EFAULT;
			goto err;
		}
		binder_debug(BINDER_DEBUG_READ_WRITE,"binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
			     proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,bwr.read_size, bwr.read_buffer);

		if (bwr.write_size > 0) {	// service the write buffer first
			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
			if (ret < 0) {
				bwr.read_consumed = 0;	// on error, report zero bytes consumed on the read side
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))	// copy the descriptor back to user space
					ret = -EFAULT;
				goto err;
			}
		}
		if (bwr.read_size > 0) {	// then service the read buffer
			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
			if (!list_empty(&proc->todo))
				wake_up_interruptible(&proc->wait);
			if (ret < 0) {
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))	// copy the descriptor back to user space
					ret = -EFAULT;
				goto err;
			}
		}
		binder_debug(BINDER_DEBUG_READ_WRITE,"binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
			     proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,bwr.read_consumed, bwr.read_size);
		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	...
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	mutex_unlock(&binder_lock);	// release the binder_lock mutex
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);	// sleep again until binder_stop_on_user_error < 2
	if (ret && ret != -ERESTARTSYS)
		printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
	return ret;
}

The binder_proc is recovered from filp->private_data.

binder_get_thread returns the binder_thread (found in the red-black tree this time).

The switch enters its BINDER_WRITE_READ branch,

which copies in the bwr structure passed from user space.

3.1.2 Since write_size > 0, binder_thread_write is called; excerpting the path taken

int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,void __user *buffer, int size, signed long *consumed)
{
	uint32_t cmd;
	void __user *ptr = buffer + *consumed;	// ptr points at the first unconsumed byte
	void __user *end = buffer + size;	// end points one past the last valid byte

	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user(cmd, (uint32_t __user *)ptr))	// fetch the next command word
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			binder_stats.bc[_IOC_NR(cmd)]++;
			proc->stats.bc[_IOC_NR(cmd)]++;
			thread->stats.bc[_IOC_NR(cmd)]++;
		}
		switch (cmd) {
		...
		case BC_ENTER_LOOPER:	// the main thread enters its loop
			binder_debug(BINDER_DEBUG_THREADS,"binder: %d:%d BC_ENTER_LOOPER\n",proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {	// was BC_REGISTER_LOOPER already seen?
				thread->looper |= BINDER_LOOPER_STATE_INVALID;	// protocol error
				binder_user_error("binder: %d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;	// record the entered state
			break;
		...
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

Here ptr = buffer + 0 and end = buffer + 4, so ptr < end; and since a binder_thread's default return_error is BR_OK, the while loop is entered.

get_user copies the command word from user space into cmd,

then ptr += 4.

The switch then enters its BC_ENTER_LOOPER branch.

Given the thread's looper value, thread->looper & BINDER_LOOPER_STATE_REGISTERED is false,

so binder_thread->looper |= BINDER_LOOPER_STATE_ENTERED is set and the switch exits.

*consumed = ptr - buffer = 4; ptr now equals end, so the while loop exits (the loop runs once per command in the buffer; a worked illustration follows).

binder_thread_write returns 0.
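
As a hypothetical illustration of the one-iteration-per-command behaviour (BC_EXIT_LOOPER is another 4-byte command from the same protocol):

unsigned buf[2] = { BC_ENTER_LOOPER, BC_EXIT_LOOPER };	/* two 4-byte command words */
binder_write(bs, buf, sizeof(buf));	/* write_size = 8 */
/* pass 1: get_user reads buf[0], ptr = buffer + 4, *consumed = 4 */
/* pass 2: get_user reads buf[1], ptr = buffer + 8 == end, *consumed = 8, loop exits */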

3.1.3 binder_thread_write returned ret = 0

copy_to_user(ubuf, &bwr, sizeof(bwr)) copies bwr back to user space, and the switch exits.

Execution falls through err: and the ioctl completes.

 

4. Entering the for(;;) loop

4.1 ioctl(bs->fd, BINDER_WRITE_READ, &bwr)

        bwr.read_size = sizeof(readbuf);	// capacity of the read buffer
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;	// where the driver writes return commands
		// block reading /dev/binder, waiting for requests for a service
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);	// BINDER_WRITE_READ ioctl

This enters binder_ioctl once more; the path is the same BINDER_WRITE_READ excerpt already shown in 3.1.1, so the code is not repeated here.

As before, the binder_proc comes from filp->private_data, and binder_get_thread returns the binder_thread.

The BINDER_WRITE_READ branch copies the command descriptor in from user space.

This time write_size == 0 and read_size == 32*4 > 0, so binder_thread_read is called:

static int binder_thread_read(struct binder_proc *proc,struct binder_thread *thread,void  __user *buffer, int size,signed long *consumed, int non_block)
{
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);

	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (ptr == end)
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		thread->return_error = BR_OK;
		goto done;
	}


	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;
	mutex_unlock(&binder_lock);
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("binder: %d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
						proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,binder_stop_on_user_error < 2);	// sleep until binder_stop_on_user_error < 2
		}
		binder_set_nice(proc->default_priority);	// restore this proc's default priority
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} 
		else
			ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} 
	else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} 
		else
			ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
	}
	mutex_lock(&binder_lock);
	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		if (!list_empty(&thread->todo))
			w = list_first_entry(&thread->todo, struct binder_work, entry);
		else if (!list_empty(&proc->todo) && wait_for_proc_work)
			w = list_first_entry(&proc->todo, struct binder_work, entry);
		else {
			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,"binder: %d:%d BR_TRANSACTION_COMPLETE\n",proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;	// default: nothing to report
			const char *cmd_name;
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);
				if (put_user(node->cookie, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);

				binder_stat_br(proc, thread, cmd);
				binder_debug(BINDER_DEBUG_USER_REFS,"binder: %d:%d %s %d u%p c%p\n",
					     proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
			} else {
				list_del_init(&w->entry);
				if (!weak && !strong) {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,"binder: %d:%d node %d u%p c%p deleted\n",
						     proc->pid, thread->pid, node->debug_id,node->ptr, node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats_deleted(BINDER_STAT_NODE);
				} else {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "binder: %d:%d node %d u%p c%p state unchanged\n",
						     proc->pid, thread->pid, node->debug_id, node->ptr,
						     node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;	// the target binder died
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,"binder: %d:%d %s %p\n",proc->pid, thread->pid,
				      cmd == BR_DEAD_BINDER ?"BR_DEAD_BINDER":"BR_CLEAR_DEATH_NOTIFICATION_DONE",death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			tr.target.ptr = target_node->ptr;
			tr.cookie =  target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) || t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;	// a request
		} else {
			tr.target.ptr = NULL;
			tr.cookie = NULL;
			cmd = BR_REPLY;	// a reply
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = t->sender_euid;

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;
			tr.sender_pid = task_tgid_nr_ns(sender,current->nsproxy->pid_ns);
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
		tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size,sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,"binder: %d:%d %s %d %d:%d, cmd %dsize %zd-%zd ptr %p-%p\n",
			     proc->pid, thread->pid,(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :"BR_REPLY",
			     t->debug_id, t->from ? t->from->proc->pid : 0,t->from ? t->from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,tr.data.ptr.buffer, tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats_deleted(BINDER_STAT_TRANSACTION);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,"binder: %d:%d BR_SPAWN_LOOPER\n",proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
	}
	return 0;
}
Here *consumed (the caller's read_consumed) is 0, ptr = buffer, and end = buffer + 32*4.

The if (*consumed == 0) branch puts BR_NOOP at ptr, and ptr += 4.

wait_for_proc_work = (thread->transaction_stack == NULL && list_empty(&thread->todo)) evaluates to 1.

In if (thread->return_error != BR_OK && ptr < end), the thread's default return_error is BR_OK, so the condition is false.

binder_thread->looper |= BINDER_LOOPER_STATE_WAITING marks the thread as waiting,

and binder_proc->ready_threads++.

Execution enters if (wait_for_proc_work) and reaches the if (non_block) test.

non_block is filp->f_flags & O_NONBLOCK; the device was opened with plain O_RDWR, so non_block == 0.

wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread)) therefore puts the thread to sleep.

The wake-up condition is binder_has_proc_work:

static int binder_has_proc_work(struct binder_proc *proc,struct binder_thread *thread)
{	// true if binder_proc->todo is non-empty, or BINDER_LOOPER_STATE_NEED_RETURN is set on the thread
	return !list_empty(&proc->todo) ||(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}
servicemanager now blocks here until something wakes it up. (The per-thread counterpart, used when wait_for_proc_work is false, is sketched below.)
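
For comparison, the per-thread check used on the else branch (from the same driver source) is:

static int binder_has_thread_work(struct binder_thread *thread)
{	// work queued on the thread, a pending error, or NEED_RETURN set
	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}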


To be continued...



