The Service Manager process sends BC_REPLY, wakes up the FregServer process, returns BR_TRANSACTION_COMPLETE, and then sleeps waiting on proc->wait

This article is based on the book 《Android系统源代码情景分析》 (Android System Source Code Scenario Analysis) by Luo Shengyang (罗升阳).

1. Service Manager code:

       ~/Android/frameworks/base/cmd/servicemanager
       ----binder.c
       ----service_manager.c
       ----binder.h


        Driver code:

       ~/Android/kernel/goldfish/drivers/staging/android

       ----binder.c

       ----binder.h


2. Source code analysis

       From the previous article, Android Binder IPC - the Service Manager process handles BC_TRANSACTION and returns BR_TRANSACTION (http://blog.csdn.net/jltxgcy/article/details/26151113), we already know that after Service Manager successfully registers a Service component into its internal list of Service components, svclist, it calls binder_send_reply to return the registration result to the Binder driver, and the Binder driver then forwards that result to the process that requested the registration.

      ~/Android/frameworks/base/cmd/servicemanager

       ----binder.c

void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       void *buffer_to_free,
                       int status)  //status is 0: the success code 0 has already been written into the binder_io structure reply
{
    struct {
        uint32_t cmd_free;
        void *buffer;
        uint32_t cmd_reply;
        struct binder_txn txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;//the data that follows BC_FREE_BUFFER is the user-space address of a kernel buffer
    data.buffer = buffer_to_free;//a user-space address pointing to the kernel buffer that carried the IPC data
    data.cmd_reply = BC_REPLY;//the data that follows BC_REPLY is a binder_transaction_data structure, i.e. a binder_txn structure
    data.txn.target = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {//status is 0, so the else branch below is taken
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offs_size = 0;
        data.txn.data = &status;
        data.txn.offs = 0;
    } else {
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;//size of the success code 0 that was put into reply after do_add_service succeeded
        data.txn.offs_size = ((char*) reply->offs) - ((char*) reply->offs0);//0
        data.txn.data = reply->data0;//points to the 0 value
        data.txn.offs = reply->offs0;//nothing to point at
    }
    binder_write(bs, &data, sizeof(data));
}
       First, an anonymous structure data is defined to describe one BC_FREE_BUFFER and one BC_REPLY command protocol, represented by the members cmd_free and cmd_reply respectively. The data that follows the BC_FREE_BUFFER command protocol is the user-space address of a kernel buffer, and it is stored in the member buffer; the data that follows the BC_REPLY command protocol is a binder_transaction_data structure, i.e. a binder_txn structure, and it is stored in the member txn.
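
       For reference, binder_txn and binder_io are declared in servicemanager's binder.h roughly as follows (abridged; field details may vary slightly between Android versions):

struct binder_txn
{
    void *target;
    void *cookie;
    uint32_t code;
    uint32_t flags;

    uint32_t sender_pid;
    uint32_t sender_euid;

    uint32_t data_size;
    uint32_t offs_size;

    void *data;
    void *offs;
};

struct binder_io
{
    char *data;            /* pointer to read/write from */
    uint32_t *offs;        /* array of offsets */
    uint32_t data_avail;   /* bytes available in data buffer */
    uint32_t offs_avail;   /* entries available in offsets array */

    char *data0;           /* start of data buffer */
    uint32_t *offs0;       /* start of offsets buffer */
    uint32_t flags;
    uint32_t unused;
};
       Because svcmgr_handler put a single uint32_t value 0 into reply after do_add_service succeeded, reply->data - reply->data0 is just the size of that one value and the offsets array is empty, which is exactly what the else branch above packs into data.txn.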

       Then binder_write is called to send the BC_FREE_BUFFER and BC_REPLY command protocols stored in the anonymous structure data to the Binder driver. It is implemented as follows:

      ~/Android/frameworks/base/cmd/servicemanager

       ----binder.c

int binder_write(struct binder_state *bs, void *data, unsigned len)
{
    struct binder_write_read bwr;
    int res;
    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (unsigned) data;//pointer to the anonymous structure data
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}
      binder_write sends the BC_FREE_BUFFER and BC_REPLY command protocols to the Binder driver through the BINDER_WRITE_READ I/O control command. In the driver this ioctl is handled by binder_ioctl, and since only write_size is greater than 0 here, it ends up in binder_thread_write.
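
       As a reminder of how that dispatch works, the binder_write_read structure and the BINDER_WRITE_READ branch of binder_ioctl look roughly like this in this kernel version (heavily abridged; treat it as a sketch rather than an exact listing):

struct binder_write_read {
	signed long	write_size;	/* bytes to write */
	signed long	write_consumed;	/* bytes consumed by driver */
	unsigned long	write_buffer;
	signed long	read_size;	/* bytes to read */
	signed long	read_consumed;	/* bytes consumed by driver */
	unsigned long	read_buffer;
};

	case BINDER_WRITE_READ: {
		struct binder_write_read bwr;
		......
		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {//ubuf is the user-space pointer passed to ioctl
			ret = -EFAULT;
			goto err;
		}
		if (bwr.write_size > 0) {//binder_write set write_size = len, so this branch runs
			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
			......
		}
		if (bwr.read_size > 0) {//binder_write set read_size = 0, so this branch is skipped
			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
			......
		}
		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
       Later, when binder_loop issues the same ioctl with only read_size greater than 0, the second branch runs instead, which is how binder_thread_read is reached further below.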

       ~/Android/kernel/goldfish/drivers/staging/android

       ----binder.c

int
binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
		    void __user *buffer, int size, signed long *consumed)
{
	uint32_t cmd;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
                ......
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))//copies the binder_txn structure data.txn mentioned above
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);//tr is the data.txn that was filled in above
			break;
		}
                ........
		default:
			printk(KERN_ERR "binder: %d:%d unknown command %d\n", proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
      We will not analyze the BC_FREE_BUFFER command for now and only look at BC_REPLY, which the second iteration of the while loop reaches.
      tr is the data.txn that was filled in above. binder_transaction is then called; it is implemented as follows:

      ~/Android/kernel/goldfish/drivers/staging/android

      ----binder.c

static void
binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
	struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	......
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	........
	uint32_t return_error;

	........

	if (reply) {
		in_reply_to = thread->transaction_stack;//take the binder_transaction structure off thread's transaction stack and save it in in_reply_to
		if (in_reply_to == NULL) {
			......
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {//to_thread was set in the previous article
			........
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;//the Service Manager main thread's transaction_stack becomes NULL
		target_thread = in_reply_to->from;//find the target thread
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {//the FregServer main thread's transaction_stack is exactly this in_reply_to
			.........
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;//the target process has been found
	} else {
		........
	}
	if (target_thread) {
		.........
		target_list = &target_thread->todo;//use the target thread's todo queue as the target todo queue target_list
		target_wait = &target_thread->wait;//and its wait queue as the target wait queue target_wait
	} else {
		.........
	}
	.........

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);//allocate a binder_transaction structure
	........

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);//allocate a binder_work structure
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	.......

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;//thread is the service_manager main thread
	else
		t->from = NULL;//this branch is taken here, because we are processing a reply
	t->sender_euid = proc->tsk->cred->euid;//effective uid of the service_manager process
	t->to_proc = target_proc;//target process
	t->to_thread = target_thread;//target thread
	t->code = tr->code;//0
	t->flags = tr->flags;//0
	t->priority = task_nice(current);
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));//allocate a binder_buffer from the target process
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;//user space may not free this buffer yet
	.......
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;//NULL
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);//increase the strong reference count of the target Binder node

	offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));//start of the offsets array inside data, right after the data buffer

	if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {//copy the data buffer into data
		binder_user_error("binder: %d:%d got transaction with invalid "
			"data ptr\n", proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {//copy the offsets array into data, right after the data buffer
		binder_user_error("binder: %d:%d got transaction with invalid "
			"offsets ptr\n", proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
		binder_user_error("binder: %d:%d got transaction with "
			"invalid offsets size, %zd\n",
			proc->pid, thread->pid, tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	off_end = (void *)offp + tr->offsets_size;
	for (; offp < off_end; offp++) {//the offsets array is empty, so this loop does nothing
		.....
	}
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);//the FregServer main thread's transaction_stack becomes NULL
	} else if (!(t->flags & TF_ONE_WAY)) {
		.........
	} else {
		.........
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);//add t to the target thread's todo queue
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);//add tcomplete to the current thread's todo queue
	if (target_wait)
		wake_up_interruptible(target_wait);//wake up the target thread
	return;
}
       When the Binder driver dispatches an IPC request to a thread for processing, it pushes a binder_transaction structure onto that thread's transaction stack. So the function first takes this binder_transaction structure off the thread's transaction stack and saves it in the variable in_reply_to.

       The from member of the binder_transaction structure in_reply_to points to the thread that previously initiated the IPC with thread, so the target thread target_thread is obtained from it right away.
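
       The fields used here (transaction_stack, from, from_parent, to_thread, to_parent and so on) all belong to struct binder_transaction, which in this kernel version is declared roughly as follows (listed for reference; a couple of minor fields are omitted):

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;            /* thread that sent the request */
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;           /* process that should handle it */
	struct binder_thread *to_thread;       /* thread that should handle it */
	struct binder_transaction *to_parent;
	unsigned need_reply : 1;

	struct binder_buffer *buffer;
	unsigned int	code;
	unsigned int	flags;
	long	priority;
	long	saved_priority;
	uid_t	sender_euid;
};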

       After the target thread target_thread has been found, its todo queue and its wait queue are used as the target todo queue target_list and the target wait queue target_wait.
       A binder_transaction structure t is then allocated, initialized, and added to the target thread's todo queue, while a binder_work structure tcomplete is allocated and added to the current thread's (the service_manager main thread's) todo queue; along the way binder_pop_transaction removes in_reply_to from the target thread's transaction stack, as shown in the sketch below. Finally the target thread is woken up.
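
       In this kernel version binder_pop_transaction is roughly the following; it is what leaves the FregServer main thread's transaction_stack NULL and releases in_reply_to:

static void
binder_pop_transaction(struct binder_thread *target_thread, struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;//pop the stack; here it becomes NULL
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);//in_reply_to is no longer needed once the reply transaction has been set up
	binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
}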

       We assume that the current thread continues to run first, and that the woken target thread runs only after it has finished.
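
       Before following the service_manager main thread back into the driver, it is worth recalling the user-space loop that drives it. binder_loop in servicemanager's binder.c looks roughly like this (a reference sketch; log macros and buffer sizes may differ between versions):

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(unsigned));//tell the driver this thread has entered the loop

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);//only read_size is non-zero, so the driver runs binder_thread_read

        if (res < 0) {
            LOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);//handle the returned protocols
        if (res == 0) {
            LOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            LOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}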

       The service_manager main thread keeps running: after binder_transaction finishes, the call chain returns level by level until it is back in binder_loop, which continues its for loop. The next ioctl is again handled by binder_ioctl, and since only read_size is greater than 0 this time, binder_thread_read is executed, implemented as follows:

      ~/Android/kernel/goldfish/drivers/staging/android

      ----binder.c

static int
binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
	void  __user *buffer, int size, signed long *consumed, int non_block)
{
	void __user *ptr = buffer + *consumed;//start position in the user buffer
	void __user *end = buffer + size;//end position

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))//write BR_NOOP into the read buffer (the readbuf local variable in binder_loop)
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);//wait_for_proc_work is 0 here: the thread itself still has work to handle

	if (thread->return_error != BR_OK && ptr < end) {
		..........
	}


	thread->looper |= BINDER_LOOPER_STATE_WAITING;//looper is now BINDER_LOOPER_STATE_ENTERED | BINDER_LOOPER_STATE_WAITING
	if (wait_for_proc_work)//it is 0
		proc->ready_threads++;
	mutex_unlock(&binder_lock);
	if (wait_for_proc_work) {//it is 0, so the else branch runs
	         ........
	} else {
		if (non_block) {//non-blocking mode must return a result immediately
			if (!binder_has_thread_work(thread))//if there is work keep going; otherwise report -EAGAIN
				ret = -EAGAIN;
		} else
			ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));//there is work on thread->todo, so this does not sleep and execution continues
	}
	mutex_lock(&binder_lock);
	if (wait_for_proc_work)//it is 0
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;//looper is back to BINDER_LOOPER_STATE_ENTERED

	if (ret)
		return ret;

	while (1) {
		......
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))//write a BR_TRANSACTION_COMPLETE return protocol into the user-supplied buffer
				return -EFAULT;
			ptr += sizeof(uint32_t);


			binder_stat_br(proc, thread, cmd);
			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION_COMPLETE)
				printk(KERN_INFO "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
				       proc->pid, thread->pid);


			list_del(&w->entry);//remove the work item from the todo queue
			kfree(w);//free the binder_work structure
			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
		} break;
		......
	}

done:

	*consumed = ptr - buffer;//number of bytes consumed
	..........
	return 0;
}
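
       The two helpers used above to decide whether the thread should sleep are binder_has_thread_work and binder_has_proc_work. In this kernel version they are roughly:

static int
binder_has_proc_work(struct binder_proc *proc, struct binder_thread *thread)
{
	return !list_empty(&proc->todo) ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

static int
binder_has_thread_work(struct binder_thread *thread)
{
	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}
       Here thread->todo still holds the BINDER_WORK_TRANSACTION_COMPLETE item queued by binder_transaction, so binder_has_thread_work returns true and the wait does not block.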

     After binder_thread_read finishes, control returns to binder_ioctl and finally to binder_loop, which then calls binder_parse, implemented as follows:

      ~/Android/frameworks/base/cmd/servicemanager

       ----binder.c

int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uint32_t *ptr, uint32_t size, binder_handler func)//ptr points to the returned protocols (BR_NOOP followed by BR_TRANSACTION_COMPLETE), size is the number of bytes read
{
    int r = 1;
    uint32_t *end = ptr + (size / 4);

    while (ptr < end) {
        uint32_t cmd = *ptr++;
        .......
        switch(cmd) {//after the BR_NOOP is skipped, cmd is BR_TRANSACTION_COMPLETE
        ......
         case BR_TRANSACTION_COMPLETE:
            break;
        ......}
    }

    return r;
}
     After binder_parse finishes, binder_loop continues its for loop and the thread once again sleeps until its process has new unprocessed items, stopping at the following line in the driver:

wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));//sleep until the owning process has new unprocessed work
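
       This line sits in the wait_for_proc_work branch of binder_thread_read that was abridged above. By now the service_manager main thread's transaction_stack is NULL and its todo list is empty, so wait_for_proc_work is 1 and the thread blocks on proc->wait. That branch looks roughly like this (a sketch for reference):

	if (wait_for_proc_work) {
		......
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));//sleeps here until a client process queues new work
	}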
