The Binder Driver

Registering the Binder Driver

The structure used to register the driver:

static struct miscdevice binder_miscdev = {
    .minor = MISC_DYNAMIC_MINOR, // minor number, assigned dynamically
    .name = "binder",     // device name
    .fops = &binder_fops  // the device's file operations table (a struct file_operations)
};

The .fops = &binder_fops member points to the file_operations table that maps each file operation to its Binder handler:


static const struct file_operations binder_fops = {
    .owner = THIS_MODULE,
    .poll = binder_poll,
    .unlocked_ioctl = binder_ioctl,
    .compat_ioctl = binder_ioctl,
    .mmap = binder_mmap,
    .open = binder_open,
    .flush = binder_flush,
    .release = binder_release,
};
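
For reference, a minimal sketch of how such a misc device is registered at driver init time; the real binder_init() also creates debugfs entries, and error handling is trimmed here:

static int __init binder_init(void)
{
    /* register /dev/binder as a misc character device; the kernel picks
     * the minor number because of MISC_DYNAMIC_MINOR above */
    return misc_register(&binder_miscdev);
}
device_initcall(binder_init);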

Opening the Binder Driver

static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc; // the binder_proc describing this process

    proc = kzalloc(sizeof(*proc), GFP_KERNEL); // allocate the binder_proc in kernel memory
    if (proc == NULL)
        return -ENOMEM;
    get_task_struct(current);
    proc->tsk = current;   // remember the opening task in the binder_proc
    INIT_LIST_HEAD(&proc->todo); // initialize the todo list
    init_waitqueue_head(&proc->wait); // initialize the wait queue
    proc->default_priority = task_nice(current);  // record the current task's nice value as the default priority

    binder_lock(__func__);   // global lock: binder is accessed concurrently from many threads
    binder_stats_created(BINDER_STAT_PROC); // bump the BINDER_PROC creation counter
    hlist_add_head(&proc->proc_node, &binder_procs); // add proc_node to the list headed by binder_procs
    proc->pid = current->group_leader->pid;
    INIT_LIST_HEAD(&proc->delivered_death); // initialize the list of delivered death notifications
    filp->private_data = proc;       // stash the binder_proc in the file's private_data
    binder_unlock(__func__); // release the lock

    return 0;
}

binder_open() creates a binder_proc and performs some initialization:

  • Initialize the todo list
  • Set the default priority
  • Add proc_node to the head of binder_procs, so binder_procs is a linked list connecting the nodes of every Binder-using process (see the sketch below)
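
Because every binder_open() hooks the new binder_proc into the global binder_procs hlist, all processes currently using Binder can be enumerated from it. A tiny illustrative sketch (roughly what the driver's debugfs state dump does; locking omitted):

    struct binder_proc *proc;

    hlist_for_each_entry(proc, &binder_procs, proc_node)
        pr_info("binder: proc %d\n", proc->pid);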

Binder Buffer Allocation (binder_mmap)

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
    int ret;
    struct vm_struct *area; // kernel virtual address area
    struct binder_proc *proc = filp->private_data;
    const char *failure_string;
    struct binder_buffer *buffer;  

    if (proc->tsk != current)
        return -EINVAL;

Recall that binder_open() set filp->private_data = proc.

    if ((vma->vm_end - vma->vm_start) > SZ_4M)
        vma->vm_end = vma->vm_start + SZ_4M;  // cap the mapping at 4 MB
    mutex_lock(&binder_mmap_lock);  // take the mmap lock
    // reserve a contiguous kernel virtual area the same size as the user vma
    area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
    if (area == NULL) {
        ret = -ENOMEM;
        failure_string = "get_vm_area";
        goto err_get_vm_area_failed;
    }
    proc->buffer = area->addr; // kernel virtual address of the reserved area
    // offset = user-space virtual address - kernel virtual address
    proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
    mutex_unlock(&binder_mmap_lock); // drop the lock
With proc->buffer and proc->user_buffer_offset recorded, the driver can translate any address inside the mapped region between its kernel view and its user-space view with simple arithmetic.
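As a concrete illustration (not driver code), converting a pointer inside the mapped buffer from its kernel view to the corresponding user-space view is just an addition:

    /* illustrative: kern_ptr is any address inside proc->buffer */
    void __user *user_ptr =
            (void __user *)((uintptr_t)kern_ptr + proc->user_buffer_offset);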
    ...
    // allocate the array of physical-page pointers, one entry per page in the vma
    proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
    if (proc->pages == NULL) {
        ret = -ENOMEM;
        failure_string = "alloc page array";
        goto err_alloc_pages_failed;
    }
    proc->buffer_size = vma->vm_end - vma->vm_start;

    vma->vm_ops = &binder_vm_ops;
    vma->vm_private_data = proc;

    // allocate physical pages and map them into both kernel and user space; start with a single page
    if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
        ret = -ENOMEM;
        failure_string = "alloc small buf";
        goto err_alloc_small_buf_failed;
    }

Physical pages are allocated here and mapped into both virtual address spaces.
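
A heavily simplified sketch of the idea behind binder_update_page_range(); the helper name is made up, and the real code also maps each page into the kernel area reserved by get_vm_area(), takes the proper locks, and unwinds on failure (details vary across kernel versions):

static int binder_map_one_page_sketch(struct vm_area_struct *vma,
                                      unsigned long user_page_addr)
{
    struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); // one physical page

    if (!page)
        return -ENOMEM;
    // make the same physical page visible at the matching user address
    return vm_insert_page(vma, user_page_addr, page);
}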

    buffer = proc->buffer; // the first binder_buffer sits at the start of the kernel area
    INIT_LIST_HEAD(&proc->buffers); // initialize the process's buffers list head
    list_add(&buffer->entry, &proc->buffers); // link this binder_buffer into the process's buffers list
    buffer->free = 1;
    // put the free buffer into proc->free_buffers
    binder_insert_free_buffer(proc, buffer);
    // half of the total buffer is reserved for asynchronous transactions
    proc->free_async_space = proc->buffer_size / 2;

Since this buffer has just been created and is entirely unused, it is inserted into the free list (proc->free_buffers) and linked into the process's buffers list.

    barrier();
    proc->files = get_files_struct(current);
    proc->vma = vma;
    proc->vma_vm_mm = vma->vm_mm;
    return 0;

    ... // error labels: unwind and free whatever was allocated
    return ret;
}

The kernel-area reservation is done under binder_mmap_lock, so only one process at a time can be setting up its mapping.



In short, binder_mmap() sets up the memory Binder needs for this process, maps it into both the kernel and user virtual address spaces, and then manages it through fields such as proc->free_async_space, proc->buffer, and proc->pages.
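
From user space, all of the above is triggered by a plain open() followed by mmap() on the returned fd. A hedged sketch (the 1 MB size mirrors what libbinder's ProcessState uses, and the flags are simplified from AOSP):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int open_binder(void **map_out)
{
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);    // -> binder_open()
    if (fd < 0)
        return -1;

    size_t map_size = 1 * 1024 * 1024;                    // must stay below the 4 MB cap
    void *map = mmap(NULL, map_size, PROT_READ,
                     MAP_PRIVATE | MAP_NORESERVE, fd, 0); // -> binder_mmap()
    if (map == MAP_FAILED) {
        close(fd);
        return -1;
    }
    *map_out = map;
    return fd;
}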


Binder Communication: binder_ioctl()

binder_ioctl() is responsible for sending and receiving IPC data and IPC reply data between two processes.

ioctl(file descriptor, ioctl command, data)

The ioctl arguments mean:

  • file descriptor: the fd returned from opening /dev/binder (the open that ran binder_open())
  • ioctl command: one of the commands that control the interaction with the driver
  • data: a pointer to the command-specific payload, e.g. a struct binder_write_read (the BINDER_VERSION sketch below shows a complete call)
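
As a complete example of the calling convention, a user-space sketch that queries the protocol version (the uapi header path may differ between trees; error handling trimmed):

#include <sys/ioctl.h>
#include <linux/android/binder.h>   // BINDER_VERSION, struct binder_version

int check_binder_version(int fd)
{
    struct binder_version vers = { 0 };

    if (ioctl(fd, BINDER_VERSION, &vers) < 0)
        return -1;
    return vers.protocol_version == BINDER_CURRENT_PROTOCOL_VERSION ? 0 : -1;
}

On the driver side these commands are dispatched in binder_ioctl():
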
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;  // the calling binder thread
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    // sleep only while binder_stop_on_user_error >= 2; normally this returns immediately
    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        goto err_unlocked;

The handler holds the current process's binder_proc from filp->private_data; the wait above only actually sleeps when the driver has been stopped because of user errors, otherwise it falls straight through.

    binder_lock(__func__);
    // look up (or create) the binder_thread for the current thread
    thread = binder_get_thread(proc);
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {
    case BINDER_WRITE_READ:  // perform the binder read/write
        ret = binder_ioctl_write_read(filp, cmd, arg, thread); 
        if (ret)
            goto err;
        break;

This performs the binder read/write, i.e. the actual cross-process data exchange.

    case BINDER_SET_MAX_THREADS: // set the maximum number of binder threads
        if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
            ret = -EINVAL;
            goto err;
        }
        break;

The maximum binder thread count is copied straight from user space into proc->max_threads.
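
This is what libbinder does when it opens the driver; a raw-call sketch of the equivalent (15 matches ProcessState's DEFAULT_MAX_BINDER_THREADS, taken here as an assumption about the AOSP default):

    size_t max_threads = 15;   // default size of the binder thread pool
    if (ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads) < 0)
        perror("BINDER_SET_MAX_THREADS");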

    case BINDER_SET_CONTEXT_MGR: // become the binder context manager; this is how servicemanager becomes the Binder daemon
        ret = binder_ioctl_set_ctx_mgr(filp);
        if (ret)
            goto err;
        break;
    case BINDER_THREAD_EXIT:   // a binder thread is exiting; free its binder_thread
        binder_free_thread(proc, thread);
        thread = NULL;
        break;

This frees the exiting binder thread.

    case BINDER_VERSION: {  // return the binder protocol version
        struct binder_version __user *ver = ubuf;

        if (size != sizeof(struct binder_version)) {
            ret = -EINVAL;
            goto err;
        }
        if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
                 &ver->protocol_version)) {
            ret = -EINVAL;
            goto err;
        }
        break;
    }
    default:
        ret = -EINVAL;
        goto err;
    }
    ret = 0;
err:
    if (thread)
        thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
    binder_unlock(__func__);
// wait to be woken; in practice this only blocks when a user error has stopped the driver
    wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);

err_unlocked:
    trace_binder_ioctl_done(ret);
    return ret;
}

Note that the command handling runs while holding binder_lock().

The core command is BINDER_WRITE_READ, which is handled by binder_ioctl_write_read(filp, cmd, arg, thread);

How Binder Exchanges Data

static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg; // the user-space data
    struct binder_write_read bwr; // the key structure for this command
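bwr is the struct binder_write_read defined in the Binder uapi header; its layout (comments added here) is:

struct binder_write_read {
    binder_size_t    write_size;     // bytes available in write_buffer
    binder_size_t    write_consumed; // bytes the driver actually consumed
    binder_uintptr_t write_buffer;   // user buffer holding BC_* commands
    binder_size_t    read_size;      // bytes available in read_buffer
    binder_size_t    read_consumed;  // bytes the driver filled in
    binder_uintptr_t read_buffer;    // user buffer that receives BR_* commands
};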

flat_binder_object is the structure in which a Binder object is passed directly between two processes. Its fields are:

Type             | Member | Meaning
__u32            | type   | the kind of object being passed
__u32            | flags  | priority and file-descriptor permission bits
binder_uintptr_t | binder | (union) used when a binder_node is passed; the node's address in the owning process
__u32            | handle | (union) used when a binder_ref is passed; the Binder's reference handle within the process
binder_uintptr_t | cookie | only meaningful for binder_node; extra data attached to the node

In this exchange, writes carry BC_XX commands (process to driver) and reads carry BR_XX commands (driver back to process).

    if (size != sizeof(struct binder_write_read)) {
        ret = -EINVAL;
        goto out;
    }
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { // copy the user-space ubuf into bwr
        ret = -EFAULT;
        goto out;
    }

First the user-space data is copied into bwr.

    if (bwr.write_size > 0) {
        // the write buffer has data: perform the binder write
        ret = binder_thread_write(proc, thread,
                      bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
        trace_binder_write_done(ret);
        if (ret < 0) { // on write failure, copy bwr back to user space and return
            bwr.read_consumed = 0;
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }

If user space has data to write, binder_thread_write() handles it.

    if (bwr.read_size > 0) {
        // the read buffer has room: perform the binder read
        ret = binder_thread_read(proc, thread,
                      bwr.read_buffer, bwr.read_size, &bwr.read_consumed,
                      filp->f_flags & O_NONBLOCK);
        trace_binder_read_done(ret);
        if (!list_empty(&proc->todo))
            wake_up_interruptible(&proc->wait); // wake up waiting threads
        if (ret < 0) { // on read failure, copy bwr back to user space and return
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }

After writing, the driver checks whether data should be read; if bwr requests a read, binder_thread_read() is executed.

    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { // copy the kernel-side bwr back to the user-space ubuf
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}

Finally the filled-in bwr (consumed counts and read results) is copied back to user space.
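
Seen from user space, the whole exchange above is one ioctl. A hedged sketch of what a caller (effectively IPCThreadState::talkWithDriver()) does; fd, out_buf, out_len and in_buf are placeholders:

    struct binder_write_read bwr = { 0 };

    bwr.write_buffer = (binder_uintptr_t)out_buf; // BC_* command stream we built
    bwr.write_size   = out_len;
    bwr.read_buffer  = (binder_uintptr_t)in_buf;  // BR_* replies land here
    bwr.read_size    = sizeof(in_buf);

    if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
        perror("BINDER_WRITE_READ");
    // bwr.write_consumed / bwr.read_consumed report how much the driver processed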


Where the Common Commands Are Used

  • BINDER_WRITE_READ
    • IPCThreadState::talkWithDriver()
  • BINDER_SET_CONTEXT_MGR
    • the servicemanager process becomes the context manager, binder_become_context_manager()
  • BINDER_SET_MAX_THREADS
    • when the ProcessState object is initialized, open_driver()
    • when the pool size is adjusted explicitly, ProcessState.setThreadPoolMaxThreadCount()
  • BINDER_VERSION
    • when the ProcessState object is initialized, open_driver()

The Core of the Data Transfer (Write)

binder_thread_write(){
    while (ptr < end && thread->return_error == BR_OK) {
        get_user(cmd, (uint32_t __user *)ptr); // read the Binder protocol command (BC_* code) from the IPC data
        switch (cmd) {
            case BC_INCREFS: ...
            case BC_ACQUIRE: ...
            case BC_RELEASE: ...
            case BC_DECREFS: ...
            case BC_INCREFS_DONE: ...
            case BC_ACQUIRE_DONE: ...
            case BC_FREE_BUFFER: ... break;
            case BC_TRANSACTION:
            case BC_REPLY: {
                struct binder_transaction_data tr;
                copy_from_user(&tr, ptr, sizeof(tr)); // copy the user-space binder_transaction_data into the kernel
                binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
                break;
            }

binder_transaction() is the core of the data transfer; note that the reply flag is computed as cmd == BC_REPLY.

            case BC_REGISTER_LOOPER: ...
            case BC_ENTER_LOOPER: ...
            case BC_EXIT_LOOPER: ...
            case BC_REQUEST_DEATH_NOTIFICATION: ...
            case BC_CLEAR_DEATH_NOTIFICATION:  ...
            case BC_DEAD_BINDER_DONE: ...
        }
    }
}
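
The tr copied from user space above is a struct binder_transaction_data. For reference, its uapi layout is roughly the following (comments added; minor details differ between kernel versions):

struct binder_transaction_data {
    union {
        __u32            handle;   // target reference handle (remote objects)
        binder_uintptr_t ptr;      // target binder_node (local objects)
    } target;
    binder_uintptr_t cookie;       // extra data attached to the target node
    __u32  code;                   // transaction (method) code
    __u32  flags;                  // e.g. TF_ONE_WAY
    pid_t  sender_pid;             // filled in by the driver
    uid_t  sender_euid;            // filled in by the driver
    binder_size_t data_size;       // size of the serialized data
    binder_size_t offsets_size;    // size of the flat_binder_object offset array
    union {
        struct {
            binder_uintptr_t buffer;  // the serialized data itself
            binder_uintptr_t offsets; // offsets of binder objects within it
        } ptr;
        __u8 buf[8];
    } data;
};
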
static void binder_transaction(struct binder_proc *proc,
               struct binder_thread *thread,
               struct binder_transaction_data *tr, int reply){
    // depending on the request, the following targets are resolved:
    struct binder_thread *target_thread; // target thread
    struct binder_proc *target_proc;     // target process
    struct binder_node *target_node;     // target binder node
    struct list_head *target_list;       // target todo list
    wait_queue_head_t *target_wait;      // target wait queue
    ...
    // allocate the two work structures
    struct binder_transaction *t = kzalloc(sizeof(*t), GFP_KERNEL);
    struct binder_work *tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    // allocate a buffer from target_proc's mapped area
    t->buffer = binder_alloc_buf(target_proc, tr->data_size,

The buffer is allocated in the target process's address space.

    for (; offp < off_end; offp++) {
        switch (fp->type) {
        case BINDER_TYPE_BINDER: ...
        case BINDER_TYPE_WEAK_BINDER: ...
        case BINDER_TYPE_HANDLE: ...
        case BINDER_TYPE_WEAK_HANDLE: ...
        case BINDER_TYPE_FD: ...
        }
    }
    // queue a BINDER_WORK_TRANSACTION work item on the target's list
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);
    // queue a BINDER_WORK_TRANSACTION_COMPLETE on the current thread's todo list
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;
}

The overall target lookup is:
handle -> binder_ref -> target_node -> target_proc
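
A hedged sketch of how that chain is walked inside binder_transaction() for the non-reply case (function and field names follow the older driver quoted here; newer kernels differ slightly, and error paths are omitted):

    struct binder_ref *ref = binder_get_ref(proc, tr->target.handle);

    target_node = ref->node;           // the binder_node the sender's ref points to
    target_proc = target_node->proc;   // the process that published the Binder
    target_list = &target_proc->todo;  // work queue we will append to
    target_wait = &target_proc->wait;  // wait queue we will wake up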

The Core of the Data Transfer (Read)

binder_thread_read(){
    wait_for_proc_work = thread->transaction_stack == NULL &&
            list_empty(&thread->todo);
    // wait_for_proc_work decides whether to wait on the thread's or the process's wait queue
    if (wait_for_proc_work) {
        ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
        ...
    } else {
        ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
        ...
    }
    
    while (1) {
        // if both thread->todo and proc->todo are empty, goto the retry label; otherwise continue:
        struct binder_transaction_data tr;
        struct binder_transaction *t = NULL;
        switch (w->type) {
          case BINDER_WORK_TRANSACTION: ...
          case BINDER_WORK_TRANSACTION_COMPLETE: ...
          case BINDER_WORK_NODE: ...
          case BINDER_WORK_DEAD_BINDER: ...
          case BINDER_WORK_DEAD_BINDER_AND_CLEAR: ...
          case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: ...
        }
        ...
    }
done:
    *consumed = ptr - buffer;
    // when no threads are requested or ready, fewer threads than the maximum (15 by default)
    // have been started, and the looper is registered or entered, ask for a new thread.
    if (proc->requested_threads + proc->ready_threads == 0 &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
         BINDER_LOOPER_STATE_ENTERED))) {
        proc->requested_threads++;
        // emit BR_SPAWN_LOOPER so user space spawns a new binder thread
        put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer);
    }
    return 0;
}

The Write/Read Round Trip

status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags){
    // pack data, handle, etc. into a binder_transaction_data, prepend BC_TRANSACTION, and write it all into the Parcel mOut
    err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    waitForResponse(reply);
}

writeTransactionData() writes everything into mOut: the handle that points at the target (e.g. AMS), the code, the flags, and, crucially, the BC_TRANSACTION command.

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    while (1) {
        if ((err = talkWithDriver()) < NO_ERROR) break;

talkWithDriver() is where the actual transfer to the driver happens.

        cmd = mIn.readInt32();
        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            // only break out of the loop when no reply is expected (a oneway call); otherwise keep waiting
            if (!reply && !acquireResult) goto finish; break;

        case BR_DEAD_REPLY:
            err = DEAD_OBJECT;         goto finish;
        case BR_FAILED_REPLY:
            err = FAILED_TRANSACTION;  goto finish;
        case BR_REPLY: ...             goto finish;

        default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
}
}
// mOut has data, mIn has nothing yet; doReceive defaults to true
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;
    bwr.write_buffer = (uintptr_t)mOut.data();
    if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
}
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    switch (cmd) {
        case BINDER_WRITE_READ:
            ret = binder_ioctl_write_read(filp, cmd, arg, thread);
            break;
    }
    return ret;
}
static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    if (bwr.write_size > 0) {
        // write: the data ends up queued for the target process
        ret = binder_thread_write(proc, thread,
                      bwr.write_buffer,
                      bwr.write_size,
                      &bwr.write_consumed);
    }
    if (bwr.read_size > 0) {
        // read: consume the data queued for this thread/process
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
             bwr.read_size,
             &bwr.read_consumed,
             filp->f_flags & O_NONBLOCK);
    }
}
static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    while (ptr < end && thread->return_error == BR_OK) {
        // read the cmd from user space; here it is BC_TRANSACTION,
        // the command IPCThreadState::transact() wrote into mOut
        if (get_user(cmd, (uint32_t __user *)ptr)) return -EFAULT;
        ptr += sizeof(uint32_t);
        switch (cmd) {
        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;
            // copy the user-space binder_transaction_data
            if (copy_from_user(&tr, ptr, sizeof(tr)))   return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
            break;
        }
        ...
    }
  }
  return 0;
}

Here cmd is BC_TRANSACTION, so binder_transaction() is called with reply == false; the cmd == BC_REPLY test is what distinguishes a reply from a new transaction.

Inside binder_transaction() the peer process is resolved through the same lookup as before:
handle -> binder_ref -> binder_node -> binder_proc

To be continued...
