Binder Driver Startup

Android knowledge notes

1. Overview

The user-space entry points binder_open(), binder_mmap(), and binder_ioctl() reach the corresponding functions in the kernel-space Binder driver through system calls. Data crosses the user/kernel boundary via the kernel helpers copy_from_user() and copy_to_user(). Inside the driver, a global binder_procs list stores the per-process state (binder_proc) of every process that has opened the driver, including all server processes.
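
To make this concrete, here is a minimal user-space sketch (assuming the UAPI header <linux/android/binder.h>; its path and availability vary by kernel version) that exercises binder_open and binder_ioctl through the corresponding system calls:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* struct binder_version, BINDER_VERSION */

int main(void)
{
    /* open() traps into the kernel and runs the driver's binder_open() */
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    if (fd < 0)
        return 1;

    /* ioctl() runs binder_ioctl(); BINDER_VERSION is the simplest command */
    struct binder_version ver;
    if (ioctl(fd, BINDER_VERSION, &ver) == 0)
        printf("binder protocol version: %d\n", ver.protocol_version);

    close(fd);
    return 0;
}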

1.1 User Space / Kernel Space

  • Kernel space is where the Linux kernel runs; user space is where user programs run. For safety the two are isolated, so even if a user program crashes, the kernel is unaffected.

  • Kernel space can execute any instruction and access every resource of the system; user space can only perform simple computation and cannot touch system resources directly. It must go through the system-call interface (system call) to issue requests to the kernel; a toy driver-side sketch of this boundary follows.
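
As an illustration of that boundary (hypothetical driver code, not from binder.c), an ioctl handler cannot dereference a user pointer directly; it has to move data in and out with the two helpers mentioned above:

#include <linux/fs.h>
#include <linux/uaccess.h>   /* copy_from_user, copy_to_user */

struct demo_args { int in; int out; };

static long demo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    struct demo_args args;
    void __user *ubuf = (void __user *)arg;

    /* Copy the caller's struct from user space into kernel space */
    if (copy_from_user(&args, ubuf, sizeof(args)))
        return -EFAULT;

    args.out = args.in * 2;   /* do the "work" in kernel space */

    /* Copy the result back out to the caller's buffer */
    if (copy_to_user(ubuf, &args, sizeof(args)))
        return -EFAULT;
    return 0;
}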

2. binder_init

  • Kconfig configuration file
menu "Android"

config ANDROID
    bool "Android Drivers"
    ---help---
      Enable support for various drivers needed on the Android platform

if ANDROID

config ANDROID_BINDER_IPC
    bool "Android Binder IPC Driver"
    depends on MMU
    default n
    ---help---
      Binder is used in Android for both communication between processes,
      and remote method invocation.

      This means one Android process can call a method/routine in another
      Android process, using Binder to identify, invoke and pass arguments
      between said processes.

# binder device name configuration
config ANDROID_BINDER_DEVICES
    string "Android Binder devices"
    depends on ANDROID_BINDER_IPC
    default "binder"
    ---help---
      Default value for the binder.devices parameter.

      The binder.devices parameter is a comma-separated list of strings
      that specifies the names of the binder device nodes that will be
      created. Each binder device has its own context manager, and is
      therefore logically separated from the other devices.

config ANDROID_BINDER_IPC_32BIT
    bool
    depends on !64BIT && ANDROID_BINDER_IPC
    default y
    ---help---
      The Binder API has been changed to support both 32 and 64bit
      applications in a mixed environment.

      Enable this to support an old 32-bit Android user-space (v4.4 and
      earlier).

      Note that enabling this will break newer Android user-space.

endif # if ANDROID

endmenu
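
On a typical Android kernel these options end up in the defconfig roughly as follows (the exact device list varies by device and Android version; hwbinder and vndbinder were added for Project Treble, so treat this list as an example):

CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder"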

kernel/drivers/staging/android/binder.c

//This table maps the operations called from the native layer to the driver-layer functions
static const struct file_operations binder_fops = {
        // native layer <==> driver layer
    .owner = THIS_MODULE,
    .poll = binder_poll,
    .unlocked_ioctl = binder_ioctl,
    .compat_ioctl = binder_ioctl,
    .mmap = binder_mmap,
    .open = binder_open,
    .flush = binder_flush,
    .release = binder_release,
};
static int __init init_binder_device(const char *name)
{
    int ret;
    struct binder_device *binder_device;
    //Allocate memory for the binder device
    binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
    //Initialize the device:
    // file-operations table for the device (the file_operations struct above)
    binder_device->miscdev.fops = &binder_fops; 
    // minor device number, dynamically allocated
    binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; 
    // device name, e.g. "binder"
    binder_device->miscdev.name = name; 
    //INVALID_UID means "not set yet"; the context manager's UID is stored
    //here later, when BINDER_SET_CONTEXT_MGR is handled
    binder_device->context.binder_context_mgr_uid = INVALID_UID;
    binder_device->context.name = name;
    //Register as a misc device
    ret = misc_register(&binder_device->miscdev);
    //Add the hlist node to the device list headed by binder_devices
    hlist_add_head(&binder_device->hlist, &binder_devices);
    return ret;
}

2.1 binder_init

//Device-driver entry point (module init function)
static int __init binder_init(void)
{
    int ret;
    char *device_name, *device_names;
    struct binder_device *device;
    struct hlist_node *tmp;
    //Create a single-threaded workqueue named "binder"
    binder_deferred_workqueue = create_singlethread_workqueue("binder");
    if (!binder_deferred_workqueue)
        return -ENOMEM;

    binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
    if (binder_debugfs_dir_entry_root)
        binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
                         binder_debugfs_dir_entry_root);

    if (binder_debugfs_dir_entry_root) {
        debugfs_create_file("state",
                    S_IRUGO,
                    binder_debugfs_dir_entry_root,
                    NULL,
                    &binder_state_fops);
        debugfs_create_file("stats",
                    S_IRUGO,
                    binder_debugfs_dir_entry_root,
                    NULL,
                    &binder_stats_fops);
        debugfs_create_file("transactions",
                    S_IRUGO,
                    binder_debugfs_dir_entry_root,
                    NULL,
                    &binder_transactions_fops);
        debugfs_create_file("transaction_log",
                    S_IRUGO,
                    binder_debugfs_dir_entry_root,
                    &binder_transaction_log,
                    &binder_transaction_log_fops);
        debugfs_create_file("failed_transaction_log",
                    S_IRUGO,
                    binder_debugfs_dir_entry_root,
                    &binder_transaction_log_failed,
                    &binder_transaction_log_fops);
    }

    //Allocate memory for a copy of the binder device-name config string
    device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
    if (!device_names) {
        ret = -ENOMEM;
        goto err_alloc_device_names_failed;
    }
    //Read the device names configured in Kconfig (CONFIG_ANDROID_BINDER_DEVICES)
    strcpy(device_names, binder_devices_param);

    while ((device_name = strsep(&device_names, ","))) {
        ret = init_binder_device(device_name);
        if (ret)
            goto err_init_binder_device_failed;
    }
    return ret;
err_init_binder_device_failed:
    hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
        misc_deregister(&device->miscdev);
        hlist_del(&device->hlist);
        kfree(device);
    }
err_alloc_device_names_failed:
    debugfs_remove_recursive(binder_debugfs_dir_entry_root);

    destroy_workqueue(binder_deferred_workqueue);

    return ret;
}

What binder_init does

  • 1. Allocate memory for each binder device
  • 2. Initialize the device and register it with misc_register
  • 3. Add the device to the binder_devices list
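
The while loop in binder_init splits the CONFIG_ANDROID_BINDER_DEVICES string on commas with strsep() and registers one device per name. The same parsing pattern as a runnable user-space sketch (the multi-device string is an example value):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    /* strsep() consumes the string in place, so work on a copy */
    char *names = strdup("binder,hwbinder,vndbinder");
    char *cursor = names;   /* strsep() advances this pointer */
    char *name;

    while ((name = strsep(&cursor, ",")))
        printf("would call init_binder_device(\"%s\")\n", name);

    free(names);
    return 0;
}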

2.2 binder_open

kernel/drivers/staging/android/binder.c

static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc;
    struct binder_device *binder_dev;

    binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
             current->group_leader->pid, current->pid);
    //Allocate kernel memory for the binder_proc struct
    proc = kzalloc(sizeof(*proc), GFP_KERNEL);
    if (proc == NULL)
        return -ENOMEM;
    //Take a reference on the current task and save it in proc->tsk
    get_task_struct(current);
    proc->tsk = current;
    // Initialize the todo list
    INIT_LIST_HEAD(&proc->todo); 
    // Initialize the wait queue
    init_waitqueue_head(&proc->wait);
    // Use the current task's nice value as the default priority
    proc->default_priority = task_nice(current);
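    // The misc core stored &binder_device->miscdev in filp->private_data
    // before calling open; container_of() recovers the enclosing
    // binder_device from that embedded member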
    binder_dev = container_of(filp->private_data, struct binder_device,
                  miscdev);
    proc->context = &binder_dev->context;
    // Global lock: binder state is accessed by many threads concurrently
    binder_lock(__func__);
    // Statistics: increment the count of created binder_proc objects
    binder_stats_created(BINDER_STAT_PROC);
    // Add proc_node to the head of the global binder_procs list
    hlist_add_head(&proc->proc_node, &binder_procs);
    // Process pid (the thread-group leader's pid)
    proc->pid = current->group_leader->pid;
    // Initialize the list of delivered death notifications
    INIT_LIST_HEAD(&proc->delivered_death);
    // Attach this binder_proc to filp so later calls can find it via filp->private_data
    filp->private_data = proc;
    // Release the lock
    binder_unlock(__func__);

    if (binder_debugfs_dir_entry_proc) {
        char strbuf[11];

        snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
    
        proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
            binder_debugfs_dir_entry_proc,
            (void *)(unsigned long)proc->pid,
            &binder_proc_fops);
    }

    return 0;
}

What binder_open does

  • 1. Create the binder_proc object
  • 2. Save the current process's information into proc
  • 3. Store proc in the file's private data: filp->private_data = proc
  • 4. Add proc to the binder_procs list
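
One subtlety worth spelling out: before binder_open runs, the misc core has set filp->private_data to the registered miscdevice, and binder_open uses container_of() to walk back from that embedded member to the whole binder_device, then overwrites private_data with proc. A runnable user-space sketch of the pointer arithmetic (simplified, hypothetical struct layout):

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel structs */
struct miscdevice_demo { int minor; };
struct binder_device_demo {
    int hlist_placeholder;              /* stands in for the hlist node */
    struct miscdevice_demo miscdev;     /* embedded member */
};

/* Same idea as the kernel's macro, minus the type checking */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
    struct binder_device_demo dev = { .miscdev = { .minor = 55 } };
    struct miscdevice_demo *m = &dev.miscdev;  /* what private_data holds */

    struct binder_device_demo *d =
        container_of(m, struct binder_device_demo, miscdev);

    printf("recovered binder_device: minor=%d, %s\n",
           d->miscdev.minor, d == &dev ? "pointers match" : "mismatch");
    return 0;
}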

2.3 binder_mmap

mmap associates a range of a process's virtual address space with specific physical memory. Binder uses it to build the shared buffer: the same physical pages are mapped into both the process's address space and the kernel's.

kernel/drivers/staging/android/binder.c

//vma: the process's user-space virtual memory area; user space maps 1MB - 8KB
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
    int ret;
    //Kernel virtual memory area
    struct vm_struct *area;
    struct binder_proc *proc = filp->private_data;
    const char *failure_string;
    struct binder_buffer *buffer;

    if (proc->tsk != current)
        return -EINVAL;
    //Cap the mapping at 4MB; in practice user space (libbinder) only requests 1MB - 8KB
    if ((vma->vm_end - vma->vm_start) > SZ_4M)
        vma->vm_end = vma->vm_start + SZ_4M;

    binder_debug(BINDER_DEBUG_OPEN_CLOSE,
             "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
             proc->pid, vma->vm_start, vma->vm_end,
             (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
             (unsigned long)pgprot_val(vma->vm_page_prot));

    if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
        ret = -EPERM;
        failure_string = "bad vm_flags";
        goto err_bad_arg;
    }
    vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
    //Mutex: only one task at a time may set up the mapping, guarding against concurrent mmap calls
    mutex_lock(&binder_mmap_lock);
    //If a mapping already exists, take the goto: it releases the lock and binder_mmap returns -EBUSY
    if (proc->buffer) {
        ret = -EBUSY;
        failure_string = "already mapped";
        goto err_already_mapped;
    }
    // Reserve a contiguous range of kernel virtual address space (VM_IOREMAP), the same size as the process's vma
    area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
    if (area == NULL) {
        ret = -ENOMEM;
        failure_string = "get_vm_area";
        goto err_get_vm_area_failed;
    }
    // Point proc->buffer at this kernel virtual address range
    proc->buffer = area->addr;
    // Compute the offset between user-space and kernel-space addresses:
    //   offset = user virtual address - kernel virtual address
    proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
    mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
    if (cache_is_vipt_aliasing()) {
        while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
            pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
            vma->vm_start += PAGE_SIZE;
        }
    }
#endif
    //Allocate the array of physical-page pointers, one entry per page of the vma
    proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
    if (proc->pages == NULL) {
        ret = -ENOMEM;
        failure_string = "alloc page array";
        goto err_alloc_pages_failed;
    }
    proc->buffer_size = vma->vm_end - vma->vm_start;

    vma->vm_ops = &binder_vm_ops;
    vma->vm_private_data = proc;
    //Allocate physical pages and map them into both kernel and user space; only one page for now
    if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
        ret = -ENOMEM;
        failure_string = "alloc small buf";
        goto err_alloc_small_buf_failed;
    }
    buffer = proc->buffer;
    INIT_LIST_HEAD(&proc->buffers);
    // Link the buffer into the proc->buffers list
    list_add(&buffer->entry, &proc->buffers);
    buffer->free = 1; // this buffer is available
    // Insert the buffer into the proc->free_buffers rbtree
    binder_insert_free_buffer(proc, buffer);
    // Reserve half of the buffer as free space for asynchronous transactions
    proc->free_async_space = proc->buffer_size / 2;
    barrier();
    proc->files = get_files_struct(current);
    proc->vma = vma;
    proc->vma_vm_mm = vma->vm_mm;

    /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
         proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
    return 0;

err_alloc_small_buf_failed:
    kfree(proc->pages);
    proc->pages = NULL;
err_alloc_pages_failed:
    mutex_lock(&binder_mmap_lock);
    vfree(proc->buffer);
    proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
    mutex_unlock(&binder_mmap_lock);
err_bad_arg:
    pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
           proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
    return ret;
}

2.3.1 binder_update_page_range

//allocate: 1 = allocate pages, 0 = free pages
static int binder_update_page_range(struct binder_proc *proc, int allocate,
                    void *start, void *end,
                    struct vm_area_struct *vma)
{
    void *page_addr;
    unsigned long user_page_addr;
    struct page **page;
    struct mm_struct *mm;

    binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
             "%d: %s pages %p-%p\n", proc->pid,
             allocate ? "allocate" : "free", start, end);

    if (end <= start)
        return 0;

    trace_binder_update_page_range(proc, allocate, start, end);

    if (vma)
        mm = NULL;
    else
        mm = get_task_mm(proc->tsk);

    if (mm) {
        down_write(&mm->mmap_sem);
        vma = proc->vma;
        if (vma && mm != proc->vma_vm_mm) {
            pr_err("%d: vma mm and task mm mismatch\n",
                proc->pid);
            vma = NULL;
        }
    }
    //allocate == 1 means we are allocating; 0 means we are freeing
    if (allocate == 0)
        goto free_range;

    if (vma == NULL) {
        pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
            proc->pid);
        goto err_no_vma;
    }

    for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
        int ret;

        page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

        BUG_ON(*page);
        //Allocate one page of physical memory (typically 4KB)
        *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
        if (*page == NULL) {
            pr_err("%d: binder_alloc_buf failed for page at %p\n",
                proc->pid, page_addr);
            goto err_alloc_page_failed;
        }
        //Map the physical page into the kernel virtual address range
        ret = map_kernel_range_noflush((unsigned long)page_addr,
                    PAGE_SIZE, PAGE_KERNEL, page);
        flush_cache_vmap((unsigned long)page_addr,
                (unsigned long)page_addr + PAGE_SIZE);
        if (ret != 1) {
            pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
                   proc->pid, page_addr);
            goto err_map_kernel_failed;
        }
        user_page_addr =
            (uintptr_t)page_addr + proc->user_buffer_offset;
        //Map the same physical page into the process's user virtual address range
        ret = vm_insert_page(vma, user_page_addr, page[0]);
        if (ret) {
            pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                   proc->pid, user_page_addr);
            goto err_vm_insert_page_failed;
        }
        /* vm_insert_page does not seem to increment the refcount */
    }
    if (mm) {
        up_write(&mm->mmap_sem);
        mmput(mm);
    }
    return 0;

free_range:
    for (page_addr = end - PAGE_SIZE; page_addr >= start;
         page_addr -= PAGE_SIZE) {
        page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
        if (vma)
            zap_page_range(vma, (uintptr_t)page_addr +
                proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
        unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
        __free_page(*page);
        *page = NULL;
err_alloc_page_failed:
        ;
    }
err_no_vma:
    if (mm) {
        up_write(&mm->mmap_sem);
        mmput(mm);
    }
    return -ENOMEM;
}

2.3.2 binder_insert_free_buffer

kernel/drivers/staging/android/binder.c

static void binder_insert_free_buffer(struct binder_proc *proc,
                      struct binder_buffer *new_buffer)
{
    struct rb_node **p = &proc->free_buffers.rb_node;
    struct rb_node *parent = NULL;
    struct binder_buffer *buffer;
    size_t buffer_size;
    size_t new_buffer_size;

    BUG_ON(!new_buffer->free);

    new_buffer_size = binder_buffer_size(proc, new_buffer);

    binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
             "%d: add free buffer, size %zd, at %p\n",
              proc->pid, new_buffer_size, new_buffer);

    while (*p) {
        parent = *p;
        buffer = rb_entry(parent, struct binder_buffer, rb_node);
        BUG_ON(!buffer->free);
        // Compute the size of this free buffer
        buffer_size = binder_buffer_size(proc, buffer);

        if (new_buffer_size < buffer_size)
            p = &parent->rb_left;
        else
            p = &parent->rb_right;
    }
    rb_link_node(&new_buffer->rb_node, parent, p);
    // Insert the new buffer into the proc->free_buffers rbtree, keeping it sorted by size
    rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}

What binder_mmap does

  • 1. Reserve a kernel virtual address range the same size as the process's user-space virtual memory range.
  • 2. Allocate a single 4KB physical page up front; further pages are allocated on demand during communication, so no memory is wasted.
  • 3. Map that physical page into both the user-space virtual address range and the kernel one.
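For reference, the user-space side of this mapping, a sketch of what libbinder's ProcessState does (the 1MB-minus-two-pages size and the mmap flags are taken from AOSP's ProcessState.cpp; treat the details as version-dependent):

#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

/* libbinder's mapping size: 1MB minus two pages */
#define BINDER_VM_SIZE ((1 * 1024 * 1024) - sysconf(_SC_PAGE_SIZE) * 2)

int open_and_map_binder(void)
{
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    if (fd < 0)
        return -1;

    /* PROT_READ only: user space never writes this buffer directly;
     * the driver fills it when delivering transactions */
    void *base = mmap(NULL, BINDER_VM_SIZE, PROT_READ,
                      MAP_PRIVATE | MAP_NORESERVE, fd, 0);
    if (base == MAP_FAILED) {
        close(fd);
        return -1;
    }
    return fd;
}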

2.4 binder_ioctl

kernel/drivers/staging/android/binder.c

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    /*pr_info("binder_ioctl: %d:%d %x %lx\n",
            proc->pid, current->pid, cmd, arg);*/

    trace_binder_ioctl(cmd, arg);
    //Wait until binder_stop_on_user_error < 2; normally true, so this returns
    //immediately (it only blocks when the driver is halted on user errors for debugging)
    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        goto err_unlocked;

    binder_lock(__func__);
    // Look up the binder_thread for the calling thread in proc's threads rbtree;
    // if the thread has already been added to the proc, return it directly,
    // otherwise create a new binder_thread and add it to this proc
    thread = binder_get_thread(proc);
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {
    //Perform the binder read/write operations
    case BINDER_WRITE_READ:
        ret = binder_ioctl_write_read(filp, cmd, arg, thread);
        if (ret)
            goto err;
        break;
    case BINDER_SET_MAX_THREADS:
        if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
            ret = -EINVAL;
            goto err;
        }
        break;
    case BINDER_SET_CONTEXT_MGR:
        ret = binder_ioctl_set_ctx_mgr(filp);
        if (ret)
            goto err;
        break;
    case BINDER_THREAD_EXIT:
        binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
                 proc->pid, thread->pid);
        binder_free_thread(proc, thread);
        thread = NULL;
        break;
    case BINDER_VERSION: {
        struct binder_version __user *ver = ubuf;

        if (size != sizeof(struct binder_version)) {
            ret = -EINVAL;
            goto err;
        }
        if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
                 &ver->protocol_version)) {
            ret = -EINVAL;
            goto err;
        }
        break;
    }
    default:
        ret = -EINVAL;
        goto err;
    }
    ret = 0;
err:
    if (thread)
        thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
    binder_unlock(__func__);
    wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret && ret != -ERESTARTSYS)
        pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
    trace_binder_ioctl_done(ret);
    return ret;
}

2.4.1 binder_ioctl_write_read

kernel/drivers/staging/android/binder.c

static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

    if (size != sizeof(struct binder_write_read)) {
        ret = -EINVAL;
        goto out;
    }
    // Copy the binder_write_read header from user space (ubuf) into kernel space (bwr);
    // only this header is copied here, not the actual payload
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
             "%d:%d write %lld at %016llx, read %lld at %016llx\n",
             proc->pid, thread->pid,
             (u64)bwr.write_size, (u64)bwr.write_buffer,
             (u64)bwr.read_size, (u64)bwr.read_buffer);

    if (bwr.write_size > 0) {
        // The caller supplied write data, so perform the binder write
        ret = binder_thread_write(proc, thread,
                      bwr.write_buffer,
                      bwr.write_size,
                      &bwr.write_consumed);
        trace_binder_write_done(ret);
        if (ret < 0) {
            bwr.read_consumed = 0;
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    if (bwr.read_size > 0) {
        // The caller supplied a read buffer, so perform the binder read
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
                     bwr.read_size,
                     &bwr.read_consumed,
                     filp->f_flags & O_NONBLOCK);
        trace_binder_read_done(ret);
        // If the process's todo queue is not empty, wake up threads waiting on it
        if (!list_empty(&proc->todo))
            wake_up_interruptible(&proc->wait);
        if (ret < 0) {
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
             "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
             proc->pid, thread->pid,
             (u64)bwr.write_consumed, (u64)bwr.write_size,
             (u64)bwr.read_consumed, (u64)bwr.read_size);
    //Copy the updated header (bwr, with the consumed counts) back to user space (ubuf)
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}

ioctl(BINDER_WRITE_READ) reads from and writes to kernel space, executing the BINDER_WRITE_READ command.

  • binder_ioctl mainly performs these read/write operations
  • Only the binder_write_read header is copied between kernel space and user space at this point, not the actual payload (a user-space sketch follows)

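A sketch of driving this from user space: the caller fills a binder_write_read header that points at its command buffer and hands it to ioctl(). BC_ENTER_LOOPER is used here because it is a complete 4-byte command with no payload (header path as in the earlier sketch; it may differ on older kernels):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* struct binder_write_read, BC_* */

int enter_looper(int binder_fd)
{
    uint32_t cmd = BC_ENTER_LOOPER;     /* one 4-byte command, no payload */
    struct binder_write_read bwr;

    memset(&bwr, 0, sizeof(bwr));
    bwr.write_buffer = (uintptr_t)&cmd; /* command stays in user memory */
    bwr.write_size   = sizeof(cmd);     /* the driver copies only the header,
                                           then consumes commands from here */

    /* binder_ioctl -> binder_ioctl_write_read -> binder_thread_write */
    return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}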