Binder Memory Allocation

Newer Android kernels (the split landed around the Android 8.0 timeframe) have moved the binder_buffer related operations out of binder.c into a separate file, binder_alloc.c.
I. binder_alloc_mmap_handler performs the mapping. First, look at this structure:

struct binder_alloc {
    struct mutex mutex;
    struct vm_area_struct *vma;
    struct mm_struct *vma_vm_mm;
    void *buffer;                     /* the mapped address lives here */
    ptrdiff_t user_buffer_offset;     /* offset between kernel and user addresses */
    struct list_head buffers;         /* list of all buffers */
    struct rb_root free_buffers;      /* reserved but not yet backed by pages, sorted by size */
    struct rb_root allocated_buffers; /* allocated buffers, sorted by address */
    size_t free_async_space;          /* space available for asynchronous transactions */
    struct binder_lru_page *pages;    /* all pages backing the mapping */
    size_t buffer_size;               /* total size of the mapped area */
    uint32_t buffer_free;             /* number of free buffers */
    int pid;
};

The alloc parameter is per-process state (a binder_alloc is embedded in each binder_proc); vma is the VMA created by the caller's mmap:

int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                              struct vm_area_struct *vma);
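
For context, this handler runs when user space mmaps the binder fd. Below is a minimal sketch of the user-space side; the size expression mirrors what libbinder's ProcessState uses (about 1 MB minus two pages), but treat the exact constants as an assumption:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    size_t vm_size = 1 * 1024 * 1024 - sysconf(_SC_PAGE_SIZE) * 2;

    /* This mmap reaches binder_mmap in the driver, which in turn calls
     * binder_alloc_mmap_handler with the resulting vma. */
    void *base = mmap(NULL, vm_size, PROT_READ,
                      MAP_PRIVATE | MAP_NORESERVE, fd, 0);
    return base == MAP_FAILED ? 1 : 0;
}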
The flow of this function is fairly simple; the key steps are as follows:
1.

area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
alloc->buffer = area->addr;
alloc->user_buffer_offset = vma->vm_start - (uintptr_t)alloc->buffer;

Reserve a kernel virtual address range and initialize alloc->buffer and alloc->user_buffer_offset. Note that get_vm_area only reserves virtual addresses; no physical pages are allocated yet.
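
Since the kernel range and the user VMA will cover the same physical pages at a constant distance, converting a kernel-side buffer address into the matching user-side address is a single addition. A minimal sketch (binder_kaddr_to_uaddr is a hypothetical helper, not in the kernel source):

static void __user *binder_kaddr_to_uaddr(struct binder_alloc *alloc,
                                          void *kaddr)
{
    /* user address = kernel address + (vma->vm_start - alloc->buffer) */
    return (void __user *)((uintptr_t)kaddr + alloc->user_buffer_offset);
}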
2.

alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
                       ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
                       GFP_KERNEL);

Allocate the pages array, one bookkeeping entry per page of the mapping.
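
Each entry tracks one page of the mapping so that idle pages can later be reclaimed by a shrinker. For reference, the entry is roughly the following (as defined in binder_alloc.h of recent kernels; treat the exact fields as an assumption that may vary by version):

struct binder_lru_page {
    struct list_head lru;   /* entry on the binder shrinker's LRU list */
    struct page *page_ptr;  /* backing page, or NULL when not resident */
};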
3.

buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
buffer->data = alloc->buffer;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;

Place a single binder_buffer spanning the whole area into free_buffers, and set free_async_space to half of the total space; with the roughly 1 MB mapping that libbinder requests, for example, asynchronous transactions can use at most about 512 KB.
II. binder_alloc_new_buf allocates a binder_buffer; it takes alloc->mutex and delegates to binder_alloc_new_buf_locked.
The key steps are as follows:
1. Compute the size to allocate: the pointer-aligned sum of data_size, offsets_size, and extra_buffers_size.
2.

struct rb_node *n = alloc->free_buffers.rb_node;

while (n) {
    buffer = rb_entry(n, struct binder_buffer, rb_node);
    BUG_ON(!buffer->free);
    buffer_size = binder_alloc_buffer_size(alloc, buffer);

    if (size < buffer_size) {
        best_fit = n;
        n = n->rb_left;
    } else if (size > buffer_size)
        n = n->rb_right;
    else {
        best_fit = n;
        break;
    }
}
if (n == NULL) {
    buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
    buffer_size = binder_alloc_buffer_size(alloc, buffer);
}

This is a best-fit search over free_buffers: the loop walks the size-sorted tree looking for the smallest free buffer that still fits, stopping early on an exact match. Now look at how a buffer's size is determined:

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                       struct binder_buffer *buffer)
{
    if (list_is_last(&buffer->entry, &alloc->buffers))
        return (u8 *)alloc->buffer +
            alloc->buffer_size - (u8 *)buffer->data;
    return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}

If the buffer is the last member of the list, its size is the end of the alloc area minus the buffer's start address. For example, right after binder_alloc_mmap_handler returns, there is a single buffer whose buffer->data equals alloc->buffer, so the total size is returned. Otherwise, the size is the next buffer's start minus this buffer's start. This shows that binder_buffer payloads are laid out contiguously within the single area reserved in alloc->buffer: a buffer's size is never stored, it is implied by its neighbor.
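
A minimal user-space mock of this "size is the gap to the next buffer" idea (simplified types, purely illustrative):

#include <stddef.h>
#include <stdio.h>

struct fake_buffer { void *data; };

/* area_end plays the role of alloc->buffer + alloc->buffer_size */
static size_t fake_buffer_size(struct fake_buffer *b,
                               struct fake_buffer *next, void *area_end)
{
    if (!next)  /* last buffer: extends to the end of the area */
        return (char *)area_end - (char *)b->data;
    return (char *)next->data - (char *)b->data;
}

int main(void)
{
    char area[1024];
    struct fake_buffer a = { area }, b = { area + 256 };

    printf("%zu\n", fake_buffer_size(&a, &b, area + sizeof(area)));   /* 256 */
    printf("%zu\n", fake_buffer_size(&b, NULL, area + sizeof(area))); /* 768 */
    return 0;
}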
3.

ret = binder_update_page_range(alloc, 1,
        (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);

The preceding code page-aligns the start and end addresses; binder_update_page_range then allocates physical pages for the range.
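
Conceptually, the allocate path of binder_update_page_range does roughly the following for every page in the range. This is a simplified sketch under the structures shown earlier, not the real function body (the real function also maintains the shrinker LRU, establishes the kernel-side mapping, and unwinds on error; details vary across kernel versions):

static int binder_update_page_range_sketch(struct binder_alloc *alloc,
                                           void *start, void *end)
{
    void *page_addr;

    for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
        size_t index = ((uintptr_t)page_addr -
                        (uintptr_t)alloc->buffer) / PAGE_SIZE;
        struct page *page;

        /* back this part of the range with a fresh, zeroed physical page */
        page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
        if (!page)
            return -ENOMEM;
        alloc->pages[index].page_ptr = page;

        /* map the same page at the matching user-space address */
        if (vm_insert_page(alloc->vma,
                           (uintptr_t)page_addr + alloc->user_buffer_offset,
                           page))
            return -ENOMEM;
    }
    return 0;
}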
4.

rb_erase(best_fit, &alloc->free_buffers);
buffer->free = 0;
buffer->free_in_progress = 0;
binder_insert_allocated_buffer_locked(alloc, buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                   "%d: binder_alloc_buf size %zd got %pK\n",
                   alloc->pid, size, buffer);
buffer->data_size = data_size;
buffer->offsets_size = offsets_size;
buffer->async_transaction = is_async;
buffer->extra_buffers_size = extra_buffers_size;

Remove the buffer from free_buffers and insert it into allocated_buffers; the allocation is complete. (Not shown here: if the best-fit buffer is larger than needed, the remainder is first split off into a new free buffer.)
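
For context, the transaction path is the main caller of this allocator. The call below mirrors the one in binder_transaction in binder.c (t, tr, target_proc, and extra_buffers_size are locals of that function):

t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
                                 tr->offsets_size, extra_buffers_size,
                                 !(t->flags & TF_ONE_WAY));

A one-way (asynchronous) transaction passes is_async = true, which makes the allocation count against free_async_space.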
III. Freeing

void binder_alloc_free_buf(struct binder_alloc *alloc,
                           struct binder_buffer *buffer)

1.

binder_update_page_range(alloc, 0,
        (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);

Free the backing pages; passing 0 as the allocate argument makes binder_update_page_range release the pages in the range rather than allocate them.
2.

rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;

Remove the buffer from the allocated red-black tree and mark it free again.
3.

if (!list_is_last(&buffer->entry, &alloc->buffers)) {
    struct binder_buffer *next = binder_buffer_next(buffer);

    if (next->free) {
        rb_erase(&next->rb_node, &alloc->free_buffers);
        binder_delete_free_buffer(alloc, next);
    }
}
if (alloc->buffers.next != &buffer->entry) {
    struct binder_buffer *prev = binder_buffer_prev(buffer);

    if (prev->free) {
        binder_delete_free_buffer(alloc, buffer);
        rb_erase(&prev->rb_node, &alloc->free_buffers);
        buffer = prev;
    }
}
binder_insert_free_buffer(alloc, buffer);

This part first checks whether the buffer can be merged with adjacent free buffers, merging where possible, and then inserts the resulting binder_buffer into free_buffers. For example, with adjacent buffers A|B|C where B is being freed and C is already free, C is removed and deleted so that B's extent absorbs it; if A is free as well, B is deleted in turn and the merged A is what gets reinserted.
Summary: two red-black trees hold the allocated and free buffers, and adjacent free buffers are coalesced. free_buffers is sorted by size, which makes it easy to find a suitably sized block to allocate; allocated_buffers is sorted by address, which makes a buffer easy to look up when it is released. Other details are left for another time.
My understanding here is limited; corrections are welcome.
