Before inserting a new memory region into the data structures, the kernel must check that the virtual address space contains enough free space for a region of the given length. This work is done by get_unmapped_area().
Before analysing get_unmapped_area(), let us briefly look at the layout of the process address space.
The classic layout of the process address space:
Drawback of the classic layout: on x86_32 the virtual address space runs from 0 to 0xc0000000, so each user process has 3 GiB available. TASK_UNMAPPED_BASE normally starts at 0x40000000 (i.e. 1 GiB). This means the heap has only about 1 GiB to grow into before it runs into the mmap area, and in this layout the mmap area grows bottom-up.
To work around this problem, a new virtual address space layout was introduced:
It differs from the classic layout in that a fixed value limits the maximum size of the stack. Because the stack is bounded, the region for memory mappings can start immediately below the end of the stack, and the mmap area then grows top-down. Since the heap still lives in the lower part of the virtual address space and grows upward, the mmap area and the heap can expand toward each other until the remaining virtual address space is exhausted.
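In the top-down layout, mm->mmap_base is essentially computed as TASK_SIZE minus the space reserved for the stack minus the ASLR offset. The user-space sketch below only models that computation: TASK_SIZE, the MIN_GAP/MAX_GAP clamping values and the alignment macro are illustrative assumptions, not the exact definitions from arch/x86/mm/mmap.c.
#include <stdio.h>
#include <sys/resource.h>

/* Illustrative values for x86_32; the real kernel derives these per-architecture. */
#define TASK_SIZE	0xc0000000UL
#define PAGE_SIZE	4096UL
#define MIN_GAP		(128UL * 1024 * 1024)	/* assumed lower clamp for the stack gap */
#define MAX_GAP		(TASK_SIZE / 6 * 5)	/* assumed upper clamp for the stack gap */
#define PAGE_ALIGN_DOWN(x)	((x) & ~(PAGE_SIZE - 1))

/* Model of mmap_base(): leave room for the stack below TASK_SIZE, then
 * subtract the randomization offset. */
static unsigned long model_mmap_base(unsigned long rnd)
{
	struct rlimit rl;
	unsigned long gap = MIN_GAP;

	if (getrlimit(RLIMIT_STACK, &rl) == 0)
		gap = rl.rlim_cur;
	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN_DOWN(TASK_SIZE - gap - rnd);
}

int main(void)
{
	/* With the usual 8 MiB stack limit the gap is clamped to MIN_GAP,
	 * so this prints 0xb8000000. */
	printf("modelled mmap_base = %#lx\n", model_mmap_base(0));
	return 0;
}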
The layout is selected by arch_pick_mmap_layout(). arch_get_unmapped_area() creates new mappings from low addresses toward high addresses, while arch_get_unmapped_area_topdown() creates new mappings from high addresses toward low addresses.
include/linux/sched.h
...
#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
...
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif
mm/util.c
...
/* HAVE_ARCH_PICK_MMAP_LAYOUT: does the architecture want to choose between different mmap layouts itself? */
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
/* classic layout */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif
arch/x86/mm/mmap.c
...
/*
* This function, called very early during the creation of a new
* process VM image, sets up which VM layout function to use:
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	/*
	 * If PF_RANDOMIZE is set, the kernel does not pick fixed positions
	 * for the start of the stack and the memory mappings; instead it
	 * randomizes these values each time a new process is started.
	 */
	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	mm->mmap_legacy_base = mmap_legacy_base(random_factor);

	if (mmap_is_legacy()) {
		mm->mmap_base = mm->mmap_legacy_base;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
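The branch between the two layouts is taken by mmap_is_legacy(). To the best of my understanding of kernels of this era, the classic layout is used when the process personality requests ADDR_COMPAT_LAYOUT, when the stack rlimit is unlimited (an unbounded stack rules out the top-down layout), or when the legacy_va_layout sysctl is set. The following is only a user-space model of that decision, not the kernel function itself:
#include <stdbool.h>
#include <sys/personality.h>
#include <sys/resource.h>

/* Stand-in for the kernel's sysctl (/proc/sys/vm/legacy_va_layout). */
static int legacy_va_layout_sysctl = 0;

/* User-space model of the layout decision made by mmap_is_legacy(). */
static bool model_mmap_is_legacy(void)
{
	struct rlimit rl;

	/* Old binaries may request the classic layout explicitly. */
	if (personality(0xffffffff) & ADDR_COMPAT_LAYOUT)
		return true;

	/* An unlimited stack cannot be bounded, so top-down is impossible. */
	if (getrlimit(RLIMIT_STACK, &rl) == 0 && rl.rlim_cur == RLIM_INFINITY)
		return true;

	return legacy_va_layout_sysctl != 0;
}

int main(void)
{
	/* Exit status 0 means the classic layout would be chosen. */
	return model_mmap_is_legacy() ? 0 : 1;
}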
Now let us look at some of the details of get_unmapped_area().
unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	unsigned long error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;

	/* Careful about overflows.. */
	if (len > TASK_SIZE)
		return -ENOMEM;

	get_area = current->mm->get_unmapped_area;
	/*
	 * The allocator depends on whether the linear address range is used
	 * for a file-backed or an anonymous memory mapping.
	 */
	if (file && file->f_op->get_unmapped_area)
		get_area = file->f_op->get_unmapped_area;
	/*
	 * For anonymous mappings, or file mappings whose file does not
	 * supply its own allocator, call current->mm->get_unmapped_area,
	 * i.e. arch_get_unmapped_area or arch_get_unmapped_area_topdown.
	 */
	addr = get_area(file, addr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (addr > TASK_SIZE - len)
		return -ENOMEM;
	if (offset_in_page(addr))
		return -EINVAL;

	addr = arch_rebalance_pgtables(addr, len);
	error = security_mmap_addr(addr);
	return error ? error : addr;
}
EXPORT_SYMBOL(get_unmapped_area);
Take arch_get_unmapped_area() as an example. If addr is non-zero, a preferred address has been requested, and the kernel checks whether that range overlaps an existing region; the lookup is performed by find_vma(). If addr is zero, or the preferred address does not satisfy the allocation constraints, the kernel must search the available areas of the process address space for a free region of suitable size; the actual work is done by vm_unmapped_area().
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	/* MAP_FIXED: the mapping must be created at exactly this address */
	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		/*
		 * find_vma() returns the first VMA that satisfies
		 * addr < vm_area_struct->vm_end.
		 * vma == NULL: addr lies above the end of every existing
		 * region, i.e. no VMA follows it.
		 * vma != NULL: the loop in find_vma() left with tmp == NULL
		 * (tmp points to the current rbtree node there).
		 */
		vma = find_vma(mm, addr);
		/*
		 * The checks below verify that:
		 * 1. the requested length fits into the process address space;
		 * 2. the start of the new range lies at or above mmap_min_addr
		 *    (the lowest address at which mappings may be created);
		 * 3. no VMA follows addr, or
		 * 4. if one does, the new range [addr, addr+len) does not
		 *    overlap it.
		 */
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;
	return vm_unmapped_area(&info);
}
/*
* Search for an unmapped address range.
*
* We are looking for a range that:
* - does not intersect with any VMA;
* - is contained within the [low_limit, high_limit) interval;
* - is at least the desired size.
* - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
*/
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	/*
	 * arch_get_unmapped_area() allocates from low to high addresses, so
	 * it leaves info->flags at 0 and the plain unmapped_area() is used.
	 */
	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		return unmapped_area_topdown(info);
	else
		return unmapped_area(info);
}
Before analysing unmapped_area(), it is worth being clear about what the rb_subtree_gap field of struct vm_area_struct means. The patch at http://patchwork.ozlabs.org/patch/197340/ explains it as follows:
Define vma->rb_subtree_gap as the largest gap between any vma in the subtree rooted at that vma, and their predecessor. Or, for a recursive definition, vma->rb_subtree_gap is the max of:
- vma->vm_start - vma->vm_prev->vm_end
- rb_subtree_gap fields of the vmas pointed by vma->rb.rb_left and
vma->rb.rb_right
In other words, rb_subtree_gap is the maximum of the gap between the current node and its predecessor and the largest gap cached in the nodes of its left and right subtrees.
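To make the recursive definition concrete, here is a small self-contained sketch. It is not kernel code: toy_vma is a stripped-down stand-in for vm_area_struct, and the function recomputes the field bottom-up over a whole tree, whereas the kernel maintains the cached value incrementally when VMAs are inserted, removed or resized.
/* Toy stand-in for vm_area_struct: only the fields the definition needs. */
struct toy_vma {
	unsigned long vm_start, vm_end;		/* region [vm_start, vm_end) */
	struct toy_vma *vm_prev;		/* predecessor in address order */
	struct toy_vma *left, *right;		/* children in the rbtree */
	unsigned long rb_subtree_gap;
};

static unsigned long max3(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a > b ? a : b;
	return m > c ? m : c;
}

/*
 * rb_subtree_gap of a node is the maximum of
 *  - the gap between this VMA and its predecessor, and
 *  - the rb_subtree_gap values of its left and right children.
 */
unsigned long compute_subtree_gap(struct toy_vma *vma)
{
	unsigned long own_gap;

	if (!vma)
		return 0;

	own_gap = vma->vm_start - (vma->vm_prev ? vma->vm_prev->vm_end : 0);
	return vma->rb_subtree_gap = max3(own_gap,
					  compute_subtree_gap(vma->left),
					  compute_subtree_gap(vma->right));
}
Because every node caches the best gap found anywhere in its subtree, unmapped_area() can discard an entire subtree with a single comparison against the requested length, which is what keeps the search logarithmic.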
unmapped_area() first checks the limits of the address range in the process address space that may be used for the mapping; if the request cannot fit, an error code is returned to the caller. Otherwise it performs the steps below to find the lowest free gap that satisfies the request, which keeps allocations packed toward low addresses and makes it easier for adjacent VMA regions to merge.
The steps are as follows:
1. Start the traversal at the root of the VMA red-black tree.
2. If the current node has a left subtree that may contain a suitable gap, descend into it; otherwise stay at the current node.
3. The descent ends when the current node's gap may be the lowest one able to satisfy the request.
4. Check this node: if the gap between it and its predecessor satisfies the request, leave the loop.
5. If it does not, move to the right child and test whether the right child's rb_subtree_gap can satisfy the request.
6. If it can, go back to step 2; if it cannot, climb back up to the parent node and return to step 4.
unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
	/*
	 * We implement the search by looking for an rbtree node that
	 * immediately follows a suitable gap. That is,
	 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
	 * - gap_end = vma->vm_start >= info->low_limit + length;
	 * - gap_end - gap_start >= length
	 */
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long length, low_limit, high_limit, gap_start, gap_end;

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)
		return -ENOMEM;

	/* Adjust search limits by the desired length */
	if (info->high_limit < length)
		return -ENOMEM;
	high_limit = info->high_limit - length;

	if (info->low_limit > high_limit)
		return -ENOMEM;
	low_limit = info->low_limit + length;

	/* Check if rbtree root looks promising */
	if (RB_EMPTY_ROOT(&mm->mm_rb))
		goto check_highest;
	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
	if (vma->rb_subtree_gap < length)
		goto check_highest;

	while (true) {
		/* Visit left subtree if it looks promising */
		/* Search the low addresses first. */
		gap_end = vma->vm_start;
		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
			struct vm_area_struct *left =
				rb_entry(vma->vm_rb.rb_left,
					 struct vm_area_struct, vm_rb);
			/*
			 * Keep descending as long as the left subtree may
			 * still contain a suitable gap: this finds the
			 * lowest usable gap, so addresses are handed out
			 * from low to high, which makes it easier for
			 * adjacent VMAs to merge.
			 */
			if (left->rb_subtree_gap >= length) {
				vma = left;
				continue;
			}
		}

		/*
		 * The gap in front of the current node is now the lowest
		 * candidate that may still satisfy the request.
		 */
		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
check_current:
		/* Check if current node has a suitable gap */
		if (gap_start > high_limit)
			return -ENOMEM;
		if (gap_end >= low_limit && gap_end - gap_start >= length)
			goto found;

		/* Visit right subtree if it looks promising */
		/*
		 * The gap between the current node and its predecessor is
		 * not big enough either, so look at the right child's
		 * rb_subtree_gap.
		 */
		if (vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,
					 struct vm_area_struct, vm_rb);
			/*
			 * Can the subtree rooted at the right child satisfy
			 * the request?
			 * case 1: yes - continue the search at that child
			 *         (descending into its left subtree first).
			 * case 2: no  - neither subtree of the current node
			 *         contains a suitable gap, so climb back up
			 *         to the parent.
			 */
			if (right->rb_subtree_gap >= length) {	/* case 1 */
				vma = right;
				continue;
			}
		}

		/* Go back up the rbtree to find next candidate node */
		while (true) {				/* case 2 */
			struct rb_node *prev = &vma->vm_rb;
			if (!rb_parent(prev))
				goto check_highest;
			vma = rb_entry(rb_parent(prev),
				       struct vm_area_struct, vm_rb);
			/*
			 * Only when we come back up from a left child is the
			 * parent's own gap (between the parent and its
			 * predecessor) still unchecked, so check it next.
			 */
			if (prev == vma->vm_rb.rb_left) {
				gap_start = vma->vm_prev->vm_end;
				gap_end = vma->vm_start;
				goto check_current;
			}
		}
	}

check_highest:
	/* Check highest gap, which does not precede any rbtree node */
	gap_start = mm->highest_vm_end;
	gap_end = ULONG_MAX;  /* Only for VM_BUG_ON below */
	if (gap_start > high_limit)
		return -ENOMEM;

found:
	/* We found a suitable gap. Clip it with the original low_limit. */
	if (gap_start < info->low_limit)
		gap_start = info->low_limit;

	/* Adjust gap address to the desired alignment */
	gap_start += (info->align_offset - gap_start) & info->align_mask;

	VM_BUG_ON(gap_start + info->length > info->high_limit);
	VM_BUG_ON(gap_start + info->length > gap_end);
	return gap_start;
}
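The tree walk above is an efficient way of computing something that is easy to state over the address-sorted list of VMAs: return the start of the lowest gap of at least length bytes that fits inside [low_limit, high_limit). The self-contained sketch below (my own model, not kernel code, and ignoring the alignment handling) performs the same search linearly over a sorted array, which is handy for convincing yourself what the rbtree version must return.
#include <stdio.h>

struct range {
	unsigned long start, end;	/* a mapped region [start, end) */
};

/*
 * Linear model of unmapped_area(): scan the gap before each mapping (and the
 * gap above the last one) from low to high and return the first one that can
 * hold 'length' bytes inside [low_limit, high_limit).
 * Returns 0 on failure (the kernel returns -ENOMEM instead).
 */
static unsigned long find_lowest_gap(const struct range *vmas, int nr,
				     unsigned long length,
				     unsigned long low_limit,
				     unsigned long high_limit)
{
	unsigned long gap_start = 0;

	for (int i = 0; i <= nr; i++) {
		unsigned long gap_end = (i < nr) ? vmas[i].start : high_limit;
		unsigned long start = gap_start < low_limit ? low_limit : gap_start;

		if (start + length <= gap_end && start + length <= high_limit)
			return start;
		if (i < nr)
			gap_start = vmas[i].end;
	}
	return 0;
}

int main(void)
{
	/* Three mappings; the hole between the last two is 0x3000 bytes long. */
	struct range vmas[] = {
		{ 0x10000000UL, 0x10004000UL },
		{ 0x10004000UL, 0x10008000UL },
		{ 0x1000b000UL, 0x10010000UL },
	};

	unsigned long addr = find_lowest_gap(vmas, 3, 0x2000UL,
					     0x10000000UL, 0x20000000UL);
	printf("lowest suitable gap starts at %#lx\n", addr);	/* 0x10008000 */
	return 0;
}
unmapped_area_topdown() implements the mirror image of this search: it looks for the highest suitable gap below high_limit instead of the lowest one above low_limit.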