在实际需要某个虚拟内存区域的数据之前,虚拟和物理内存之间的关联不会建立。如果进程访问的虚拟地址空间部分尚未与页帧关联,处理器自动地引发一个缺页异常,内核必须处理此异常。
缺页处理的实现因处理器的不同而有所不同。由于CPU采用了不同的内存管理概念,生成缺页异常的细节也不太相同。因此,缺页异常的处理例程在内核代码中位于特定于体系结构的部分。
CPU通过地址总线可以访问连接在地址总线上的所有外设,包括物理内存、IO设备等等,但是从CPU发出的访问地址并非是这些外设在地址总线上的物理地址,而是一个虚拟地址,由MMU将虚拟地址转化成物理地址再从地址总线上发出。MMU上的这种虚拟地址和物理地址的映射关系是需要创建的,并且MMU还可以设置这个物理页是否可以进行写操作。当没有创建一个虚拟地址到物理地址的映射,或者创建了这样的映射但是那个物理页不可写的时候,MMU将会通知CPU产生一个缺页异常。
/*
 * __do_page_fault() - architecture-specific x86 page fault handler.
 * @regs:       register state saved when the fault was taken
 * @error_code: hardware page-fault error code (PF_* bits)
 * @address:    faulting virtual address (read from CR2 by the caller)
 *
 * Classifies the fault (kernel-space vs. user-space address, spurious
 * TLB fault, reserved-bit error, fault in atomic context), locates the
 * VMA covering the address and delegates the generic, architecture-
 * independent work to handle_mm_fault().
 */
static noinline void
__do_page_fault(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
struct vm_area_struct *vma; //VMA: describes one contiguous virtual memory region
struct task_struct *tsk; //process descriptor
struct mm_struct *mm; //memory descriptor of the process
int fault, major = 0;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
//Get the process descriptor of the task currently running on this CPU,
//then get that task's memory descriptor.
tsk = current;
mm = tsk->mm;
/*
 * Detect and handle instructions that would cause a page fault for
 * both a tracked kernel page and a userspace page.
 */
if (kmemcheck_active(regs))
kmemcheck_hide(regs);
prefetchw(&mm->mmap_sem);
if (unlikely(kmmio_fault(regs, address)))
return;
/*
 * We fault-in kernel-space virtual memory on-demand. The
 * 'reference' page table is init_mm.pgd.
 *
 * NOTE! We MUST NOT take any locks for this case. We may
 * be in an interrupt or a critical region, and should
 * only copy the information from the master page table,
 * nothing more.
 *
 * This verifies that the fault happens in kernel space
 * (error_code & 4) == 0, and that the fault was not a
 * protection error (error_code & 9) == 0.
 */
//A faulting address in kernel space does not necessarily mean the fault
//happened in kernel mode: user mode may have accessed a kernel address.
if (unlikely(fault_in_kernel_space(address))) {
if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
//Check whether the faulting address lies in the vmalloc area; if so,
//handle it by synchronizing the relevant entries from the kernel
//master page table into the process page table.
if (vmalloc_fault(address) >= 0)
return;
if (kmemcheck_fault(regs, address, error_code))
return;
}
/* spurious_fault checks whether the fault was merely caused by a stale
 * TLB (translation lookaside buffer, the page-table cache) entry */
if (spurious_fault(error_code, address))
return;
/* kprobes don't want to hook the spurious faults: */
if (kprobes_fault(regs))
return;
/*
 * Don't take the mm semaphore here. If we fixup a prefetch
 * fault we could otherwise deadlock:
 */
//The faulting address is in kernel space and it is not a vmalloc fault.
//A legitimate kernel-mode fault can only occur in the vmalloc area, so
//anything else is treated as a kernel error.
bad_area_nosemaphore(regs, error_code, address, NULL);
return;
}
/* kprobes don't want to hook the spurious faults: */
if (unlikely(kprobes_fault(regs)))
return;
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
if (unlikely(smap_violation(error_code, regs))) {
bad_area_nosemaphore(regs, error_code, address, NULL);
return;
}
/*
 * If we're in an interrupt, have no user context or are running
 * in a region with pagefaults disabled then we must not take the fault
 */
if (unlikely(faulthandler_disabled() || !mm)) {
bad_area_nosemaphore(regs, error_code, address, NULL);
return;
}
/*
 * It's safe to allow irq's after cr2 has been saved and the
 * vmalloc fault has been handled.
 *
 * User-mode registers count as a user access even for any
 * potential system fault or CPU buglet:
 */
//Re-enabling interrupts here shortens the time interrupts stay disabled
//because of the page fault.
if (user_mode(regs)) {
local_irq_enable();
error_code |= PF_USER;
flags |= FAULT_FLAG_USER;
} else {
if (regs->flags & X86_EFLAGS_IF)
local_irq_enable();
}
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (error_code & PF_WRITE)
flags |= FAULT_FLAG_WRITE;
if (error_code & PF_INSTR)
flags |= FAULT_FLAG_INSTRUCTION;
/*
 * When running in the kernel we expect faults to occur only to
 * addresses in user space. All other faults represent errors in
 * the kernel and should generate an OOPS. Unfortunately, in the
 * case of an erroneous fault occurring in a code path which already
 * holds mmap_sem we will deadlock attempting to validate the fault
 * against the address space. Luckily the kernel only validly
 * references user space from well defined areas of code, which are
 * listed in the exceptions table.
 *
 * As the vast majority of faults will be valid we will only perform
 * the source reference check when there is a possibility of a
 * deadlock. Attempt to lock the address space, if we cannot we then
 * validate the source. If this is invalid we can skip the address
 * space check, thus avoiding the deadlock:
 */
if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
//The fault happened in kernel context; in that case the faulting
//address may only legitimately be a user-space address (verified via
//the exception tables below).
if ((error_code & PF_USER) == 0 &&
!search_exception_tables(regs->ip)) {
bad_area_nosemaphore(regs, error_code, address, NULL);
return;
}
retry://reached when the fault came from user mode or the kernel ip has an exception-table fixup, i.e. it is not a kernel bug
down_read(&mm->mmap_sem);
} else {
/*
 * The above down_read_trylock() might have succeeded in
 * which case we'll have missed the might_sleep() from
 * down_read():
 */
might_sleep();
}
//Look up the VMA covering the faulting address in the current process's
//address space.
vma = find_vma(mm, address);
//No VMA found: invalid access (bad_area releases the semaphore).
if (unlikely(!vma)) {
bad_area(regs, error_code, address);
return;
}
//A VMA was found and the faulting address lies inside its valid range:
//this is a normal demand-paging fault, so go allocate physical memory.
if (likely(vma->vm_start <= address))
goto good_area;
//The address is below the VMA and the VMA is not a downward-growing
//stack region, so the process accessed an invalid address: bad_area.
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
bad_area(regs, error_code, address);
return;
}
if (error_code & PF_USER) {
/*
 * Accessing the stack below %sp is always a bug.
 * The large cushion allows instructions like enter
 * and pusha to work. ("enter $65535, $31" pushes
 * 32 pointers and then decrements %sp by 65535.)
 */
if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
bad_area(regs, error_code, address);
return;
}
}
//The faulting address is in the stack area, so the stack must be grown.
//(The stack's virtual address space is also allocated and expanded on
//demand, rather than fully set up in advance.)
if (unlikely(expand_stack(vma, address))) {
bad_area(regs, error_code, address);
return;
}
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
//Normal demand-paging fault: request the page and allocate physical memory.
good_area:
if (unlikely(access_error(error_code, vma))) {
bad_area_access_error(regs, error_code, address, vma);
return;
}
/*
 * If for any reason at all we couldn't handle the fault,
 * make sure we exit gracefully rather than endlessly redo
 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
 */
//From handle_mm_fault() onward, the code is shared by all processor
//architectures; it handles page faults on user-space addresses.
fault = handle_mm_fault(vma, address, flags);
major |= fault & VM_FAULT_MAJOR;
/*
 * If we need to retry the mmap_sem has already been released,
 * and if there is a fatal signal pending there is no guarantee
 * that we made any progress. Handle this case first.
 */
if (unlikely(fault & VM_FAULT_RETRY)) {
/* Retry at most once */
if (flags & FAULT_FLAG_ALLOW_RETRY) {
flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
if (!fatal_signal_pending(tsk))
goto retry;
}
/* User mode? Just return to handle the fatal exception */
if (flags & FAULT_FLAG_USER)
return;
/* Not returning to user mode? Handle exceptions or die: */
no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
return;
}
up_read(&mm->mmap_sem);
if (unlikely(fault & VM_FAULT_ERROR)) {
mm_fault_error(regs, error_code, address, vma, fault);
return;
}
/*
 * Major/minor page fault accounting. If any of the events
 * returned VM_FAULT_MAJOR, we account it as a major fault.
 */
if (major) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}
//Perform checks related to VM86 mode (compatibility with legacy environments).
check_v8086_mode(regs, address, tsk);
}```
## 3.用户空间页错误异常
用户空间页错误异常是指进程访问用户虚拟地址生成的页错误异常,分为两种情况:
进程在用户模式下访问用户虚拟地址,生成页错误异常。
进程在内核模式下访问用户虚拟地址,生成页错误异常。进程通过系统调用进入内核模式,系统调用传入用户空间的缓冲区,进程在内核模式下访问用户空间的缓冲区。
如果虚拟内存区域使用标准巨型页,则调用函数hugetlb_fault处理标准巨型页的页错误异常。如果虚拟内存区域使用普通页,则调用__handle_mm_fault处理普通页的页错误异常。
```c
/*
 * __handle_mm_fault() - walk (and allocate as needed) the page-table
 * hierarchy for @address, try the transparent-huge-page PUD/PMD paths,
 * and fall through to handle_pte_fault() for a normal-sized page.
 * Returns a VM_FAULT_* code.
 */
static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
unsigned int flags)
{
struct vm_fault vmf = {
.vma = vma,
.address = address & PAGE_MASK,
.flags = flags,
.pgoff = linear_page_index(vma, address),
.gfp_mask = __get_fault_gfp_mask(vma),
};
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgd;
p4d_t *p4d;
int ret;
//Look up the entry for the virtual address in the page global directory.
pgd = pgd_offset(mm, address);
//Look up the entry in the P4D (4th-level) directory; if that directory
//does not exist yet, allocate it first.
p4d = p4d_alloc(mm, pgd, address);
if (!p4d)
return VM_FAULT_OOM;
//Look up the entry in the page upper directory; if that directory does
//not exist yet, allocate it first.
vmf.pud = pud_alloc(mm, p4d, address);
if (!vmf.pud)
return VM_FAULT_OOM;
if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
ret = create_huge_pud(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
pud_t orig_pud = *vmf.pud;
barrier();
if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
unsigned int dirty = flags & FAULT_FLAG_WRITE;
/* NUMA case for anonymous PUDs would go here */
if (dirty && !pud_write(orig_pud)) {
ret = wp_huge_pud(&vmf, orig_pud);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
huge_pud_set_accessed(&vmf, orig_pud);
return 0;
}
}
}
vmf.pmd = pmd_alloc(mm, vmf.pud, address);
if (!vmf.pmd)
return VM_FAULT_OOM;
if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
ret = create_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
pmd_t orig_pmd = *vmf.pmd;
barrier();
if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
return do_huge_pmd_numa_page(&vmf, orig_pmd);
if ((vmf.flags & FAULT_FLAG_WRITE) &&
!pmd_write(orig_pmd)) {
ret = wp_huge_pmd(&vmf, orig_pmd);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
} else {
huge_pmd_set_accessed(&vmf, orig_pmd);
return 0;
}
}
}
return handle_pte_fault(&vmf); //process the lowest-level (PTE) page table directly
//handle_pte_fault decides what to do when the PTE is absent, when the
//PTE exists but the page is not in memory, and when the page is present.
}
函数handle_pte_fault处理直接页表源码流程如下:
/*
 * handle_pte_fault() - handle a fault at the level of the lowest (PTE)
 * page table: dispatch to the anonymous-page, file-page, swap-in, NUMA
 * or access-flags/write-protect paths depending on the PTE state.
 * Returns a VM_FAULT_* code (0 on success).
 */
static int handle_pte_fault(struct vm_fault *vmf)
{
pte_t entry;
//Look up the virtual address's entry in the lowest-level page table.
if (unlikely(pmd_none(*vmf->pmd))) {
//The PMD entry is empty, so the PTE table does not exist: set
//vmf->pte to a null pointer.
vmf->pte = NULL;
} else {
//The PMD entry is not empty, so the PTE table exists: look up the
//entry for the virtual address in it. vmf->pte holds the address of
//the entry and vmf->orig_pte holds its value; if the entry is empty
//there is no need to keep its address, so vmf->pte is reset to NULL.
if (pmd_devmap_trans_unstable(vmf->pmd))
return 0;
/*
 * A regular pmd is established and it can't morph into a huge
 * pmd from under us anymore at this point because we hold the
 * mmap_sem read mode and khugepaged takes it in write mode.
 * So now it's safe to run pte_offset_map().
 */
vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
vmf->orig_pte = *vmf->pte;
/*
 * some architectures can have larger ptes than wordsize,
 * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and
 * CONFIG_32BIT=y, so READ_ONCE or ACCESS_ONCE cannot guarantee
 * atomic accesses. The code below just needs a consistent
 * view for the ifs and we later double check anyway with the
 * ptl lock held. So here a barrier will do.
 */
barrier();
if (pte_none(vmf->orig_pte)) {
pte_unmap(vmf->pte);
vmf->pte = NULL;
}
}
//The PTE does not exist (either the PTE table is missing or the entry is empty).
if (!vmf->pte) {
//Private anonymous mapping: do_anonymous_page handles the anonymous-page fault.
if (vma_is_anonymous(vmf->vma))
return do_anonymous_page(vmf);
else
//File mapping or shared anonymous mapping: do_fault handles the file-page fault.
return do_fault(vmf);
}
//The PTE exists but the page is not in physical memory: the page was
//swapped out to the swap area, so read it back in from swap.
if (!pte_present(vmf->orig_pte))
return do_swap_page(vmf);
if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
return do_numa_page(vmf);
//From here on: the PTE exists and the page is in physical memory, so the
//fault was triggered by access permissions.
vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
//Get the address of the page-table lock. The lock has two granularities:
//coarse (one lock per process) or fine (one lock per PTE table).
spin_lock(vmf->ptl);//lock the page table
entry = vmf->orig_pte;
//Re-read the PTE value under the lock.
if (unlikely(!pte_same(*vmf->pte, entry)))
goto unlock;
//If the fault was triggered by a write access:
if (vmf->flags & FAULT_FLAG_WRITE) {
//If the PTE has no write permission, do_wp_page performs copy-on-write.
if (!pte_write(entry))
return do_wp_page(vmf);
//The PTE is writable: set the dirty bit to record that the page's
//data has been modified.
entry = pte_mkdirty(entry);
}
//Set the accessed bit of the PTE to mark the page as just referenced.
entry = pte_mkyoung(entry);
//Write the updated PTE back.
if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
vmf->flags & FAULT_FLAG_WRITE)) {
//The PTE changed: update the processor's MMU page-table cache.
update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
} else {
/*
 * This is needed only for protection faults but the arch code
 * is not yet telling us if this is a protection fault or not.
 * This still avoids useless tlb flushes for .text page faults
 * with threads.
 */
if (vmf->flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
}
unlock://release the page-table lock
pte_unmap_unlock(vmf->pte, vmf->ptl);
return 0;
}
什么情况下会发生匿名页缺页异常呢?
1)函数的局部变量比较大,或者函数调用的层次比较深,导致当前栈不够用,需要扩大栈;
2)进程调用malloc,从堆申请了内存块,只分配虚拟内存区域,还没有映射到物理页,第一次访问时触发缺页异常。
3)进程直接调用mmap,创建匿名的内存映射,只分配了虚拟内存区域,还没有映射到物理页,第一次访问时触发缺页异常。
函数do_anonymous_page处理私有匿名页的缺页异常,执行流程及源码分析如下:
static int do_anonymous_page(struct vm_fault *vmf)//handles the page fault for a private anonymous page
{
struct vm_area_struct *vma = vmf->vma;
struct mem_cgroup *memcg;
struct page *page;
pte_t entry;
/* file mapping without ->vm_ops: shared mappings must not reach here */
if (vma->vm_flags & VM_SHARED)
return VM_FAULT_SIGBUS;
/*
 * Use pte_alloc() instead of pte_alloc_map(). We can't run
 * pte_offset_map() on pmds where a huge pmd might be created
 * from a different thread.
 *
 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
 * parallel threads are excluded by other means.
 *
 * Here we only have down_read(mmap_sem).
 */
//If the PTE table does not exist, allocate it.
if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))
return VM_FAULT_OOM;
/* See the comment in pte_alloc_one_map() */
if (unlikely(pmd_trans_unstable(vmf->pmd)))
return 0;
/* For a read access, map straight to the zero page */
//If the fault was triggered by a read and the process is allowed to use
//the zero page, map the virtual page to the dedicated zero page.
if (!(vmf->flags & FAULT_FLAG_WRITE) &&
!mm_forbids_zeropage(vma->vm_mm)) {
//Build a special PTE that maps to the dedicated zero page.
entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
vma->vm_page_prot));
//Look up the entry for the virtual address in the PTE table and lock
//the page table.
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf->address, &vmf->ptl);
//If the PTE is not empty, another CPU may be concurrently modifying
//the same entry, so back off.
if (!pte_none(*vmf->pte))
goto unlock;
/* Deliver the page fault to userland, check inside PT lock */
if (userfaultfd_missing(vma)) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
return handle_userfault(vmf, VM_UFFD_MISSING);
}
//Jump to the setpte label to install the PTE.
goto setpte;
}
/* Allocate our own private page */
if (unlikely(anon_vma_prepare(vma)))
goto oom;
//Allocate a physical page, preferably from the highmem zone, and
//zero-initialize it.
page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
if (!page)
goto oom;
if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
goto oom_free_page;
/*
 * The memory barrier inside __SetPageUptodate makes sure that
 * preceeding stores to the page contents become visible before
 * the set_pte_at() write.
 */
//Mark the page descriptor's uptodate flag: the page holds valid data.
__SetPageUptodate(page);
//Build the PTE from the page frame number and the access permissions.
entry = mk_pte(page, vma->vm_page_prot);
//If the VMA is writable, set the PTE's dirty bit and write permission;
//the dirty bit records that the page's data has been modified.
if (vma->vm_flags & VM_WRITE)
entry = pte_mkwrite(pte_mkdirty(entry));
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
&vmf->ptl);
//If the PTE is not empty, another CPU may be concurrently modifying the
//same entry, so release everything and back off.
if (!pte_none(*vmf->pte))
goto release;
/* Deliver the page fault to userland, check inside PT lock */
if (userfaultfd_missing(vma)) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
mem_cgroup_cancel_charge(page, memcg, false);
put_page(page);
return handle_userfault(vmf, VM_UFFD_MISSING);
}
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, vmf->address, false);
mem_cgroup_commit_charge(page, memcg, false, false);
//Add the physical page to the active LRU (least recently used) list or
//the unevictable LRU list; page reclaim picks victims from the LRU lists.
lru_cache_add_active_or_unevictable(page, vma);
setpte:
//Install the PTE.
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
/* No need to invalidate: the virtual page was never mapped before */
update_mmu_cache(vma, vmf->address, vmf->pte);
unlock:
//Release the page-table lock.
pte_unmap_unlock(vmf->pte, vmf->ptl);
return 0;
release:
mem_cgroup_cancel_charge(page, memcg, false);
put_page(page);
goto unlock;
oom_free_page:
put_page(page);
oom:
return VM_FAULT_OOM;
}
何时会触发文件页的缺页异常呢?
1)启动程序的时候,内核为程序的代码段和数据段创建私有的文件映射,映射到进程的虚拟地址空间,第一次访问的时候触发文件页的缺页异常。
2)进程使用mmap创建文件映射,把文件的一个区间映射到进程的虚拟地址空间,第一次访问的时候触发文件页的缺页异常。
函数 do_fault 处理文件页和共享匿名页的缺页异常,执行流程及源码分析如下:
/*
 * do_fault() - dispatch a file-page (or shared anonymous page) fault to
 * the read, copy-on-write or shared-write handler according to the
 * fault flags and the VMA flags. Returns a VM_FAULT_* code.
 */
static int do_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
int ret;
/* If the VMA provides no fault method (vma->vm_ops->fault), return the error code VM_FAULT_SIGBUS */
if (!vma->vm_ops->fault)
ret = VM_FAULT_SIGBUS;
//Fault triggered by reading a private file page: do_read_fault.
else if (!(vmf->flags & FAULT_FLAG_WRITE))
ret = do_read_fault(vmf);
//Fault triggered by writing a private file page: do_cow_fault performs
//copy-on-write.
else if (!(vma->vm_flags & VM_SHARED))
ret = do_cow_fault(vmf);
//Fault triggered by writing a shared file page: do_shared_fault.
else
ret = do_shared_fault(vmf);
/* preallocated pagetable is unused: free it */
if (vmf->prealloc_pte) {
pte_free(vma->vm_mm, vmf->prealloc_pte);
vmf->prealloc_pte = NULL;
}
return ret;
}
处理读文件页错误,具体处理读文件页错误的方法如下:
函数do_read_fault处理读文件页错误,执行流程及源码分析如下:
/*
 * do_read_fault() - handle a fault caused by reading a file page: read
 * the page into the file's page cache (possibly faulting around it) and
 * map the virtual page to the cached physical page.
 */
static int do_read_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
int ret = 0;
/*
 * To reduce the number of page faults: if the file pages following the
 * one being accessed are also mapped into the process's address space,
 * read them into the page cache ahead of time. The global variable
 * fault_around_bytes controls the total length; the default is 64KB,
 * so with 4KB pages up to 16 pages are read at once.
 */
if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
ret = do_fault_around(vmf);
if (ret)
return ret;
}
//Read the file page into the file's page cache.
ret = __do_fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
//Install the PTE, mapping the virtual page to the physical page in the
//file's page cache.
ret |= finish_fault(vmf);
unlock_page(vmf->page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
put_page(vmf->page);
return ret;
}
给定一个虚拟内存区域vma,函数filemap_fault读文件页的方法如下:
1)根据vma->vm_file得到文件的打开实例file;
2)根据file->f_mapping得到文件的地址空间mapping;
3)使用地址空间操作集合中的readpage方法(mapping->a_ops->readpage)把文件页读到内存中。
函数finish_fault负责设置页表项,把主要工作委托给函数alloc_set_pte,执行流程及源码分析如下:
/*
 * alloc_set_pte() - install the PTE for @page at vmf->address, doing
 * the rmap/memcg/LRU bookkeeping for a new anonymous (COW) page or the
 * file-rmap accounting for a file page. Returns a VM_FAULT_* code.
 */
int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
struct page *page)
{
struct vm_area_struct *vma = vmf->vma;
bool write = vmf->flags & FAULT_FLAG_WRITE;
pte_t entry;
int ret;
if (pmd_none(*vmf->pmd) && PageTransCompound(page) &&
IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
/* THP on COW? */
VM_BUG_ON_PAGE(memcg, page);
ret = do_set_pmd(vmf, page);
if (ret != VM_FAULT_FALLBACK)
return ret;
}
//If the PTE table does not exist, allocate it, look up the entry for
//the virtual address in it, and lock the page table.
if (!vmf->pte) {
ret = pte_alloc_one_map(vmf);
if (ret)
return ret;
}
/* Re-check now that the page table is locked */
if (unlikely(!pte_none(*vmf->pte)))
return VM_FAULT_NOPAGE;
//(The check above: if the PTE turned out non-empty after taking the
//lock, another CPU modified the same entry, so this CPU backs off.)
flush_icache_page(vma, page);
//Build the PTE value from the page frame number and access permissions.
entry = mk_pte(page, vma->vm_page_prot);
if (write)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
/* copy-on-write page */
if (write && !(vma->vm_flags & VM_SHARED)) {
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, vmf->address, false);
mem_cgroup_commit_charge(page, memcg, false, false);
//Add the physical page to the active LRU list or the unevictable LRU list.
lru_cache_add_active_or_unevictable(page, vma);
} else {
inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
page_add_file_rmap(page, false);
}
//Install the PTE.
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
/* A previously non-present page cannot be cached; update the MMU's page-table cache */
update_mmu_cache(vma, vmf->address, vmf->pte);
return 0;
}
处理写私有文件页错误的方法:
1)把文件页从存储设备上的文件系统读到文件的页缓存中;
2)执行写时复制,为文件的页缓存中的物理页创建一个副本,这个副本是进程的私有匿名页,和文件脱离关系,修改副本不会导致文件变化;
3)设置进程的页表项,把虚拟页映射到副本;
函数do_cow_fault处理写私有文件页错误,执行流程及源码分析如下:
/*
 * do_cow_fault() - handle a write fault on a private file page: read the
 * file page into the page cache, copy it into a freshly allocated
 * private anonymous page, and map the virtual page to that copy.
 */
static int do_cow_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
int ret;
//Associate an anon_vma instance with the virtual memory area.
if (unlikely(anon_vma_prepare(vma)))
return VM_FAULT_OOM;
//Copy-on-write will be used below, so pre-allocate a physical page for
//the copy.
vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
if (!vmf->cow_page)
return VM_FAULT_OOM;
if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
&vmf->memcg, false)) {
put_page(vmf->cow_page);
return VM_FAULT_OOM;
}
//Read the file page into the file's page cache.
ret = __do_fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
goto uncharge_out;
if (ret & VM_FAULT_DONE_COW)
return ret;
//Copy the data from the page-cache physical page into the copy page.
copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
//Mark the copy's page descriptor uptodate: the page holds valid data.
__SetPageUptodate(vmf->cow_page);
//Install the PTE, mapping the virtual page to the copy's physical page.
ret |= finish_fault(vmf);
unlock_page(vmf->page);
put_page(vmf->page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
goto uncharge_out;
return ret;
uncharge_out:
mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false);
put_page(vmf->cow_page);
return ret;
}
处理写共享文件页错误的方法如下:
1)把文件页从存储设备上的文件系统读到文件的页缓存中;
2)设置进程的页表项,把虚拟页映射到文件的页缓存中的物理页。
函数do_shared_fault处理写共享文件页错误,执行流程及源码分析如下:
static int do_shared_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
int ret, tmp;
//把文件页读到文件的页缓存中
ret = __do_fault(vmf);
//如果创建内存映射的时候文件所属的文件系统注册了虚拟内存操作集合中的page-write方法
//调用这个方法通知文件系统“页即将变成可写的”
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
/*
* Check if the backing address space wants to know that the page is
* about to become writable
*/
if (vma->vm_ops->page_mkwrite) {
unlock_page(vmf->page);
tmp = do_page_mkwrite(vmf);
if (unlikely(!tmp ||
(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
put_page(vmf->page);
return tmp;
}
}
//设置页表项,把虚拟页映射到文件的页缓存中的物理页
ret |= finish_fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
VM_FAULT_RETRY))) {
unlock_page(vmf->page);
put_page(vmf->page);
return ret;
}