/*
* Setup common bits before finally enabling the MMU. Essentially
* this is just loading the page table pointer and domain access
* registers.
*
* r0 = cp#15 control register
* r1 = machine ID
* r2 = atags or dtb pointer
* r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h)
* r9 = processor ID
* r13 = *virtual* address to jump to upon completion
*/
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
orr r0, r0, #CR_A @ r0 already holds the cp#15 control register value
@ (see the block comment above), so only the A bit is adjusted here
@ rather than rebuilding r0 from zero
#else
bic r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
bic r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #CR_I
#endif
#ifndef CONFIG_ARM_LPAE
mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT))
/*
 * Per the ARM reference manual: D1 (the USER domain) and D0 (the KERNEL and
 * TABLE domains) are set to manager mode, so accesses to and execution from
 * those domains are not checked against the access permission bits in the
 * translation table entries; D2 (the IO domain) is set to client mode, so
 * accesses to it are checked against those permission bits.
 */
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
#endif
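/*
 * Worked example of the immediate loaded into r5 above, assuming the usual
 * asm/domain.h definitions (domain_val(dom, type) == type << (2 * dom),
 * DOMAIN_KERNEL = DOMAIN_TABLE = 0, DOMAIN_USER = 1, DOMAIN_IO = 2,
 * DOMAIN_MANAGER = 0b11, DOMAIN_CLIENT = 0b01):
 *
 *   (3 << 2) | (3 << 0) | (3 << 0) | (1 << 4) = 0b01_11_11 = 0x1f
 *
 * i.e. the mov is equivalent to "mov r5, #0x1f": D0 and D1 manager, D2 client.
 */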
b __turn_mmu_on
ENDPROC(__enable_mmu)
/*
* Enable the MMU. This completely changes the structure of the visible
* memory space. You will not be able to trace execution through this.
* If you have an enquiry about this, *please* check the linux-arm-kernel
* mailing list archives BEFORE sending another post to the list.
*
* r0 = cp#15 control register
* r1 = machine ID
* r2 = atags or dtb pointer
* r9 = processor ID
* r13 = *virtual* address to jump to upon completion
*
* other registers depend on the function called upon completion
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(__turn_mmu_on)
mov r0, r0 @ nop
instr_sync
mcr p15, 0, r0, c1, c0, 0 @ write control reg (the MMU comes on here)
mrc p15, 0, r3, c0, c0, 0 @ read id reg
instr_sync
mov r3, r3 @ nop
mov r3, r13 @ fetch the *virtual* address to continue at
mov pc, r3 @ jump to it; execution now uses the new mapping
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
.popsection
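/*
 * For context: r13 is loaded by the caller. In stext, earlier in this same
 * head.S (exact lines vary slightly between kernel versions), the setup looks
 * roughly like:
 *
 *	ldr	r13, =__mmap_switched	@ address to jump to after the MMU
 *					@ has been enabled
 *	...
 *	b	__enable_mmu
 *
 * so the "mov pc, r3" above lands in __mmap_switched below, at its virtual
 * (link-time) address.
 */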
/*
* The following fragment of code is executed with the MMU on in MMU mode,
* and uses absolute addresses; this is not position independent.
*
* r0 = cp#15 control register
* r1 = machine ID
* r2 = atags/dtb pointer
* r9 = processor ID
*/
__INIT
__mmap_switched:
adr r3, __mmap_switched_data
ldmia r3!, {r4, r5, r6, r7}
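/*
 * For reference, __mmap_switched_data (in head-common.S of this kernel
 * generation; check your tree for the exact layout) is roughly:
 *
 *	.long	__data_loc			@ r4
 *	.long	_sdata				@ r5
 *	.long	__bss_start			@ r6
 *	.long	_end				@ r7
 *	.long	processor_id			@ r4
 *	.long	__machine_arch_type		@ r5
 *	.long	__atags_pointer			@ r6
 *	.long	cr_alignment			@ r7
 *	.long	init_thread_union + THREAD_START_SP @ sp
 *
 * The first four words are consumed here; the second ldmia below picks up
 * the rest.
 */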
cmp r4, r5 @ Copy data segment if needed, from [r4] __data_loc to [r5] _sdata
@ __data_loc is where the data segment is stored inside the kernel image
@ _sdata is the data segment's link-time address (its location in RAM)
@ for an XIP kernel the two addresses necessarily differ
1: cmpne r5, r6
ldrne fp, [r4], #4 @ fp(frame pointer) == r11
strne fp, [r5], #4
bne 1b
mov fp, #0 @ Clear BSS (and zero fp)
1: cmp r6, r7
strcc fp, [r6],#4
bcc 1b
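/*
 * Rough C-level equivalent of the two loops above (illustrative only):
 * if __data_loc != _sdata (XIP case), memcpy(_sdata, __data_loc,
 * __bss_start - _sdata); then memset(__bss_start, 0, _end - __bss_start).
 */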
@ Move the values still needed from registers into global variables: we are
@ about to jump into C code, which will reuse the registers.
ARM( ldmia r3, {r4, r5, r6, r7, sp})
THUMB( ldmia r3, {r4, r5, r6, r7} )
THUMB( ldr sp, [r3, #16] )
str r9, [r4] @ Save processor ID
str r1, [r5] @ Save machine type
str r2, [r6] @ Save atags pointer
cmp r7, #0
bicne r4, r0, #CR_A @ Clear 'A' bit: r4 = control register value with alignment fault checking disabled
stmneia r7, {r0, r4} @ Save control register values
@ stores the control register values into the globals cr_alignment and
@ cr_no_alignment (defined in arch/arm/kernel/entry-armv.S)
/*
.globl cr_alignment @ [r0]
.globl cr_no_alignment @ [r4]
cr_alignment:
	.space 4
cr_no_alignment:
	.space 4
*/
b start_kernel
ENDPROC(__mmap_switched)