setup_arch:
    parse_early_param();
    sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
    sanity_check_meminfo();
    arm_memblock_init(&meminfo, mdesc);
struct memblock {
    phys_addr_t current_limit;
    struct memblock_type memory;
    struct memblock_type reserved;
};
struct memblock keeps two memblock_type lists: memory, the RAM handed over from meminfo, and reserved, regions that already have a fixed use and must never reach the page allocator.
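For reference, each list is a memblock_type holding an array of regions; in this kernel generation the definitions look roughly like:

struct memblock_region {
    phys_addr_t base;
    phys_addr_t size;
};

struct memblock_type {
    unsigned long cnt;              /* number of regions in use */
    unsigned long max;              /* size of the allocated regions array */
    phys_addr_t total_size;         /* sum of all region sizes */
    struct memblock_region *regions;
};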
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
    int i;

    /* 1. regions defined in meminfo */
    for (i = 0; i < mi->nr_banks; i++)
        memblock_add(mi->bank[i].start, mi->bank[i].size);

    /* 2. Register the kernel text, kernel data and initrd with memblock. */
    memblock_reserve(__pa(_stext), _end - _stext);
    arm_mm_memblock_reserve();
    arm_dt_memblock_reserve();

    /* 3. reserve any platform specific memblock areas */
    if (mdesc->reserve)
        mdesc->reserve();

    /*
     * 4. reserve memory for DMA contiguous allocations,
     *    must come from DMA area inside low memory
     */
    dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));

    arm_memblock_steal_permitted = false;
    memblock_allow_resize();
    memblock_dump_all();
}
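memblock_allow_resize() permits the region arrays to be reallocated later if they outgrow their static sizes, and memblock_dump_all() prints both region lists to the log when the kernel is booted with memblock=debug.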
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
*/
void __init paging_init(struct machine_desc *mdesc)
{
    void *zero_page;

    memblock_set_current_limit(arm_lowmem_limit);

    build_mem_type_table();
    prepare_page_table();
    map_lowmem();
    dma_contiguous_remap();
    devicemaps_init(mdesc);
    kmap_init();

    top_pmd = pmd_off_k(0xffff0000);

    /* allocate the zero page. */
    zero_page = early_alloc(PAGE_SIZE);

    bootmem_init();

    empty_zero_page = virt_to_page(zero_page);
    __flush_dcache_page(NULL, empty_zero_page);
}
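empty_zero_page is the page that ZERO_PAGE() returns: read faults on anonymous mappings are all backed by this single shared page of zeroes, so physical memory is only committed on the first write.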
struct mem_type {
    pteval_t prot_pte;
    pmdval_t prot_l1;
    pmdval_t prot_sect;
    unsigned int domain;
};

typedef u32 pteval_t;
typedef u32 pmdval_t;
/*
* Architecture ioremap implementation.
*/
#define MT_DEVICE 0
#define MT_DEVICE_NONSHARED 1
#define MT_DEVICE_CACHED 2
#define MT_DEVICE_WC 3
/* types 0-3 are defined in asm/io.h */
#define MT_UNCACHED 4
#define MT_CACHECLEAN 5
#define MT_MINICLEAN 6
#define MT_LOW_VECTORS 7
#define MT_HIGH_VECTORS 8
#define MT_MEMORY 9
#define MT_ROM 10
#define MT_MEMORY_NONCACHED 11
#define MT_MEMORY_DTCM 12
#define MT_MEMORY_ITCM 13
#define MT_MEMORY_SO 14
#define MT_MEMORY_DMA_READY 15
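The first four indices are what the ioremap family passes down to __arm_ioremap(); in this kernel generation the wrappers in asm/io.h look roughly like:

#define ioremap(cookie,size)         __arm_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_cached(cookie,size)  __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size)      __arm_ioremap((cookie), (size), MT_DEVICE_WC)

The actual mem_types values, dumped from a live system with the crash utility: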
crash> p mem_types
mem_types = $18 =
{{
prot_pte = 0x653,
prot_l1 = 0x41,
prot_sect = 0x11452,
domain = 0x2
}, {
prot_pte = 0x273,
prot_l1 = 0x41,
prot_sect = 0x1452,
domain = 0x2
}, {
prot_pte = 0x66f,
prot_l1 = 0x41,
prot_sect = 0x1045e,
domain = 0x2
}, {
prot_pte = 0x667,
prot_l1 = 0x41,
prot_sect = 0x10456,
domain = 0x2
}, {
prot_pte = 0x243,
prot_l1 = 0x41,
prot_sect = 0x52,
domain = 0x2
}, {
prot_pte = 0x0,
prot_l1 = 0x0,
prot_sect = 0x841e,
domain = 0x0
}, {
prot_pte = 0x0,
prot_l1 = 0x0,
prot_sect = 0x941a,
domain = 0x0
}, {
prot_pte = 0x4df,
prot_l1 = 0x21,
prot_sect = 0x0,
domain = 0x1
}, {
prot_pte = 0x5df,
prot_l1 = 0x21,
prot_sect = 0x0,
domain = 0x1
}, {
prot_pte = 0x45f,
prot_l1 = 0x1,
prot_sect = 0x1140e,
domain = 0x0
}, {
prot_pte = 0x0,
prot_l1 = 0x0,
prot_sect = 0x940e,
domain = 0x0
}, {
prot_pte = 0x447,
prot_l1 = 0x1,
prot_sect = 0x10406,
domain = 0x0
}, {
prot_pte = 0x243,
prot_l1 = 0x1,
prot_sect = 0x12,
domain = 0x0
}, {
prot_pte = 0x43,
prot_l1 = 0x1,
prot_sect = 0x0,
domain = 0x0
}, {
prot_pte = 0x43,
prot_l1 = 0x1,
prot_sect = 0x10412,
domain = 0x0
}, {
prot_pte = 0x45f,
prot_l1 = 0x1,
prot_sect = 0x0,
domain = 0x0
}}
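The sixteen entries above are indexed by the MT_* values in order, so the tenth entry (prot_pte = 0x45f, prot_l1 = 0x1, prot_sect = 0x1140e, domain = 0x0) is MT_MEMORY, the type map_lowmem() uses below. Decoding prot_sect = 0x1140e against the ARMv7 short-descriptor section format (a sketch; the exact cacheability meaning depends on whether build_mem_type_table() enabled TEX remap):

    bits[1:0] = 0b10            section descriptor
    C = 1, B = 1, TEX[0] = 1    normal memory, write-back write-allocate
    AP[1:0] = 0b01, APX = 0     privileged read/write, no user access
    S = 1                       shareable (SMP)
    domain[8:5] = 0             kernel domain

prot_l1 = 0x1 covers the other case: an L1 descriptor of type "page table" (bits[1:0] = 0b01), used when a region must be mapped with individual 4KB pages instead of 1MB sections.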
static inline void prepare_page_table(void)
{
    unsigned long addr;
    phys_addr_t end;

    /*
     * Clear out all the mappings below the kernel image.
     */
    for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
        pmd_clear(pmd_off_k(addr));

    for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
        pmd_clear(pmd_off_k(addr));

    /*
     * Find the end of the first block of lowmem.
     */
    end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
    if (end >= arm_lowmem_limit)
        end = arm_lowmem_limit;

    /*
     * Clear out all the kernel space mappings, except for the first
     * memory bank, up to the vmalloc region.
     */
    for (addr = __phys_to_virt(end);
         addr < VMALLOC_START; addr += PMD_SIZE)
        pmd_clear(pmd_off_k(addr));
}
static inline pmd_t *pmd_off_k(unsigned long virt)
{
    return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
}

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))

/* to find an entry in a page-table-directory */
#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)

#define PMD_SHIFT 21
#define PGDIR_SHIFT 21

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
    return (pud_t *)pgd;
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
    return (pmd_t *)pud;
}
Working from the outside in: pud_offset(pgd_offset_k(virt), virt) -> pgd_offset_k(virt) -> pgd_offset(&init_mm, addr) -> ((mm)->pgd + pgd_index(addr)). With PGDIR_SHIFT = 21, each pgd entry covers 2MB; for example pgd_index(0xC0000000) = 0xC0000000 >> 21 = 0x600. pud_offset() and pmd_offset() are identity casts because the pud and pmd levels are folded away on 2-level ARM, so pmd_off_k() ultimately returns a pointer into swapper_pg_dir.
pmd_clear() zeroes both halves because a Linux pmd spans two 1MB hardware L1 entries (PMD_SHIFT = 21):

#define pmd_clear(pmdp)          \
    do {                         \
        pmdp[0] = __pmd(0);      \
        pmdp[1] = __pmd(0);      \
        clean_pmd_entry(pmdp);   \
    } while (0)

static inline void clean_pmd_entry(void *pmd)
{
    const unsigned int __tlb_flag = __cpu_tlb_flags;

    tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd);
    tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
}
/* without STRICT_MM_TYPECHECKS, pte/pmd/pgprot values are plain u32s */
#define __pte(x) (x)
#define __pmd(x) (x)
#define __pgprot(x) (x)
static void __init map_lowmem(void)
{
    struct memblock_region *reg;
    phys_addr_t start;
    phys_addr_t end;
    struct map_desc map;

    /* Map all the lowmem memory banks. */
    for_each_memblock(memory, reg) {
        start = reg->base;
        end = start + reg->size;

        if (end > arm_lowmem_limit)
            end = arm_lowmem_limit;
        if (start >= end)
            break;

        map.pfn = __phys_to_pfn(start);
        map.virtual = __phys_to_virt(start);
        map.length = end - start;
        map.type = MT_MEMORY;

        create_mapping(&map, false);
    }
}
/*
* Create the page directory entries and any necessary
* page tables for the mapping specified by `md'. We
* are able to cope here with varying sizes and address
* offsets, and we take full advantage of sections and
* supersections.
*/
static void __init create_mapping(struct map_desc *md, bool force_pages)
{
    unsigned long addr, length, end;
    phys_addr_t phys;
    const struct mem_type *type;
    pgd_t *pgd;

    type = &mem_types[md->type];

    addr = md->virtual & PAGE_MASK;
    phys = __pfn_to_phys(md->pfn);
    length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

    pgd = pgd_offset_k(addr);
    end = addr + length;
    do {
        unsigned long next = pgd_addr_end(addr, end);

        alloc_init_pud(pgd, addr, next, phys, type, force_pages);

        phys += next - addr;
        addr = next;
    } while (pgd++, addr != end);
}
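create_mapping() is not only used for lowmem; board code maps its static device windows through the same path, handing a map_desc to iotable_init() from the machine_desc .map_io callback (which devicemaps_init() invokes). A hypothetical board file, with addresses invented purely for illustration:

static struct map_desc board_io_desc[] __initdata = {
    {
        .virtual = 0xf8000000,                 /* VA picked from the vmalloc area */
        .pfn     = __phys_to_pfn(0x10000000),  /* hypothetical SoC peripheral base */
        .length  = SZ_1M,
        .type    = MT_DEVICE,
    },
};

static void __init board_map_io(void)
{
    iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
}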
static void __init alloc_init_section(pud_t *pud, unsigned long addr,
                                      unsigned long end, phys_addr_t phys,
                                      const struct mem_type *type,
                                      bool force_pages)
{
    pmd_t *pmd = pmd_offset(pud, addr);

    /*
     * Try a section mapping - end, addr and phys must all be aligned
     * to a section boundary. Note that PMDs refer to the individual
     * L1 entries, whereas PGDs refer to a group of L1 entries making
     * up one logical pointer to an L2 table.
     */
    if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0 &&
        !force_pages) {
        pmd_t *p = pmd;

        if (addr & SECTION_SIZE)
            pmd++;

        do {
            *pmd = __pmd(phys | type->prot_sect);
            phys += SECTION_SIZE;
        } while (pmd++, addr += SECTION_SIZE, addr != end);

        flush_pmd_entry(p);
    } else {
        /*
         * No need to loop; pte's aren't interested in the
         * individual L1 entries.
         */
        alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
    }
}
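For the lowmem mapping built above, both sides are section-aligned: with PAGE_OFFSET = 0xC0000000 and a (hypothetical) RAM base of 0x80000000, addr, end and phys are all multiples of SECTION_SIZE (1MB), so the section path is taken and each iteration writes one L1 descriptor of the form 0x80000000 | 0x1140e, 0x80100000 | 0x1140e, and so on; no L2 tables are allocated for lowmem.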
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  const struct mem_type *type)
{
    pte_t *start_pte = early_pte_alloc(pmd);
    pte_t *pte = start_pte + pte_index(addr);

    /* If replacing a section mapping, the whole section must be replaced */
    BUG_ON(pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));

    do {
        set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
        pfn++;
    } while (pte++, addr += PAGE_SIZE, addr != end);

    early_pte_install(pmd, start_pte, type->prot_l1);
}
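The PTE page allocated here holds both the Linux and the hardware view of the page table: on 2-level ARM a single 4KB page contains two 256-entry Linux tables at offsets 0 and 1024, followed by the two corresponding 256-entry hardware tables at offsets 2048 and 3072 (see the layout diagram in arch/arm/include/asm/pgtable-2level.h). This is why the cpu_v7_set_pte_ext assembly below stores the hardware PTE at [r0, #2048].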
arch/arm/include/asm/glue-proc.h:
#ifdef CONFIG_CPU_V7
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v7
# endif
#endif
#ifndef MULTI_CPU
#define cpu_proc_init __glue(CPU_NAME,_proc_init)
#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
#define cpu_reset __glue(CPU_NAME,_reset)
#define cpu_do_idle __glue(CPU_NAME,_do_idle)
#define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area)
#define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm)
#define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext)
#define cpu_suspend_size __glue(CPU_NAME,_suspend_size)
#define cpu_do_suspend __glue(CPU_NAME,_do_suspend)
#define cpu_do_resume __glue(CPU_NAME,_do_resume)
#endif
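The __glue() helper simply pastes the two tokens together, so with CONFIG_CPU_V7 in a single-CPU build the generic names resolve at link time to the v7 routines:

/* arch/arm/include/asm/glue.h */
#define ____glue(name,fn)  name##fn
#define __glue(name,fn)    ____glue(name,fn)

/* cpu_set_pte_ext  ==>  __glue(cpu_v7,_set_pte_ext)  ==>  cpu_v7_set_pte_ext */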
arch/arm/mm/proc-v7-2level.S
/*
* cpu_v7_set_pte_ext(ptep, pte)
*
* Set a level 2 translation table entry.
*
* - ptep - pointer to level 2 translation table entry
* (hardware version is stored at +2048 bytes)
* - pte - PTE value to store
* - ext - value for extended PTE bits
*/
ENTRY(cpu_v7_set_pte_ext)
    str     r1, [r0]                  @ linux version
    bic     r3, r1, #0x000003f0       @ drop Linux status bits [9:4]
    bic     r3, r3, #PTE_TYPE_MASK
    orr     r3, r3, r2                @ merge caller's extended bits
    orr     r3, r3, #PTE_EXT_AP0 | 2  @ small-page type, AP0 set
    tst     r1, #1 << 4
    orrne   r3, r3, #PTE_EXT_TEX(1)
    eor     r1, r1, #L_PTE_DIRTY
    tst     r1, #L_PTE_RDONLY | L_PTE_DIRTY
    orrne   r3, r3, #PTE_EXT_APX      @ clean or read-only -> no write access
    tst     r1, #L_PTE_USER
    orrne   r3, r3, #PTE_EXT_AP1      @ user-accessible
    tst     r1, #L_PTE_XN
    orrne   r3, r3, #PTE_EXT_XN
    tst     r1, #L_PTE_YOUNG
    tstne   r1, #L_PTE_PRESENT
    moveq   r3, #0                    @ not young & present -> invalid hw PTE
 ARM(   str     r3, [r0, #2048]! )    @ hardware version, 2KB after linux one
    mcr     p15, 0, r0, c7, c10, 1    @ flush_pte
    mov     pc, lr
ENDPROC(cpu_v7_set_pte_ext)
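Note the final tst/tstne/moveq sequence: if the Linux PTE is not both young and present, a zero hardware entry is written, so the next access faults and the handler can mark the page young before installing a real entry. ARMv7's 2-level format has no hardware accessed/dirty bits, so Linux emulates them this way; the same trick appears in the eor/tst pair above, which sets PTE_EXT_APX so that clean pages get a read-only hardware entry and the first write faults to let the kernel set L_PTE_DIRTY.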