linux-3.2.36 kernel boot, part 3: memory initialization in setup_arch, part 2 (ARM platform: building the page tables)

Another seven-day National Day holiday. I had plenty of plans,

but in the end, only Linux stuck with me.


Before walking through paging_init, a few definitions:

pte_t: page table entry
pmd_t: page middle directory entry
pud_t: page upper directory entry
pgd_t: page global directory entry

On my ARM platform:

#define PMD_SHIFT		21
#define PGDIR_SHIFT		21
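
With both shifts at 21 and 4 KB pages, a virtual address splits into a pgd index, a pte index, and an in-page offset. A minimal user-space sketch (not kernel code; it only assumes PAGE_SHIFT = 12 plus the shifts above) to make the arithmetic concrete:

#include <stdio.h>

#define PAGE_SHIFT  12   /* 4 KB pages */
#define PGDIR_SHIFT 21   /* as on this platform */

int main(void)
{
	unsigned long addr = 0xc0000000UL;  /* PAGE_OFFSET on this board */

	/* bits [31:21] pick the pgd entry, bits [31:12] the page frame */
	printf("pgd_index(0x%08lx) = 0x%lx\n", addr, addr >> PGDIR_SHIFT);
	printf("pfn(0x%08lx)       = 0x%lx\n", addr, addr >> PAGE_SHIFT);
	return 0;
}

This prints 0x600 and 0xc0000; the 0x600 index will reappear below when we compute the pgd entry address.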

 

The paging_init function below is implemented differently per platform; mine never even uses PUD_SHIFT, since ARM uses at most two levels of page tables.

void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

 

	memblock_set_current_limit(lowmem_limit);

which is simply:

	memblock.current_limit = limit;

lowmem_limit = bank->start + bank->size was recorded during highmem setup; on my board it is 0x34000000.

 

	build_mem_type_table();

This function is large; it mainly fills in memory-type attribute tables according to the CPU. It is large because it has to cover every existing ARM variant.

	printk("Memory policy: ECC%sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

My platform prints "Memory policy: ECC disabled, Data cache writeback": ECC off, data cache in write-back mode.

	prepare_page_table();

Let's see what it prepares.

static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */

Clear all mappings below the kernel image. On my platform MODULES_VADDR = 0xbf000000 and PMD_SIZE = 0x200000 (2 MB, matching PMD_SHIFT = 21).

MODULES_VADDR is the start of the loadable-module mapping area. The PMD_SIZE macro is the size of the region mapped by a single page middle directory entry, i.e. the area covered by one page table.

My platform's boot log prints:

    vector  : 0xffff0000 - 0xffff1000   (    4 kB)
    fixmap  : 0xfff00000 - 0xfffe0000   (  896 kB)
    vmalloc : 0xc4800000 - 0xf6000000   (  792 MB)
    lowmem  : 0xc0000000 - 0xc4000000   (   64 MB)
    pkmap   : 0xbfe00000 - 0xc0000000   (    2 MB)
    modules : 0xbf000000 - 0xbfe00000   (   14 MB)

	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

pmd_off_k(addr) returns the entry in the kernel (init_mm) page tables that covers addr.
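
For reference, the definition in arch/arm/mm/mm.h (3.2) walks the folded levels; on this two-level configuration it collapses to the pgd entry covering virt:

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
}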

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

#define __pmd(x)	(x)

 

static inline void clean_pmd_entry(pmd_t *pmd)
{

Note that the pmd argument arrives in register r0, per the ARM calling convention.

	const unsigned int __tlb_flag = __cpu_tlb_flags;	/* determined by the CPU */

	if (tlb_flag(TLB_DCLEAN))	/* first check the CPU's TLB flags */
		asm("mcr	p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd) : "cc");

i.e. mcr p15, 0, r0, c7, c10, 1: clean the data cache line for the virtual address in r0 (the pmd pointer).

	if (tlb_flag(TLB_L2CLEAN_FR))
		asm("mcr	p15, 1, %0, c15, c9, 1	@ L2 flush_pmd"
			: : "r" (pmd) : "cc");

Clean the corresponding L2 cache line.
}

 

XIP was mentioned earlier.

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif

At this point addr sits at MODULES_VADDR, the start of the kernel-reserved region.

	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

Together with the first loop, this has now cleared the module area, pkmap (the permanent kernel mapping region for highmem), and all of the user space below them.

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= lowmem_limit)
		end = lowmem_limit;

end becomes the end of the first bank, capped at the lowmem limit.

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */

That is, skip the first bank but clear all remaining kernel-space mappings. VMALLOC_END = 0xf6000000 on my platform.

	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_END; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}
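
Putting the three loops together, on my platform prepare_page_table() clears the entries covering:

0x00000000 - 0xbf000000  (everything below MODULES_VADDR)
0xbf000000 - 0xc0000000  (modules + pkmap, up to PAGE_OFFSET)
0xc4000000 - 0xf6000000  (__phys_to_virt(0x34000000) up to VMALLOC_END)

leaving intact only the first bank's lowmem mapping (0xc0000000 - 0xc4000000) set up earlier in head.S.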

 

	map_lowmem();

This maps lowmem:

static void __init map_lowmem(void)
{
	struct memblock_region *reg;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;

		if (end > lowmem_limit)
			end = lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);	/* page frame number */

#define __phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))

i.e. (paddr) >> 12, dividing by the 4096-byte page size; e.g. 0x30000000 >> 12 = 0x30000, matching the pfn printed below.

		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY;

		create_mapping(&map);

create_mapping builds the directory entries and any needed page tables; it is a key function.

static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {

i.e. the target is not the vector table base and lies in the user region rather than in kernel space:

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

vectors_base() is the vector table base address; mine is 0xffff0000.

 

#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))

The 0x01000000 (16 MB) is the module area plus the pkmap area, so TASK_SIZE = 0xc0000000 - 0x01000000 = 0xbf000000 = MODULES_VADDR: the point where the kernel-reserved region begins.

		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

 

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {

A device/ROM mapping that would overlap the lowmem/vmalloc virtual range:

		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx overlaps vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

 

	type = &mem_types[md->type];

 

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);	/* skipped here; I've never used 36-bit addressing */
		return;
	}

#define PAGE_MASK	(~(PAGE_SIZE-1))	/* i.e. ~0xfff */

	addr = md->virtual & PAGE_MASK;		/* mask off the in-page offset */
	phys = __pfn_to_phys(md->pfn);		/* physical address */
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));	/* page-align the length */

The low 12 bits of an address are the in-page offset; the upper 20 bits select the directory and page table entries.

#define SECTION_SHIFT	20
#define SECTION_SIZE	(1UL << SECTION_SHIFT)	/* section size: 1 MB */
#define SECTION_MASK	(~(SECTION_SIZE-1))

 

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {

This catches a type that has no L1 page-table protection bits (so it can only be section-mapped) whose addresses are nevertheless not 1 MB aligned. We'll see the other side of this condition in alloc_init_section() below.

		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

 

	pgd = pgd_offset_k(addr);

#define PGDIR_SHIFT		21
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)	/* shifting right by 21 bits gives the pgd index */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

Rather than computing this by hand, I just added printks:

	printk(KERN_NOTICE "md->virtual = 0x%lx md->pfn = 0x%lx md->length = 0x%lx\n",
	       (unsigned long)md->virtual, (unsigned long)md->pfn, (unsigned long)md->length);
	printk(KERN_NOTICE "addr = 0x%lx phys = 0x%lx length = 0x%lx init_mm.pgd = 0x%lx pgd = 0x%lx\n",
	       (unsigned long)addr, (unsigned long)phys, (unsigned long)length,
	       (unsigned long)init_mm.pgd, (unsigned long)pgd);

 

The output at this point:

md->virtual = 0xc0000000 md->pfn = 0x30000 md->length = 0x4000000
addr = 0xc0000000 phys = 0x30000000 length = 0x4000000 init_mm.pgd = 0xc0004000 pgd = 0xc0007000

pgd: page directory entry address
pfn: page frame number

This is the lowmem mapping: the kernel page table base is 0xc0004000, and the pgd entries for the lowmem region start at 0xc0007000.
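
The pgd value checks out: pgd_index(0xc0000000) = 0xc0000000 >> 21 = 0x600, and since each pgd_t here is 8 bytes (two 4-byte hardware entries, explained below), pgd = 0xc0004000 + 0x600 * 8 = 0xc0007000.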

What follows just fills in the entries:

	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* 1 << 21 = 2 MB */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))	/* masks the low 21 bits */

This yields the boundary of the next pgd entry (e.g. for addr = 0xc0000000, end = 0xc4000000 it returns 0xc0200000).

		alloc_init_pud(pgd, addr, next, phys, type);

static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
	unsigned long phys, const struct mem_type *type)
{

The parameters:

pgd: page directory entry
addr: start of the virtual range to map
end: end of the virtual range to map
phys: start of the corresponding physical range
type: memory type

	pud_t *pud = pud_offset(pgd, addr);

typedef struct { pgd_t pgd; } pud_t;

pud_offset() simply hands the pgd back, since as I said this platform has no pud level.
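
This folding comes from include/asm-generic/pgtable-nopud.h, which defines (among the other no-op pud helpers):

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd;
}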

	unsigned long next;

#define pud_addr_end(addr, end)		(end)

	do {
		next = pud_addr_end(addr, end);
		alloc_init_section(pud, addr, next, phys, type);	/* pasted below */
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

What looks like a pgd loop wrapping a pud loop is, here, really just the pgd loop.

 

        }

}

Now the key part, alloc_init_section.

#define pmd_offset(dir, addr)	((pmd_t *)(dir))

On ARM, if an entire section is mapped, a single-level (section) mapping is used; otherwise a two-level mapping with 4 KB pages is used. The page directory corresponds to ARM's first-level translation table, and the page middle directory is folded onto the page directory, collapsing the conceptual three-level mapping into a physical two-level one. Two-level mapping slows translation slightly, since going from virtual to physical address takes an extra step, but it improves memory utilization, and a process rarely exhausts its 3 GB address space anyway. For some peripherals we want the fastest possible response, so regions of I/O registers are given single-level section mappings.
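
To make the distinction concrete: the low two bits of an ARM first-level descriptor encode which kind it is. A small sketch based on the ARMv4/v5 short-descriptor format (the two descriptor values below are made up for illustration):

#include <stdio.h>

/* Classify an ARMv4/v5 first-level descriptor by its low two bits:
 * 00 = fault, 01 = coarse page table (two-level walk), 10 = 1 MB section. */
static const char *l1_kind(unsigned int desc)
{
	switch (desc & 3) {
	case 0:  return "fault (unmapped)";
	case 1:  return "coarse page table -> two-level walk";
	case 2:  return "1 MB section, phys base = desc & 0xfff00000";
	default: return "fine page table (legacy)";
	}
}

int main(void)
{
	printf("%s\n", l1_kind(0x30000c12)); /* made-up section descriptor */
	printf("%s\n", l1_kind(0x33ffe801)); /* made-up table descriptor  */
	return 0;
}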

http://blog.csdn.net/zhaohc_nj/article/details/7977011

That post analyzes alloc_init_section and is worth a read; here is my own walkthrough as well.

static void __init alloc_init_section(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pud, addr);

Again this looks like a pmd loop under the pud loop, but it is all really the pgd.

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */

The comment says it clearly: try a section mapping, provided end, addr, and phys are all aligned to a section boundary.

	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

		if (addr & SECTION_SIZE)
			pmd++;

		do {
			*pmd = __pmd(phys | type->prot_sect);

ORing in prot_sect here is dictated by how the ARM MMU decodes first-level descriptors.

			phys += SECTION_SIZE;

So each section's starting physical address is written straight into the L1 entry: a purely linear mapping. Single-level (section) mapping uses no page frames at all, only the pgd.

		} while (pmd++, addr += SECTION_SIZE, addr != end);

 

		flush_pmd_entry(p);

flush_pmd_entry is essentially:

		asm("mcr	p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd) : "cc");

i.e. with r0 = p: mcr p15, 0, r0, c7, c10, 1, cleaning the data cache line for that virtual address.
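
Before moving to the else branch, here is a user-space sketch of what the section branch writes for my 64 MB bank, using the numbers from the printk above (prot_sect omitted; the simulation itself is only illustrative):

#include <stdio.h>

#define SECTION_SIZE 0x00100000UL  /* 1 MB */

int main(void)
{
	unsigned long addr = 0xc0000000UL, end = 0xc4000000UL;
	unsigned long phys = 0x30000000UL;
	unsigned long slot = 0xc0007000UL;  /* first L1 slot, from the printk */

	/* each 4-byte L1 slot receives its section's physical base | prot_sect */
	do {
		if (addr == 0xc0000000UL || addr == 0xc3f00000UL)
			printf("L1[0x%08lx] = 0x%08lx | prot_sect (maps virt 0x%08lx)\n",
			       slot, phys, addr);
		phys += SECTION_SIZE;
	} while (slot += 4, addr += SECTION_SIZE, addr != end);
	return 0;
}

It prints the first and last of the 64 entries: slots 0xc0007000 and 0xc00070fc, mapping 0x30000000 and 0x33f00000.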

 

	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);

A pmd value of 0 means the first level is empty, so allocate the pte table: 512 * sizeof(pte_t) + 512 * sizeof(u32) bytes.
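
The constants behind that size, from arch/arm/include/asm/pgtable-2level.h (quoted from the 3.2 sources as I recall them):

#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		2
#define PTRS_PER_PGD		2048

#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))

With 4-byte entries, that is 2048 + 2048 bytes, exactly one page.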


For this sizing, the kernel's own explanation in arch/arm/include/asm/pgtable-2level.h:

 * Hardware-wise, we have a two level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries.  Each entry
 * is one 32-bit word.  Most of the bits in the second level entry are used
 * by hardware, and there aren't any "accessed" and "dirty" bits.

 

 * Linux on the other hand has a three level page table structure, which can
 * be wrapped to fit a two level page table structure easily - using the PGD
 * and PTE only.  However, Linux also expects one "PTE" table per page, and
 * at least a "dirty" bit.

 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.)  The second level contains two
 * hardware PTE tables arranged contiguously, preceded by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.

 

So when building page tables Linux maintains an illusion:

1) The hardware PGD still has 4096 entries of 4 bytes each, but Linux treats it as 2048 entries of 8 bytes each, so each Linux pgd entry actually holds two hardware entries.

2) A hardware PTE table still has 256 entries of 4 bytes each, but whenever Linux allocates a pte table it allocates a whole 4 KB page: per the comment above, the Linux ("shadow") versions come first, 2048 bytes holding 512 shadow entries (two shadow tables), followed by 2048 bytes holding 512 hardware pte entries (two hardware tables).

The two hardware pte tables slot exactly into the two halves of one 8-byte pgd entry.

Moreover, each hardware pte entry carries attribute bits the ARM hardware supports (present, writable, ...), named PTE_xxx, while each shadow entry carries attributes the hardware cannot represent (accessed, dirty, ...), named L_PTE_xxx.

This is how attributes the hardware doesn't support, such as accessed and dirty, get emulated.
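
Laid out, the 4 KB page returned by early_pte_alloc() looks like this (my reading of the code, assuming 4-byte pte_t):

pte page (4 KB):
  +0x000  Linux pte table 0     (256 shadow entries, L_PTE_xxx bits)
  +0x400  Linux pte table 1
  +0x800  hardware pte table 0  <- pmdp[0] = __pa(pte) + PTE_HWTABLE_OFF
  +0xc00  hardware pte table 1  <- pmdp[1] = pmdp[0] value + 256 * sizeof(pte_t)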
       


(The boot-time memory allocator, early_alloc, will get its own post later.)

               __pmd_populate(pmd, __pa(pte), prot);

static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
				  pmdval_t prot)
{
	pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot;

The pmd value is (pte + PTE_HWTABLE_OFF) | prot; again the format is dictated by how the MMU walks the tables.

	pmdp[0] = __pmd(pmdval);			/* first hardware pte table */
	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));	/* second hardware pte table */
	flush_pmd_entry(pmdp);				/* covered above */
}
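
As a made-up example: if the pte page were allocated at physical 0x33ffd000, then pmdval = (0x33ffd000 + 0x800) | prot, so pmdp[0] points at hardware table 0 (offset 0x800 in the page) and pmdp[1] = pmdval + 0x400 points at hardware table 1 (offset 0xc00).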

        }

	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);

}

	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);

The pte value format is likewise dictated by the MMU hardware.

#define set_pte_ext(ptep,pte,ext)	cpu_set_pte_ext(ptep,pte,ext)
#define cpu_set_pte_ext			__glue(CPU_NAME,_set_pte_ext)
#define __glue(name,fn)			____glue(name,fn)
#define ____glue(name,fn)		name##fn

On my platform this expands to the following; I really admire how the kernel developers use the preprocessor here:

cpu_arm920_set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);

It is implemented in assembly:

/*
 * cpu_arm920_set_pte(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm920_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0

armv3_set_pte_ext writes the pte_t into the page table entry; with the D-cache enabled, this write actually lands in the cache, not yet in memory.

	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry

Clean the cache line for address r0 back toward memory; in practice this lands in the write buffer, still not truly in memory.

	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

Wait for the write buffer to drain to memory.

This physical address may previously have been mapped at another virtual address and operated on recently, so it may sit in the data cache. Besides updating the pte in memory, we must therefore clean the corresponding cache line and drain the write buffer, so that the next access doesn't see stale data.

#endif
	mov	pc, lr

 

		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

Entries are filled in a loop, one page at a time.

}

        }

}


       devicemaps_init(mdesc);

/*
 * Set up device the mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */

This is easy to see in practice: our hardware register mappings live above VMALLOC_END, so if you touch one after the mapping has been cleared, the kernel will of course crash.

static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;

	/*
	 * Allocate the vector page early.
	 */
	vectors_page = early_alloc(PAGE_SIZE);

	for (addr = VMALLOC_END; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

This is the clearing of everything above VMALLOC_END just discussed.

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

clean_pmd_entry() was shown above; it just uses cp15 to flush the cleared pmd.

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */

XIP was covered earlier: an XIP kernel's text runs in place in flash ROM, e.g. NOR flash, so "memory" here can be flash too. MODULES_VADDR = 0xbf000000, and as the comment says, an XIP kernel always sits first in the module area. My platform doesn't use this.

#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

 

	/*
	 * Map the cache flushing regions.
	 */

The cache flush region mapping:

#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif

And the minicache mapping:

#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

Neither region exists on my platform.

Next comes the vector table mapping.

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;

	create_mapping(&map);

Remember the printks I added to create_mapping? They output:

md->virtual = 0xffff0000 md->pfn = 0x33fff md->length = 0x1000
addr = 0xffff0000 phys = 0x33fff000 length = 0x1000 init_mm.pgd = 0xc0004000 pgd = 0xc0007ff8

0xc0007ff8 is the pgd entry for the vector table (0xc0004000 + (0xffff0000 >> 21) * 8). Since this mapping is only one 4 KB page, it is not section-aligned and takes the alloc_init_pte path.

 

 

	if (!vectors_high()) {

As the comment above says: if high vectors aren't in use, also map the vectors at the low address, starting at virtual address 0. Let's look at vectors_high():

 

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

So it is decided by the ARM architecture version (and the V bit in the control register).

                map.virtual = 0;

                map.type = MT_LOW_VECTORS;

                create_mapping(&map);

        }

 

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

 

Static mappings: this is the static I/O mapping mechanism we've talked about. My platform has:

	.map_io		= mini2440_map_io,

static void __init mini2440_map_io(void)
{
	s3c24xx_init_io(mini2440_iodesc, ARRAY_SIZE(mini2440_iodesc));
}

static struct map_desc mini2440_iodesc[] __initdata = {
	/* nothing to declare, move along */
};

Of s3c24xx_init_io we only look at two lines:

	iotable_init(mach_desc, size);
	iotable_init(s3c_iodesc, ARRAY_SIZE(s3c_iodesc));

 

static struct map_desc s3c_iodesc[] __initdata = {
	IODESC_ENT(GPIO),
	IODESC_ENT(IRQ),
	IODESC_ENT(MEMCTRL),
	IODESC_ENT(UART)
};

#define IODESC_ENT(x) { (unsigned long)S3C24XX_VA_##x, __phys_to_pfn(S3C24XX_PA_##x), S3C24XX_SZ_##x, MT_DEVICE }
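
So, for example, IODESC_ENT(GPIO) pastes to a struct map_desc initializer:

{ (unsigned long)S3C24XX_VA_GPIO, __phys_to_pfn(S3C24XX_PA_GPIO), S3C24XX_SZ_GPIO, MT_DEVICE }

i.e. .virtual / .pfn / .length / .type, which is exactly what the GPIO printout below shows (virtual 0xfd000000, pfn 0x56000, length 0x100000).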

 

void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}

So there should be four printouts:

GPIO:
md->virtual = 0xfd000000 md->pfn = 0x56000 md->length = 0x100000
addr = 0xfd000000 phys = 0x56000000 length = 0x100000 init_mm.pgd = 0xc0004000 pgd = 0xc0007f40

IRQ (interrupt controller):
md->virtual = 0xf6000000 md->pfn = 0x4a000 md->length = 0x100000
addr = 0xf6000000 phys = 0x4a000000 length = 0x100000 init_mm.pgd = 0xc0004000 pgd = 0xc0007d80

MEMCTRL (memory controller):
md->virtual = 0xf6200000 md->pfn = 0x48000 md->length = 0x100000
addr = 0xf6200000 phys = 0x48000000 length = 0x100000 init_mm.pgd = 0xc0004000 pgd = 0xc0007d88

UART:
md->virtual = 0xf7000000 md->pfn = 0x50000 md->length = 0x100000
addr = 0xf7000000 phys = 0x50000000 length = 0x100000 init_mm.pgd = 0xc0004000 pgd = 0xc0007dc0

 

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */

        local_flush_tlb_all();

        flush_cache_all();

Both are implemented in assembly; I won't go through them here.

}

 

	kmap_init();

This initializes pkmap, the permanent kernel mapping area for highmem we've mentioned. Permanent kernel mappings are linear too; a dedicated page table, set up below, records the mapped pages, covering the region at PKMAP_BASE:

#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)

i.e. 2 MB below 0xc0000000, at 0xbfe00000, matching the pkmap line in the boot log.

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

 

       top_pmd = pmd_off_k(0xffff0000);

 

       /* allocate the zero page. */

       zero_page = early_alloc(PAGE_SIZE);

 

       bootmem_init();

bootmem_init gets its own post next time.

       empty_zero_page = virt_to_page(zero_page);

       __flush_dcache_page(NULL, empty_zero_page);

Not examined in detail here.

}
