static void split_pgtable(void)
{
	const unsigned long end = ~(unsigned long)0;
	unsigned long virt = PAGE_OFFSET, tvirt, pgd_end, pud_end;
	phys_addr_t phys;
	pgprot_t prot;
	pgd_t *pgd;
	pud_t *pud, *tpud;
	pmd_t *pmd, *tpmd;
	pte_t *pte, *tpte;
	int i;

	pgd = pgd_offset_k(virt);
	do {
		pgd_end = pgd_addr_end(virt, end);
		if (pgd_none(*pgd)) {
			/* bypass */
			virt = pgd_end;
			continue;
		}
		pud = pud_offset(pgd, virt);
		do {
			pud_end = pud_addr_end(virt, pgd_end);
			if (pud_none(*pud)) {
				/* bypass */
				virt = pud_end;
				continue;
			}
			if ((pud_val(*pud) & 3) == 1) {
				/* 1GB block mapping: split it into a table of 2MB PMD entries */
				pmd = __va(memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t),
							  PTRS_PER_PMD * sizeof(pmd_t)));
				phys = pud_val(*pud) & PUD_MASK & PHYS_MASK;
				prot = __pgprot(pud_val(*pud) ^ phys);
				for (i = 0; i < PTRS_PER_PMD; phys += PMD_SIZE, i++)
					__pmd_populate(&pmd[i], phys, pgprot_val(prot));
				pud_populate(&init_mm, pud, pmd);
			}
			pmd = pmd_offset(pud, virt);
			do {
				tvirt = virt;
				virt = pmd_addr_end(virt, pud_end);
				if ((pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
					/* 2MB block mapping: split it into a table of 4KB PTE entries */
					pte = __va(memblock_alloc(PTRS_PER_PTE * sizeof(pte_t),
								  PTRS_PER_PTE * sizeof(pte_t)));
					phys = pmd_val(*pmd) & PMD_MASK & PHYS_MASK;
					prot = __pgprot(((pmd_val(*pmd) ^ phys) & ~PMD_TYPE_MASK) | PTE_TYPE_PAGE);
					for (i = 0; i < PTRS_PER_PTE; phys += PAGE_SIZE, i++)
						set_pte(&pte[i], __pte(phys | pgprot_val(prot)));
					__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
				} else {
					if (pmd_none(*pmd))
						continue;
					pte = pmd_page_vaddr(*pmd);
				}
				/* pre-build alias page tables for every slot of the remap
				 * window; the copied entries start out non-valid */
				while ((tvirt += (1UL << MMU_REMAP_SHIFT)) > virt) {
					tpud = pud_offset(pgd_offset_k(tvirt), tvirt);
					if (pud_none(*tpud)) {
						tpmd = __va(memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t),
									   PTRS_PER_PMD * sizeof(pmd_t)));
						memset(tpmd, 0, PTRS_PER_PMD * sizeof(pmd_t));
						pud_populate(&init_mm, tpud, tpmd);
					}
					tpmd = pmd_offset(tpud, tvirt);
					tpte = __va(memblock_alloc(PTRS_PER_PTE * sizeof(pte_t),
								   PTRS_PER_PTE * sizeof(pte_t)));
					for (i = 0; i < PTRS_PER_PTE; i++)
						set_pte(&tpte[i], __pte(pte_val(pte[i]) & ~PTE_VALID));
					__pmd_populate(tpmd, __pa(tpte), PMD_TYPE_TABLE);
				}
			} while (pmd++, virt != pud_end);
		} while (pud++, virt != pgd_end);
	} while (pgd++, virt != end);
}

int pte_set_invalid(pte_t *ptep, pgtable_t token, unsigned long addr, void *data)
{
	set_pte(ptep, __pte(pte_val(*ptep) & ~PTE_VALID));
	return 0;
}

int pte_set_valid(pte_t *ptep, pgtable_t token, unsigned long addr, void *data)
{
	set_pte(ptep, __pte(pte_val(*ptep) | PTE_VALID));
	return 0;
}

int pte_set_rdonly(pte_t *ptep, pgtable_t token, unsigned long addr, void *data)
{
	set_pte(ptep, __pte(pte_val(*ptep) | PTE_RDONLY));
	return 0;
}
void __init paging_init(void)
{
	void *zero_page;

	map_mem();
	fixup_executable();	/* not present in 3.10 */
	split_pgtable();	/* add this line */
	......
}
#define __virt_to_phys(x) (((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET))
#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET))
#define MMU_REMAP_WIDTH 4
#define MMU_REMAP_SHIFT (VA_BITS - 1 - MMU_REMAP_WIDTH)
#define MMU_REMAP_MASK ((1UL << MMU_REMAP_WIDTH) - 1)
#define __virt_to_phys(x) (((phys_addr_t)(x)&~(MMU_REMAP_MASK << MMU_REMAP_SHIFT)) - PAGE_OFFSET + PHYS_OFFSET)
#ifndef __ASSEMBLY__
unsigned long __phys_to_virt(phys_addr_t phys);
#endif
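To make the encoding concrete, here is a minimal user-space sketch of the same bit arithmetic. The VA_BITS, PAGE_OFFSET and PHYS_OFFSET values below are assumptions for illustration only (a 39-bit VA arm64 configuration); it only demonstrates how a 4-bit per-page index is folded into, and stripped back out of, a linear-map address, not the kernel implementation itself.

#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration; the real ones come from the kernel config. */
#define VA_BITS		39
#define PAGE_OFFSET	0xffffffc000000000UL	/* assumed linear-map base */
#define PHYS_OFFSET	0x40000000UL		/* assumed DRAM base */

#define MMU_REMAP_WIDTH	4
#define MMU_REMAP_SHIFT	(VA_BITS - 1 - MMU_REMAP_WIDTH)
#define MMU_REMAP_MASK	((1UL << MMU_REMAP_WIDTH) - 1)

/* Fold a per-page random index into the linear-map address of a physical address. */
static unsigned long phys_to_virt_remap(uint64_t phys, unsigned long idx)
{
	return (phys - PHYS_OFFSET + PAGE_OFFSET) |
	       ((idx & MMU_REMAP_MASK) << MMU_REMAP_SHIFT);
}

/* Strip the index again, mirroring the patched __virt_to_phys(). */
static uint64_t virt_to_phys_remap(unsigned long virt)
{
	return (virt & ~(MMU_REMAP_MASK << MMU_REMAP_SHIFT)) - PAGE_OFFSET + PHYS_OFFSET;
}

int main(void)
{
	uint64_t phys = 0x40123000UL;	/* arbitrary example physical address */
	unsigned long idx;

	for (idx = 0; idx < 3; idx++) {
		unsigned long virt = phys_to_virt_remap(phys, idx);

		printf("idx %lu: virt %016lx -> phys %llx\n",
		       idx, virt, (unsigned long long)virt_to_phys_remap(virt));
	}
	return 0;
}

Each index selects a different alias of the same physical page inside the linear map, which is why __virt_to_phys() can stay a pure bit-mask while __phys_to_virt() has to become a function that looks the index up per page.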
#ifndef CONFIG_DEBUG_RODATA
// static inline void mark_rodata_ro(void) {}	/* comment out this line */
extern void mark_rodata_ro(void);		/* add this line */
#endif
#include ......

enum pageflags {
	......
	PG_remap_start,						/* add this line */
	PG_remap_end = PG_remap_start + MMU_REMAP_WIDTH - 1,	/* add this line */
	__NR_PAGEFLAGS,
static const struct trace_print_flags pageflag_names[] = {
	......
	{1UL << PG_remap_start,       "mmu_remap"  },	/* add this line */
	{1UL << (PG_remap_start + 1), "mmu_remap1" },	/* add this line */
	{1UL << (PG_remap_start + 2), "mmu_remap2" },	/* add this line */
	{1UL << (PG_remap_start + 3), "mmu_remap3" },	/* add this line */
};
/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
/* Default implementation of BIOVEC_PHYS_MERGEABLE, changed to never merge */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	(0)
#if 1	/* add this block */
unsigned long __phys_to_virt(phys_addr_t phys)
{
	unsigned long val = 0;

	/* once memory is up, read the 4-bit remap index stored in page->flags */
	if (high_memory)
		val = ((phys_to_page(phys)->flags >> PG_remap_start) & MMU_REMAP_MASK) << MMU_REMAP_SHIFT;
	return (unsigned long)(phys - PHYS_OFFSET + PAGE_OFFSET) | val;
}

static void mmu_random_remap(struct page *page, int num)
{
	extern unsigned int get_random_int(void);
	unsigned long pfn, val = get_random_int() & MMU_REMAP_MASK;
	int i;

	pfn = page_to_pfn(page);
	/* avoid reusing the remap index of the neighbouring pages */
	if (pfn > PHYS_PFN_OFFSET && val == ((page[-1].flags >> PG_remap_start) & MMU_REMAP_MASK))
		val = (val + 1) & MMU_REMAP_MASK;
	if (pfn + num < max_pfn && val == ((page[num].flags >> PG_remap_start) & MMU_REMAP_MASK))
		val = (val + 1) & MMU_REMAP_MASK;
	for (i = 0; i < num; i++)
		page[i].flags |= (val << PG_remap_start);
}

static void mmu_random_unmap(struct page *page, int num)
{
	int i;

	for (i = 0; i < num; i++)
		page[i].flags &= ~(MMU_REMAP_MASK << PG_remap_start);
}

int set_memory_invalid(unsigned long addr, int numpages)
{
	extern int pte_set_invalid(pte_t *ptep, pgtable_t token, unsigned long addr, void *data);
	const unsigned long size = PAGE_SIZE * numpages;

	apply_to_page_range(&init_mm, addr, size, pte_set_invalid, NULL);
	flush_tlb_kernel_range(addr, addr + size);
	return 0;
}
EXPORT_SYMBOL(set_memory_invalid);

int set_memory_valid(unsigned long addr, int numpages)
{
	extern int pte_set_valid(pte_t *ptep, pgtable_t token, unsigned long addr, void *data);
	const unsigned long size = PAGE_SIZE * numpages;

	apply_to_page_range(&init_mm, addr, size, pte_set_valid, NULL);
	flush_tlb_kernel_range(addr, addr + size);
	return 0;
}
EXPORT_SYMBOL(set_memory_valid);

__weak void mark_rodata_ro(void)
{
	extern int pte_set_rdonly(pte_t *ptep, pgtable_t token, unsigned long addr, void *data);
	extern char _stext[], _etext[];
	const unsigned long start = PAGE_ALIGN((unsigned long)_stext);
	const unsigned long size = PAGE_ALIGN((unsigned long)_etext) - start;

	apply_to_page_range(&init_mm, start, size, pte_set_rdonly, NULL);
}
#endif	/* block end */

static bool free_pages_prepare(struct page *page, unsigned int order)
{
	int i;
	int bad = 0;

	// add this block: invalidate the current alias and drop the remap index
	if (!PageHighMem(page)) {
		set_memory_invalid((unsigned long)lowmem_page_address(page), 1 << order);
		mmu_random_unmap(page, 1 << order);
	}
	// block end
	......
	return true;
}
......
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;

		if (unlikely(check_new_page(p)))
			return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	// add this block: pick a fresh random index and validate the new alias
	if (!PageHighMem(page)) {
		mmu_random_remap(page, 1 << order);
		set_memory_valid((unsigned long)lowmem_page_address(page), 1 << order);
	}
	// block end

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);
	......
}
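A quick way to observe the effect of these allocator hooks is to watch the linear-map address of freshly allocated pages change across alloc/free cycles. The following is a hypothetical smoke-test module sketch (the module name and messages are made up, not part of the original patches); with the remap patches applied, the bits at MMU_REMAP_SHIFT of the printed virtual addresses should vary between iterations even when the same physical page is handed back.

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static void show_one_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return;
	/* page_address() goes through the patched __phys_to_virt() for lowmem */
	pr_info("remap_demo: pfn %lx mapped at %lx\n",
		page_to_pfn(page), (unsigned long)page_address(page));
	__free_page(page);
}

static int __init remap_demo_init(void)
{
	int i;

	for (i = 0; i < 4; i++)
		show_one_page();
	return 0;
}

static void __exit remap_demo_exit(void)
{
}

module_init(remap_demo_init);
module_exit(remap_demo_exit);
MODULE_LICENSE("GPL");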
int split_free_page(struct page *page)
{
	unsigned int order;
	int nr_pages;

	order = page_order(page);
	nr_pages = __isolate_free_page(page, order);
	if (!nr_pages)
		return 0;

	// add this block
	if (!PageHighMem(page)) {
		mmu_random_remap(page, 1 << order);
		set_memory_valid((unsigned long)lowmem_page_address(page), 1 << order);
	}
	// block end
	......
}
......
/* Skip this if there is no pasr_find_free_page() function! */
int pasr_find_free_page(struct page *page, struct list_head *freelist)
{
	struct zone *z = page_zone(page);
	unsigned int order;
	int free_count, i;

	/* Remove page from free list */
	order = page_order(page);
	// add this block
	if (!PageHighMem(page)) {
		mmu_random_remap(page, 1 << order);
		set_memory_valid((unsigned long)lowmem_page_address(page), 1 << order);
	}
	// block end
	list_del(&page->lru);
	......
}
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	void *p;

	/* The object's page may have been freed and its linear-map alias
	 * invalidated concurrently (the same hazard CONFIG_DEBUG_PAGEALLOC
	 * guards against), so always use the fault-tolerant read. */
//#ifdef CONFIG_DEBUG_PAGEALLOC			/* comment out this line */
	p = (void *)~(uintptr_t)0;			/* add this line */
	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
//#else						/* comment out this line */
//	p = get_freepointer(s, object);		/* comment out this line */
//#endif					/* comment out this line */
	return p;
}
extern int set_memory_invalid(unsigned long addr, int numpages);	/* add this line */

static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot,
				 int node)
{
	......
	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node == NUMA_NO_NODE)
			page = alloc_page(alloc_mask);
		else
			page = alloc_pages_node(node, alloc_mask, order);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		// add this block: the page will be reached through the vmalloc
		// mapping, so its linear-map alias is made invalid
		if (!PageHighMem(page))
			set_memory_invalid((unsigned long)lowmem_page_address(page), 1);
		// block end
		area->pages[i] = page;
		......
	}
	......
}
static void mrdump_mini_dump_loads(loff_t offset, mrdump_write write)
{
	......
	for (i = 0; i < MRDUMP_MINI_NR_SECTION; i++) {
		phdr = &mrdump_mini_ehdr->phdrs[i];
		if (phdr->p_type == PT_NULL)
			break;
		if (phdr->p_type == PT_LOAD) {
			//mrdump_mini_dump_phdr(phdr, &pos);
			start = phdr->p_vaddr;
			size = ALIGN(phdr->p_filesz, SZ_512);
			phdr->p_offset = pos;
			// add this block: make sure the range is valid before dumping it
			{
				extern int set_memory_valid(unsigned long virt, int numpages);

				set_memory_valid(start, size >> PAGE_SHIFT);
			}
			// block end
			errno = write((void *)start, pos + offset, size, 1);
			......
		}