Skip to content

Commit cd0334e

Browse files
Alexandre Ghiti authored and Palmer Dabbelt committed
riscv: Split early and final KASAN population functions
This is a preliminary work that allows to make the code more understandable. Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com> Reviewed-by: Björn Töpel <bjorn@rivosinc.com> Link: https://lore.kernel.org/r/20230203075232.274282-2-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
1 parent 6d796c5 commit cd0334e

1 file changed

Lines changed: 116 additions & 69 deletions

File tree

arch/riscv/mm/kasan_init.c

Lines changed: 116 additions & 69 deletions
Original file line numberDiff line numberDiff line change
@@ -95,23 +95,13 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
9595
}
9696

9797
static void __init kasan_populate_pud(pgd_t *pgd,
98-
unsigned long vaddr, unsigned long end,
99-
bool early)
98+
unsigned long vaddr, unsigned long end)
10099
{
101100
phys_addr_t phys_addr;
102101
pud_t *pudp, *base_pud;
103102
unsigned long next;
104103

105-
if (early) {
106-
/*
107-
* We can't use pgd_page_vaddr here as it would return a linear
108-
* mapping address but it is not mapped yet, but when populating
109-
* early_pg_dir, we need the physical address and when populating
110-
* swapper_pg_dir, we need the kernel virtual address so use
111-
* pt_ops facility.
112-
*/
113-
base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
114-
} else if (pgd_none(*pgd)) {
104+
if (pgd_none(*pgd)) {
115105
base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
116106
memcpy(base_pud, (void *)kasan_early_shadow_pud,
117107
sizeof(pud_t) * PTRS_PER_PUD);
@@ -130,16 +120,10 @@ static void __init kasan_populate_pud(pgd_t *pgd,
130120
next = pud_addr_end(vaddr, end);
131121

132122
if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
133-
if (early) {
134-
phys_addr = __pa(((uintptr_t)kasan_early_shadow_pmd));
135-
set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
123+
phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
124+
if (phys_addr) {
125+
set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
136126
continue;
137-
} else {
138-
phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
139-
if (phys_addr) {
140-
set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
141-
continue;
142-
}
143127
}
144128
}
145129

@@ -152,34 +136,21 @@ static void __init kasan_populate_pud(pgd_t *pgd,
152136
* it entirely, memblock could allocate a page at a physical address
153137
* where KASAN is not populated yet and then we'd get a page fault.
154138
*/
155-
if (!early)
156-
set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
139+
set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
157140
}
158141

159142
static void __init kasan_populate_p4d(pgd_t *pgd,
160-
unsigned long vaddr, unsigned long end,
161-
bool early)
143+
unsigned long vaddr, unsigned long end)
162144
{
163145
phys_addr_t phys_addr;
164146
p4d_t *p4dp, *base_p4d;
165147
unsigned long next;
166148

167-
if (early) {
168-
/*
169-
* We can't use pgd_page_vaddr here as it would return a linear
170-
* mapping address but it is not mapped yet, but when populating
171-
* early_pg_dir, we need the physical address and when populating
172-
* swapper_pg_dir, we need the kernel virtual address so use
173-
* pt_ops facility.
174-
*/
175-
base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgd)));
176-
} else {
177-
base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
178-
if (base_p4d == lm_alias(kasan_early_shadow_p4d)) {
179-
base_p4d = memblock_alloc(PTRS_PER_PUD * sizeof(p4d_t), PAGE_SIZE);
180-
memcpy(base_p4d, (void *)kasan_early_shadow_p4d,
181-
sizeof(p4d_t) * PTRS_PER_P4D);
182-
}
149+
base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
150+
if (base_p4d == lm_alias(kasan_early_shadow_p4d)) {
151+
base_p4d = memblock_alloc(PTRS_PER_PUD * sizeof(p4d_t), PAGE_SIZE);
152+
memcpy(base_p4d, (void *)kasan_early_shadow_p4d,
153+
sizeof(p4d_t) * PTRS_PER_P4D);
183154
}
184155

185156
p4dp = base_p4d + p4d_index(vaddr);
@@ -188,20 +159,14 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
188159
next = p4d_addr_end(vaddr, end);
189160

190161
if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
191-
if (early) {
192-
phys_addr = __pa(((uintptr_t)kasan_early_shadow_pud));
193-
set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
162+
phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
163+
if (phys_addr) {
164+
set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
194165
continue;
195-
} else {
196-
phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
197-
if (phys_addr) {
198-
set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
199-
continue;
200-
}
201166
}
202167
}
203168

204-
kasan_populate_pud((pgd_t *)p4dp, vaddr, next, early);
169+
kasan_populate_pud((pgd_t *)p4dp, vaddr, next);
205170
} while (p4dp++, vaddr = next, vaddr != end);
206171

207172
/*
@@ -210,25 +175,23 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
210175
* it entirely, memblock could allocate a page at a physical address
211176
* where KASAN is not populated yet and then we'd get a page fault.
212177
*/
213-
if (!early)
214-
set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_p4d)), PAGE_TABLE));
178+
set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_p4d)), PAGE_TABLE));
215179
}
216180

217181
#define kasan_early_shadow_pgd_next (pgtable_l5_enabled ? \
218182
(uintptr_t)kasan_early_shadow_p4d : \
219183
(pgtable_l4_enabled ? \
220184
(uintptr_t)kasan_early_shadow_pud : \
221185
(uintptr_t)kasan_early_shadow_pmd))
222-
#define kasan_populate_pgd_next(pgdp, vaddr, next, early) \
186+
#define kasan_populate_pgd_next(pgdp, vaddr, next) \
223187
(pgtable_l5_enabled ? \
224-
kasan_populate_p4d(pgdp, vaddr, next, early) : \
188+
kasan_populate_p4d(pgdp, vaddr, next) : \
225189
(pgtable_l4_enabled ? \
226-
kasan_populate_pud(pgdp, vaddr, next, early) : \
190+
kasan_populate_pud(pgdp, vaddr, next) : \
227191
kasan_populate_pmd((pud_t *)pgdp, vaddr, next)))
228192

229193
static void __init kasan_populate_pgd(pgd_t *pgdp,
230-
unsigned long vaddr, unsigned long end,
231-
bool early)
194+
unsigned long vaddr, unsigned long end)
232195
{
233196
phys_addr_t phys_addr;
234197
unsigned long next;
@@ -237,11 +200,7 @@ static void __init kasan_populate_pgd(pgd_t *pgdp,
237200
next = pgd_addr_end(vaddr, end);
238201

239202
if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
240-
if (early) {
241-
phys_addr = __pa((uintptr_t)kasan_early_shadow_pgd_next);
242-
set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
243-
continue;
244-
} else if (pgd_page_vaddr(*pgdp) ==
203+
if (pgd_page_vaddr(*pgdp) ==
245204
(unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
246205
/*
247206
* pgdp can't be none since kasan_early_init
@@ -258,7 +217,95 @@ static void __init kasan_populate_pgd(pgd_t *pgdp,
258217
}
259218
}
260219

261-
kasan_populate_pgd_next(pgdp, vaddr, next, early);
220+
kasan_populate_pgd_next(pgdp, vaddr, next);
221+
} while (pgdp++, vaddr = next, vaddr != end);
222+
}
223+
224+
static void __init kasan_early_populate_pud(p4d_t *p4dp,
225+
unsigned long vaddr,
226+
unsigned long end)
227+
{
228+
pud_t *pudp, *base_pud;
229+
phys_addr_t phys_addr;
230+
unsigned long next;
231+
232+
if (!pgtable_l4_enabled) {
233+
pudp = (pud_t *)p4dp;
234+
} else {
235+
base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
236+
pudp = base_pud + pud_index(vaddr);
237+
}
238+
239+
do {
240+
next = pud_addr_end(vaddr, end);
241+
242+
if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
243+
(next - vaddr) >= PUD_SIZE) {
244+
phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
245+
set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
246+
continue;
247+
}
248+
249+
BUG();
250+
} while (pudp++, vaddr = next, vaddr != end);
251+
}
252+
253+
static void __init kasan_early_populate_p4d(pgd_t *pgdp,
254+
unsigned long vaddr,
255+
unsigned long end)
256+
{
257+
p4d_t *p4dp, *base_p4d;
258+
phys_addr_t phys_addr;
259+
unsigned long next;
260+
261+
/*
262+
* We can't use pgd_page_vaddr here as it would return a linear
263+
* mapping address but it is not mapped yet, but when populating
264+
* early_pg_dir, we need the physical address and when populating
265+
* swapper_pg_dir, we need the kernel virtual address so use
266+
* pt_ops facility.
267+
* Note that this test is then completely equivalent to
268+
* p4dp = p4d_offset(pgdp, vaddr)
269+
*/
270+
if (!pgtable_l5_enabled) {
271+
p4dp = (p4d_t *)pgdp;
272+
} else {
273+
base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
274+
p4dp = base_p4d + p4d_index(vaddr);
275+
}
276+
277+
do {
278+
next = p4d_addr_end(vaddr, end);
279+
280+
if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
281+
(next - vaddr) >= P4D_SIZE) {
282+
phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
283+
set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
284+
continue;
285+
}
286+
287+
kasan_early_populate_pud(p4dp, vaddr, next);
288+
} while (p4dp++, vaddr = next, vaddr != end);
289+
}
290+
291+
static void __init kasan_early_populate_pgd(pgd_t *pgdp,
292+
unsigned long vaddr,
293+
unsigned long end)
294+
{
295+
phys_addr_t phys_addr;
296+
unsigned long next;
297+
298+
do {
299+
next = pgd_addr_end(vaddr, end);
300+
301+
if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
302+
(next - vaddr) >= PGDIR_SIZE) {
303+
phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
304+
set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
305+
continue;
306+
}
307+
308+
kasan_early_populate_p4d(pgdp, vaddr, next);
262309
} while (pgdp++, vaddr = next, vaddr != end);
263310
}
264311

@@ -295,16 +342,16 @@ asmlinkage void __init kasan_early_init(void)
295342
PAGE_TABLE));
296343
}
297344

298-
kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
299-
KASAN_SHADOW_START, KASAN_SHADOW_END, true);
345+
kasan_early_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
346+
KASAN_SHADOW_START, KASAN_SHADOW_END);
300347

301348
local_flush_tlb_all();
302349
}
303350

304351
void __init kasan_swapper_init(void)
305352
{
306-
kasan_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
307-
KASAN_SHADOW_START, KASAN_SHADOW_END, true);
353+
kasan_early_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
354+
KASAN_SHADOW_START, KASAN_SHADOW_END);
308355

309356
local_flush_tlb_all();
310357
}
@@ -314,7 +361,7 @@ static void __init kasan_populate(void *start, void *end)
314361
unsigned long vaddr = (unsigned long)start & PAGE_MASK;
315362
unsigned long vend = PAGE_ALIGN((unsigned long)end);
316363

317-
kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend, false);
364+
kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend);
318365

319366
local_flush_tlb_all();
320367
memset(start, KASAN_SHADOW_INIT, end - start);

0 commit comments

Comments (0)