Skip to content

Commit 5ec5ac4

Browse files
seehearfeel authored
and chenhuacai committed
LoongArch: Rework KASAN initialization for PTW-enabled systems
kasan_init_generic() indicates that kasan is fully initialized, so it should be put at the end of kasan_init(). Otherwise, bringing up the primary CPU fails when CONFIG_KASAN is set on PTW-enabled systems. Here is the call chain:

kernel_entry()
  start_kernel()
    setup_arch()
      kasan_init()
        kasan_init_generic()

The reason is that PTW-enabled systems have speculative accesses, which means memory accesses to the shadow memory after kasan_init() may be executed by hardware beforehand. However, accessing shadow memory is safe only after kasan is fully initialized, because kasan_init() uses a temporary PGD table until we have populated all levels of shadow page tables and written the PGD register. Moving kasan_init_generic() later defers the point at which kasan_enabled() becomes true, so as to avoid speculative accesses on shadow pages.

After moving kasan_init_generic() to the end, kasan_init() can no longer call kasan_mem_to_shadow() for shadow address conversion, because it would always return kasan_early_shadow_page. On the other hand, we should keep the current logic of kasan_mem_to_shadow() for both the early and final stages, because there may be instrumentation before kasan_init(). To solve this, we factor out a new mem_to_shadow() function from the current kasan_mem_to_shadow() for the shadow address conversion in kasan_init().

Cc: stable@vger.kernel.org
Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
1 parent 7cb37af commit 5ec5ac4

1 file changed

Lines changed: 40 additions & 38 deletions

File tree

arch/loongarch/mm/kasan_init.c

Lines changed: 40 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -40,39 +40,43 @@ static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
4040
#define __pte_none(early, pte) (early ? pte_none(pte) : \
4141
((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))
4242

43-
void *kasan_mem_to_shadow(const void *addr)
43+
static void *mem_to_shadow(const void *addr)
4444
{
45-
if (!kasan_enabled()) {
45+
unsigned long offset = 0;
46+
unsigned long maddr = (unsigned long)addr;
47+
unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
48+
49+
if (maddr >= FIXADDR_START)
4650
return (void *)(kasan_early_shadow_page);
47-
} else {
48-
unsigned long maddr = (unsigned long)addr;
49-
unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
50-
unsigned long offset = 0;
51-
52-
if (maddr >= FIXADDR_START)
53-
return (void *)(kasan_early_shadow_page);
54-
55-
maddr &= XRANGE_SHADOW_MASK;
56-
switch (xrange) {
57-
case XKPRANGE_CC_SEG:
58-
offset = XKPRANGE_CC_SHADOW_OFFSET;
59-
break;
60-
case XKPRANGE_UC_SEG:
61-
offset = XKPRANGE_UC_SHADOW_OFFSET;
62-
break;
63-
case XKPRANGE_WC_SEG:
64-
offset = XKPRANGE_WC_SHADOW_OFFSET;
65-
break;
66-
case XKVRANGE_VC_SEG:
67-
offset = XKVRANGE_VC_SHADOW_OFFSET;
68-
break;
69-
default:
70-
WARN_ON(1);
71-
return NULL;
72-
}
7351

74-
return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
52+
maddr &= XRANGE_SHADOW_MASK;
53+
switch (xrange) {
54+
case XKPRANGE_CC_SEG:
55+
offset = XKPRANGE_CC_SHADOW_OFFSET;
56+
break;
57+
case XKPRANGE_UC_SEG:
58+
offset = XKPRANGE_UC_SHADOW_OFFSET;
59+
break;
60+
case XKPRANGE_WC_SEG:
61+
offset = XKPRANGE_WC_SHADOW_OFFSET;
62+
break;
63+
case XKVRANGE_VC_SEG:
64+
offset = XKVRANGE_VC_SHADOW_OFFSET;
65+
break;
66+
default:
67+
WARN_ON(1);
68+
return NULL;
7569
}
70+
71+
return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
72+
}
73+
74+
void *kasan_mem_to_shadow(const void *addr)
75+
{
76+
if (kasan_enabled())
77+
return mem_to_shadow(addr);
78+
else
79+
return (void *)(kasan_early_shadow_page);
7680
}
7781

7882
const void *kasan_shadow_to_mem(const void *shadow_addr)
@@ -293,11 +297,8 @@ void __init kasan_init(void)
293297
/* Maps everything to a single page of zeroes */
294298
kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);
295299

296-
kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
297-
kasan_mem_to_shadow((void *)KFENCE_AREA_END));
298-
299-
/* Enable KASAN here before kasan_mem_to_shadow(). */
300-
kasan_init_generic();
300+
kasan_populate_early_shadow(mem_to_shadow((void *)VMALLOC_START),
301+
mem_to_shadow((void *)KFENCE_AREA_END));
301302

302303
/* Populate the linear mapping */
303304
for_each_mem_range(i, &pa_start, &pa_end) {
@@ -307,13 +308,13 @@ void __init kasan_init(void)
307308
if (start >= end)
308309
break;
309310

310-
kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
311-
(unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
311+
kasan_map_populate((unsigned long)mem_to_shadow(start),
312+
(unsigned long)mem_to_shadow(end), NUMA_NO_NODE);
312313
}
313314

314315
/* Populate modules mapping */
315-
kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
316-
(unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
316+
kasan_map_populate((unsigned long)mem_to_shadow((void *)MODULES_VADDR),
317+
(unsigned long)mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
317318
/*
318319
* KAsan may reuse the contents of kasan_early_shadow_pte directly, so we
319320
* should make sure that it maps the zero page read-only.
@@ -328,4 +329,5 @@ void __init kasan_init(void)
328329

329330
/* At this point kasan is fully initialized. Enable error messages */
330331
init_task.kasan_depth = 0;
332+
kasan_init_generic();
331333
}

0 commit comments

Comments
 (0)