Skip to content

Commit d127c19

Browse files
AlexGhiti authored and palmer-dabbelt committed
riscv: Improve kasan population function
Current population code populates a whole page table without taking care of what could have been already allocated and without taking into account possible index in page table, assuming the virtual address to map is always aligned on the page table size, which, for example, won't be the case when the kernel will get pushed to the end of the address space. Address those problems by rewriting the kasan population function, splitting it into subfunctions for each different page table level. Signed-off-by: Alexandre Ghiti <alex@ghiti.fr> Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
1 parent 9484e2a commit d127c19

1 file changed

Lines changed: 63 additions & 28 deletions

File tree

arch/riscv/mm/kasan_init.c

Lines changed: 63 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -60,37 +60,72 @@ asmlinkage void __init kasan_early_init(void)
6060
local_flush_tlb_all();
6161
}
6262

63-
static void __init populate(void *start, void *end)
63+
static void kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
64+
{
65+
phys_addr_t phys_addr;
66+
pte_t *ptep, *base_pte;
67+
68+
if (pmd_none(*pmd))
69+
base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
70+
else
71+
base_pte = (pte_t *)pmd_page_vaddr(*pmd);
72+
73+
ptep = base_pte + pte_index(vaddr);
74+
75+
do {
76+
if (pte_none(*ptep)) {
77+
phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
78+
set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
79+
}
80+
} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
81+
82+
set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
83+
}
84+
85+
static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
86+
{
87+
phys_addr_t phys_addr;
88+
pmd_t *pmdp, *base_pmd;
89+
unsigned long next;
90+
91+
base_pmd = (pmd_t *)pgd_page_vaddr(*pgd);
92+
if (base_pmd == lm_alias(kasan_early_shadow_pmd))
93+
base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
94+
95+
pmdp = base_pmd + pmd_index(vaddr);
96+
97+
do {
98+
next = pmd_addr_end(vaddr, end);
99+
kasan_populate_pte(pmdp, vaddr, next);
100+
} while (pmdp++, vaddr = next, vaddr != end);
101+
102+
/*
103+
* Wait for the whole PGD to be populated before setting the PGD in
104+
* the page table, otherwise, if we did set the PGD before populating
105+
* it entirely, memblock could allocate a page at a physical address
106+
* where KASAN is not populated yet and then we'd get a page fault.
107+
*/
108+
set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
109+
}
110+
111+
static void kasan_populate_pgd(unsigned long vaddr, unsigned long end)
112+
{
113+
phys_addr_t phys_addr;
114+
pgd_t *pgdp = pgd_offset_k(vaddr);
115+
unsigned long next;
116+
117+
do {
118+
next = pgd_addr_end(vaddr, end);
119+
kasan_populate_pmd(pgdp, vaddr, next);
120+
} while (pgdp++, vaddr = next, vaddr != end);
121+
}
122+
123+
static void __init kasan_populate(void *start, void *end)
64124
{
65-
unsigned long i, offset;
66125
unsigned long vaddr = (unsigned long)start & PAGE_MASK;
67126
unsigned long vend = PAGE_ALIGN((unsigned long)end);
68-
unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
69-
unsigned long n_ptes =
70-
((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
71-
unsigned long n_pmds =
72-
((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;
73-
74-
pte_t *pte =
75-
memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
76-
pmd_t *pmd =
77-
memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
78-
pgd_t *pgd = pgd_offset_k(vaddr);
79-
80-
for (i = 0; i < n_pages; i++) {
81-
phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
82-
set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
83-
}
84-
85-
for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
86-
set_pmd(&pmd[i],
87-
pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
88-
__pgprot(_PAGE_TABLE)));
89127

90-
for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
91-
set_pgd(&pgd[i],
92-
pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
93-
__pgprot(_PAGE_TABLE)));
128+
kasan_populate_pgd(vaddr, vend);
94129

95130
local_flush_tlb_all();
96131
memset(start, KASAN_SHADOW_INIT, end - start);
@@ -154,7 +189,7 @@ void __init kasan_init(void)
154189
if (start >= end)
155190
break;
156191

157-
populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
192+
kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
158193
};
159194

160195
for (i = 0; i < PTRS_PER_PTE; i++)

0 commit comments

Comments (0)