Skip to content

Commit 1182520

Browse files
committed
KVM: selftests: Plumb "struct kvm_mmu" into x86's MMU APIs
In preparation for generalizing the x86 virt mapping APIs to work with TDP (stage-2) page tables, plumb "struct kvm_mmu" into all of the helper functions instead of operating on vm->mmu directly.

Opportunistically swap the order of the check in virt_get_pte() to first assert that the parent is the PGD, and then check that the PTE is present, as it makes more sense to check if the parent PTE is the PGD/root (i.e. not a PTE) before checking that the PTE is PRESENT.

No functional change intended.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
[sean: rebase on common kvm_mmu structure, rewrite changelog]
Link: https://patch.msgid.link/20251230230150.4150236-8-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 9f073ac commit 1182520

2 files changed

Lines changed: 39 additions & 28 deletions

File tree

tools/testing/selftests/kvm/include/x86/processor.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1451,7 +1451,8 @@ enum pg_level {
14511451
#define PG_SIZE_2M PG_LEVEL_SIZE(PG_LEVEL_2M)
14521452
#define PG_SIZE_1G PG_LEVEL_SIZE(PG_LEVEL_1G)
14531453

1454-
void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
1454+
void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
1455+
uint64_t paddr, int level);
14551456
void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
14561457
uint64_t nr_bytes, int level);
14571458

tools/testing/selftests/kvm/lib/x86/processor.c

Lines changed: 37 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -156,40 +156,46 @@ bool kvm_is_tdp_enabled(void)
156156
return get_kvm_amd_param_bool("npt");
157157
}
158158

159+
static void virt_mmu_init(struct kvm_vm *vm, struct kvm_mmu *mmu)
160+
{
161+
/* If needed, create the top-level page table. */
162+
if (!mmu->pgd_created) {
163+
mmu->pgd = vm_alloc_page_table(vm);
164+
mmu->pgd_created = true;
165+
}
166+
}
167+
159168
void virt_arch_pgd_alloc(struct kvm_vm *vm)
160169
{
161170
TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
162171
"Unknown or unsupported guest mode: 0x%x", vm->mode);
163172

164-
/* If needed, create the top-level page table. */
165-
if (!vm->mmu.pgd_created) {
166-
vm->mmu.pgd = vm_alloc_page_table(vm);
167-
vm->mmu.pgd_created = true;
168-
}
173+
virt_mmu_init(vm, &vm->mmu);
169174
}
170175

171-
static void *virt_get_pte(struct kvm_vm *vm, uint64_t *parent_pte,
172-
uint64_t vaddr, int level)
176+
static void *virt_get_pte(struct kvm_vm *vm, struct kvm_mmu *mmu,
177+
uint64_t *parent_pte, uint64_t vaddr, int level)
173178
{
174179
uint64_t pt_gpa = PTE_GET_PA(*parent_pte);
175180
uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
176181
int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
177182

178-
TEST_ASSERT((*parent_pte & PTE_PRESENT_MASK) || parent_pte == &vm->mmu.pgd,
183+
TEST_ASSERT((*parent_pte == mmu->pgd) || (*parent_pte & PTE_PRESENT_MASK),
179184
"Parent PTE (level %d) not PRESENT for gva: 0x%08lx",
180185
level + 1, vaddr);
181186

182187
return &page_table[index];
183188
}
184189

185190
static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
191+
struct kvm_mmu *mmu,
186192
uint64_t *parent_pte,
187193
uint64_t vaddr,
188194
uint64_t paddr,
189195
int current_level,
190196
int target_level)
191197
{
192-
uint64_t *pte = virt_get_pte(vm, parent_pte, vaddr, current_level);
198+
uint64_t *pte = virt_get_pte(vm, mmu, parent_pte, vaddr, current_level);
193199

194200
paddr = vm_untag_gpa(vm, paddr);
195201

@@ -215,10 +221,11 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
215221
return pte;
216222
}
217223

218-
void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
224+
void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
225+
uint64_t paddr, int level)
219226
{
220227
const uint64_t pg_size = PG_LEVEL_SIZE(level);
221-
uint64_t *pte = &vm->mmu.pgd;
228+
uint64_t *pte = &mmu->pgd;
222229
int current_level;
223230

224231
TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
@@ -243,17 +250,17 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
243250
* Allocate upper level page tables, if not already present. Return
244251
* early if a hugepage was created.
245252
*/
246-
for (current_level = vm->mmu.pgtable_levels;
253+
for (current_level = mmu->pgtable_levels;
247254
current_level > PG_LEVEL_4K;
248255
current_level--) {
249-
pte = virt_create_upper_pte(vm, pte, vaddr, paddr,
256+
pte = virt_create_upper_pte(vm, mmu, pte, vaddr, paddr,
250257
current_level, level);
251258
if (*pte & PTE_LARGE_MASK)
252259
return;
253260
}
254261

255262
/* Fill in page table entry. */
256-
pte = virt_get_pte(vm, pte, vaddr, PG_LEVEL_4K);
263+
pte = virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
257264
TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
258265
"PTE already present for 4k page at vaddr: 0x%lx", vaddr);
259266
*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
@@ -270,7 +277,7 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
270277

271278
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
272279
{
273-
__virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K);
280+
__virt_pg_map(vm, &vm->mmu, vaddr, paddr, PG_LEVEL_4K);
274281
}
275282

276283
void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
@@ -285,7 +292,7 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
285292
nr_bytes, pg_size);
286293

287294
for (i = 0; i < nr_pages; i++) {
288-
__virt_pg_map(vm, vaddr, paddr, level);
295+
__virt_pg_map(vm, &vm->mmu, vaddr, paddr, level);
289296
sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift,
290297
nr_bytes / PAGE_SIZE);
291298

@@ -294,7 +301,8 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
294301
}
295302
}
296303

297-
static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level)
304+
static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte,
305+
int *level, int current_level)
298306
{
299307
if (*pte & PTE_LARGE_MASK) {
300308
TEST_ASSERT(*level == PG_LEVEL_NONE ||
@@ -306,17 +314,19 @@ static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level)
306314
return *level == current_level;
307315
}
308316

309-
static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
317+
static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm,
318+
struct kvm_mmu *mmu,
319+
uint64_t vaddr,
310320
int *level)
311321
{
312-
int va_width = 12 + (vm->mmu.pgtable_levels) * 9;
313-
uint64_t *pte = &vm->mmu.pgd;
322+
int va_width = 12 + (mmu->pgtable_levels) * 9;
323+
uint64_t *pte = &mmu->pgd;
314324
int current_level;
315325

316326
TEST_ASSERT(!vm->arch.is_pt_protected,
317327
"Walking page tables of protected guests is impossible");
318328

319-
TEST_ASSERT(*level >= PG_LEVEL_NONE && *level <= vm->mmu.pgtable_levels,
329+
TEST_ASSERT(*level >= PG_LEVEL_NONE && *level <= mmu->pgtable_levels,
320330
"Invalid PG_LEVEL_* '%d'", *level);
321331

322332
TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
@@ -332,22 +342,22 @@ static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
332342
(((int64_t)vaddr << (64 - va_width) >> (64 - va_width))),
333343
"Canonical check failed. The virtual address is invalid.");
334344

335-
for (current_level = vm->mmu.pgtable_levels;
345+
for (current_level = mmu->pgtable_levels;
336346
current_level > PG_LEVEL_4K;
337347
current_level--) {
338-
pte = virt_get_pte(vm, pte, vaddr, current_level);
339-
if (vm_is_target_pte(pte, level, current_level))
348+
pte = virt_get_pte(vm, mmu, pte, vaddr, current_level);
349+
if (vm_is_target_pte(mmu, pte, level, current_level))
340350
return pte;
341351
}
342352

343-
return virt_get_pte(vm, pte, vaddr, PG_LEVEL_4K);
353+
return virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
344354
}
345355

346356
uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
347357
{
348358
int level = PG_LEVEL_4K;
349359

350-
return __vm_get_page_table_entry(vm, vaddr, &level);
360+
return __vm_get_page_table_entry(vm, &vm->mmu, vaddr, &level);
351361
}
352362

353363
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
@@ -497,7 +507,7 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_segment *segp)
497507
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
498508
{
499509
int level = PG_LEVEL_NONE;
500-
uint64_t *pte = __vm_get_page_table_entry(vm, gva, &level);
510+
uint64_t *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
501511

502512
TEST_ASSERT(*pte & PTE_PRESENT_MASK,
503513
"Leaf PTE not PRESENT for gva: 0x%08lx", gva);

0 commit comments

Comments (0)