
Commit 6dd7075

yosrym93 authored and sean-jc committed
KVM: selftests: Move PTE bitmasks to kvm_mmu
Move the PTE bitmasks into kvm_mmu to parameterize them for the virt mapping functions, and introduce helpers to read/write different PTE bits given a kvm_mmu. Drop the 'global' bit definition as it is currently unused, but keep the 'user' bit as it will be used in coming changes. Opportunistically rename 'large' to 'huge', which is more consistent with kernel naming.

Leave PHYSICAL_PAGE_MASK alone: it is fixed across all page table formats and a lot of other macros depend on it. It is tempting to make all the other macros per-struct as well, but that would be too much noise for little benefit.

Keep c_bit and s_bit in vm->arch, as they are used before the MMU is initialized, via __vm_create() -> vm_userspace_mem_region_add() -> vm_mem_add() -> vm_arch_has_protected_memory().

No functional change intended.

Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
[sean: rename accessors to is_<adjective>_pte()]
Link: https://patch.msgid.link/20251230230150.4150236-10-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 3d0e759 commit 6dd7075

3 files changed: 76 additions & 39 deletions
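In miniature, the change turns the global page-table #defines into per-MMU data and has the walkers query that data through the kvm_mmu they are handed. The condensed sketch below is illustrative only: the struct, the initializer values, and the accessors match the diff that follows, but the helper pte_is_mapped_writable() is hypothetical and not part of the commit.

#include "kvm_util.h"
#include "processor.h"

/*
 * The long-standing x86 bit positions become data attached to the MMU;
 * the commit builds this initializer in virt_arch_pgd_alloc(), where the
 * .c and .s fields are filled from vm->arch.c_bit / vm->arch.s_bit.
 */
struct pte_masks x86_default_masks = {
        .present  = BIT_ULL(0),
        .writable = BIT_ULL(1),
        .user     = BIT_ULL(2),
        .accessed = BIT_ULL(5),
        .dirty    = BIT_ULL(6),
        .huge     = BIT_ULL(7),
        .nx       = BIT_ULL(63),
};

/*
 * Hypothetical helper, not from the commit: walkers consult the kvm_mmu
 * they are given instead of global PTE_*_MASK macros.
 */
bool pte_is_mapped_writable(struct kvm_mmu *mmu, uint64_t *pte)
{
        return is_present_pte(mmu, pte) && is_writable_pte(mmu, pte);
}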


tools/testing/selftests/kvm/include/x86/kvm_util_arch.h

Lines changed: 15 additions & 1 deletion
@@ -10,7 +10,21 @@

 extern bool is_forced_emulation_enabled;

-struct kvm_mmu_arch {};
+struct pte_masks {
+        uint64_t present;
+        uint64_t writable;
+        uint64_t user;
+        uint64_t accessed;
+        uint64_t dirty;
+        uint64_t huge;
+        uint64_t nx;
+        uint64_t c;
+        uint64_t s;
+};
+
+struct kvm_mmu_arch {
+        struct pte_masks pte_masks;
+};

 struct kvm_vm_arch {
         vm_vaddr_t gdt;

tools/testing/selftests/kvm/include/x86/processor.h

Lines changed: 18 additions & 10 deletions
@@ -362,16 +362,6 @@ static inline unsigned int x86_model(unsigned int eax)
         return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
 }

-/* Page table bitfield declarations */
-#define PTE_PRESENT_MASK        BIT_ULL(0)
-#define PTE_WRITABLE_MASK       BIT_ULL(1)
-#define PTE_USER_MASK           BIT_ULL(2)
-#define PTE_ACCESSED_MASK       BIT_ULL(5)
-#define PTE_DIRTY_MASK          BIT_ULL(6)
-#define PTE_LARGE_MASK          BIT_ULL(7)
-#define PTE_GLOBAL_MASK         BIT_ULL(8)
-#define PTE_NX_MASK             BIT_ULL(63)
-
 #define PHYSICAL_PAGE_MASK      GENMASK_ULL(51, 12)

 #define PAGE_SHIFT              12
@@ -1451,6 +1441,24 @@ enum pg_level {
 #define PG_SIZE_2M              PG_LEVEL_SIZE(PG_LEVEL_2M)
 #define PG_SIZE_1G              PG_LEVEL_SIZE(PG_LEVEL_1G)

+#define PTE_PRESENT_MASK(mmu)   ((mmu)->arch.pte_masks.present)
+#define PTE_WRITABLE_MASK(mmu)  ((mmu)->arch.pte_masks.writable)
+#define PTE_USER_MASK(mmu)      ((mmu)->arch.pte_masks.user)
+#define PTE_ACCESSED_MASK(mmu)  ((mmu)->arch.pte_masks.accessed)
+#define PTE_DIRTY_MASK(mmu)     ((mmu)->arch.pte_masks.dirty)
+#define PTE_HUGE_MASK(mmu)      ((mmu)->arch.pte_masks.huge)
+#define PTE_NX_MASK(mmu)        ((mmu)->arch.pte_masks.nx)
+#define PTE_C_BIT_MASK(mmu)     ((mmu)->arch.pte_masks.c)
+#define PTE_S_BIT_MASK(mmu)     ((mmu)->arch.pte_masks.s)
+
+#define is_present_pte(mmu, pte)        (!!(*(pte) & PTE_PRESENT_MASK(mmu)))
+#define is_writable_pte(mmu, pte)       (!!(*(pte) & PTE_WRITABLE_MASK(mmu)))
+#define is_user_pte(mmu, pte)           (!!(*(pte) & PTE_USER_MASK(mmu)))
+#define is_accessed_pte(mmu, pte)       (!!(*(pte) & PTE_ACCESSED_MASK(mmu)))
+#define is_dirty_pte(mmu, pte)          (!!(*(pte) & PTE_DIRTY_MASK(mmu)))
+#define is_huge_pte(mmu, pte)           (!!(*(pte) & PTE_HUGE_MASK(mmu)))
+#define is_nx_pte(mmu, pte)             (!!(*(pte) & PTE_NX_MASK(mmu)))
+
 void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
                    uint64_t paddr, int level);
 void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
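With these accessors in place, test code that has a kvm_mmu in hand (directly or via vm->mmu) no longer needs the global bit #defines. A hedged usage sketch: check_mapping() is a hypothetical test fragment, while vm_get_page_table_entry(), TEST_ASSERT(), and vm->mmu are existing selftest pieces visible in this diff.

#include "kvm_util.h"
#include "processor.h"

/*
 * Hypothetical test fragment (not part of this commit): after mapping
 * 'gva', fetch its leaf PTE and check bits via the per-MMU accessors.
 */
static void check_mapping(struct kvm_vm *vm, uint64_t gva)
{
        uint64_t *pte = vm_get_page_table_entry(vm, gva);

        TEST_ASSERT(is_present_pte(&vm->mmu, pte),
                    "Expected PTE for gva 0x%lx to be present", gva);
        TEST_ASSERT(is_writable_pte(&vm->mmu, pte),
                    "Expected PTE for gva 0x%lx to be writable", gva);
}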

tools/testing/selftests/kvm/lib/x86/processor.c

Lines changed: 43 additions & 28 deletions
@@ -156,12 +156,14 @@ bool kvm_is_tdp_enabled(void)
         return get_kvm_amd_param_bool("npt");
 }

-static void virt_mmu_init(struct kvm_vm *vm, struct kvm_mmu *mmu)
+static void virt_mmu_init(struct kvm_vm *vm, struct kvm_mmu *mmu,
+                          struct pte_masks *pte_masks)
 {
         /* If needed, create the top-level page table. */
         if (!mmu->pgd_created) {
                 mmu->pgd = vm_alloc_page_table(vm);
                 mmu->pgd_created = true;
+                mmu->arch.pte_masks = *pte_masks;
         }
 }

@@ -170,7 +172,19 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
         TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
                     "Unknown or unsupported guest mode: 0x%x", vm->mode);

-        virt_mmu_init(vm, &vm->mmu);
+        struct pte_masks pte_masks = (struct pte_masks){
+                .present  = BIT_ULL(0),
+                .writable = BIT_ULL(1),
+                .user     = BIT_ULL(2),
+                .accessed = BIT_ULL(5),
+                .dirty    = BIT_ULL(6),
+                .huge     = BIT_ULL(7),
+                .nx       = BIT_ULL(63),
+                .c        = vm->arch.c_bit,
+                .s        = vm->arch.s_bit,
+        };
+
+        virt_mmu_init(vm, &vm->mmu, &pte_masks);
 }

 static void *virt_get_pte(struct kvm_vm *vm, struct kvm_mmu *mmu,
@@ -180,7 +194,7 @@ static void *virt_get_pte(struct kvm_vm *vm, struct kvm_mmu *mmu,
         uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
         int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;

-        TEST_ASSERT((*parent_pte == mmu->pgd) || (*parent_pte & PTE_PRESENT_MASK),
+        TEST_ASSERT((*parent_pte == mmu->pgd) || is_present_pte(mmu, parent_pte),
                     "Parent PTE (level %d) not PRESENT for gva: 0x%08lx",
                     level + 1, vaddr);

@@ -199,10 +213,10 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,

         paddr = vm_untag_gpa(vm, paddr);

-        if (!(*pte & PTE_PRESENT_MASK)) {
-                *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
+        if (!is_present_pte(mmu, pte)) {
+                *pte = PTE_PRESENT_MASK(mmu) | PTE_WRITABLE_MASK(mmu);
                 if (current_level == target_level)
-                        *pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK);
+                        *pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
                 else
                         *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
         } else {
@@ -214,7 +228,7 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
                 TEST_ASSERT(current_level != target_level,
                             "Cannot create hugepage at level: %u, vaddr: 0x%lx",
                             current_level, vaddr);
-                TEST_ASSERT(!(*pte & PTE_LARGE_MASK),
+                TEST_ASSERT(!is_huge_pte(mmu, pte),
                             "Cannot create page table at level: %u, vaddr: 0x%lx",
                             current_level, vaddr);
         }
@@ -255,24 +269,24 @@ void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
              current_level--) {
                 pte = virt_create_upper_pte(vm, mmu, pte, vaddr, paddr,
                                             current_level, level);
-                if (*pte & PTE_LARGE_MASK)
+                if (is_huge_pte(mmu, pte))
                         return;
         }

         /* Fill in page table entry. */
         pte = virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
-        TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
+        TEST_ASSERT(!is_present_pte(mmu, pte),
                     "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
-        *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
+        *pte = PTE_PRESENT_MASK(mmu) | PTE_WRITABLE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);

         /*
          * Neither SEV nor TDX supports shared page tables, so only the final
          * leaf PTE needs manually set the C/S-bit.
          */
         if (vm_is_gpa_protected(vm, paddr))
-                *pte |= vm->arch.c_bit;
+                *pte |= PTE_C_BIT_MASK(mmu);
         else
-                *pte |= vm->arch.s_bit;
+                *pte |= PTE_S_BIT_MASK(mmu);
 }

 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
@@ -304,7 +318,7 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte,
                              int *level, int current_level)
 {
-        if (*pte & PTE_LARGE_MASK) {
+        if (is_huge_pte(mmu, pte)) {
                 TEST_ASSERT(*level == PG_LEVEL_NONE ||
                             *level == current_level,
                             "Unexpected hugepage at level %d", current_level);
@@ -362,60 +376,61 @@ uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)

 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 {
+        struct kvm_mmu *mmu = &vm->mmu;
         uint64_t *pml4e, *pml4e_start;
         uint64_t *pdpe, *pdpe_start;
         uint64_t *pde, *pde_start;
         uint64_t *pte, *pte_start;

-        if (!vm->mmu.pgd_created)
+        if (!mmu->pgd_created)
                 return;

         fprintf(stream, "%*s "
                 " no\n", indent, "");
         fprintf(stream, "%*s index hvaddr gpaddr "
                 "addr w exec dirty\n",
                 indent, "");
-        pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->mmu.pgd);
+        pml4e_start = (uint64_t *) addr_gpa2hva(vm, mmu->pgd);
         for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
                 pml4e = &pml4e_start[n1];
-                if (!(*pml4e & PTE_PRESENT_MASK))
+                if (!is_present_pte(mmu, pml4e))
                         continue;
                 fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u "
                         " %u\n",
                         indent, "",
                         pml4e - pml4e_start, pml4e,
                         addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
-                        !!(*pml4e & PTE_WRITABLE_MASK), !!(*pml4e & PTE_NX_MASK));
+                        is_writable_pte(mmu, pml4e), is_nx_pte(mmu, pml4e));

                 pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
                 for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
                         pdpe = &pdpe_start[n2];
-                        if (!(*pdpe & PTE_PRESENT_MASK))
+                        if (!is_present_pte(mmu, pdpe))
                                 continue;
                         fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10llx "
                                 "%u %u\n",
                                 indent, "",
                                 pdpe - pdpe_start, pdpe,
                                 addr_hva2gpa(vm, pdpe),
-                                PTE_GET_PFN(*pdpe), !!(*pdpe & PTE_WRITABLE_MASK),
-                                !!(*pdpe & PTE_NX_MASK));
+                                PTE_GET_PFN(*pdpe), is_writable_pte(mmu, pdpe),
+                                is_nx_pte(mmu, pdpe));

                         pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
                         for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
                                 pde = &pde_start[n3];
-                                if (!(*pde & PTE_PRESENT_MASK))
+                                if (!is_present_pte(mmu, pde))
                                         continue;
                                 fprintf(stream, "%*spde 0x%-3zx %p "
                                         "0x%-12lx 0x%-10llx %u %u\n",
                                         indent, "", pde - pde_start, pde,
                                         addr_hva2gpa(vm, pde),
-                                        PTE_GET_PFN(*pde), !!(*pde & PTE_WRITABLE_MASK),
-                                        !!(*pde & PTE_NX_MASK));
+                                        PTE_GET_PFN(*pde), is_writable_pte(mmu, pde),
+                                        is_nx_pte(mmu, pde));

                                 pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
                                 for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
                                         pte = &pte_start[n4];
-                                        if (!(*pte & PTE_PRESENT_MASK))
+                                        if (!is_present_pte(mmu, pte))
                                                 continue;
                                         fprintf(stream, "%*spte 0x%-3zx %p "
                                                 "0x%-12lx 0x%-10llx %u %u "
@@ -424,9 +439,9 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
                                                 pte - pte_start, pte,
                                                 addr_hva2gpa(vm, pte),
                                                 PTE_GET_PFN(*pte),
-                                                !!(*pte & PTE_WRITABLE_MASK),
-                                                !!(*pte & PTE_NX_MASK),
-                                                !!(*pte & PTE_DIRTY_MASK),
+                                                is_writable_pte(mmu, pte),
+                                                is_nx_pte(mmu, pte),
+                                                is_dirty_pte(mmu, pte),
                                                 ((uint64_t) n1 << 27)
                                                 | ((uint64_t) n2 << 18)
                                                 | ((uint64_t) n3 << 9)
@@ -509,7 +524,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
         int level = PG_LEVEL_NONE;
         uint64_t *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);

-        TEST_ASSERT(*pte & PTE_PRESENT_MASK,
+        TEST_ASSERT(is_present_pte(&vm->mmu, pte),
                     "Leaf PTE not PRESENT for gva: 0x%08lx", gva);

         /*
