Skip to content

Commit e40e72f

Browse files
yosrym93 and sean-jc
authored and committed
KVM: selftests: Stop passing VMX metadata to TDP mapping functions
The root GPA is now retrieved from the nested MMU, stop passing VMX metadata. This is in preparation for making these functions work for NPTs as well. Opportunistically drop tdp_pg_map() since it's unused.

No functional change intended.

Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Link: https://patch.msgid.link/20251230230150.4150236-12-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent f00f519 commit e40e72f

4 files changed

Lines changed: 24 additions & 40 deletions

File tree

tools/testing/selftests/kvm/include/x86/vmx.h

Lines changed: 3 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -557,14 +557,9 @@ bool load_vmcs(struct vmx_pages *vmx);
557557

558558
bool ept_1g_pages_supported(void);
559559

560-
void tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr,
561-
uint64_t paddr);
562-
void tdp_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr,
563-
uint64_t paddr, uint64_t size);
564-
void tdp_identity_map_default_memslots(struct vmx_pages *vmx,
565-
struct kvm_vm *vm);
566-
void tdp_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
567-
uint64_t addr, uint64_t size);
560+
void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size);
561+
void tdp_identity_map_default_memslots(struct kvm_vm *vm);
562+
void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size);
568563
bool kvm_cpu_has_ept(void);
569564
void vm_enable_ept(struct kvm_vm *vm);
570565
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);

tools/testing/selftests/kvm/lib/x86/memstress.c

Lines changed: 5 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -59,7 +59,7 @@ uint64_t memstress_nested_pages(int nr_vcpus)
5959
return 513 + 10 * nr_vcpus;
6060
}
6161

62-
static void memstress_setup_ept_mappings(struct vmx_pages *vmx, struct kvm_vm *vm)
62+
static void memstress_setup_ept_mappings(struct kvm_vm *vm)
6363
{
6464
uint64_t start, end;
6565

@@ -68,16 +68,15 @@ static void memstress_setup_ept_mappings(struct vmx_pages *vmx, struct kvm_vm *v
6868
* KVM can shadow the EPT12 with the maximum huge page size supported
6969
* by the backing source.
7070
*/
71-
tdp_identity_map_1g(vmx, vm, 0, 0x100000000ULL);
71+
tdp_identity_map_1g(vm, 0, 0x100000000ULL);
7272

7373
start = align_down(memstress_args.gpa, PG_SIZE_1G);
7474
end = align_up(memstress_args.gpa + memstress_args.size, PG_SIZE_1G);
75-
tdp_identity_map_1g(vmx, vm, start, end - start);
75+
tdp_identity_map_1g(vm, start, end - start);
7676
}
7777

7878
void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
7979
{
80-
struct vmx_pages *vmx;
8180
struct kvm_regs regs;
8281
vm_vaddr_t vmx_gva;
8382
int vcpu_id;
@@ -87,11 +86,11 @@ void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vc
8786

8887
vm_enable_ept(vm);
8988
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
90-
vmx = vcpu_alloc_vmx(vm, &vmx_gva);
89+
vcpu_alloc_vmx(vm, &vmx_gva);
9190

9291
/* The EPTs are shared across vCPUs, setup the mappings once */
9392
if (vcpu_id == 0)
94-
memstress_setup_ept_mappings(vmx, vm);
93+
memstress_setup_ept_mappings(vm);
9594

9695
/*
9796
* Override the vCPU to run memstress_l1_guest_code() which will

tools/testing/selftests/kvm/lib/x86/vmx.c

Lines changed: 12 additions & 21 deletions
Original file line number | Diff line number | Diff line change
@@ -409,8 +409,8 @@ static void tdp_create_pte(struct kvm_vm *vm,
409409
}
410410

411411

412-
void __tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
413-
uint64_t nested_paddr, uint64_t paddr, int target_level)
412+
void __tdp_pg_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
413+
int target_level)
414414
{
415415
const uint64_t page_size = PG_LEVEL_SIZE(target_level);
416416
void *eptp_hva = addr_gpa2hva(vm, vm->arch.tdp_mmu->pgd);
@@ -453,12 +453,6 @@ void __tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
453453
}
454454
}
455455

456-
void tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
457-
uint64_t nested_paddr, uint64_t paddr)
458-
{
459-
__tdp_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);
460-
}
461-
462456
/*
463457
* Map a range of EPT guest physical addresses to the VM's physical address
464458
*
@@ -476,9 +470,8 @@ void tdp_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
476470
* Within the VM given by vm, creates a nested guest translation for the
477471
* page range starting at nested_paddr to the page range starting at paddr.
478472
*/
479-
void __tdp_map(struct vmx_pages *vmx, struct kvm_vm *vm,
480-
uint64_t nested_paddr, uint64_t paddr, uint64_t size,
481-
int level)
473+
void __tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
474+
uint64_t size, int level)
482475
{
483476
size_t page_size = PG_LEVEL_SIZE(level);
484477
size_t npages = size / page_size;
@@ -487,23 +480,22 @@ void __tdp_map(struct vmx_pages *vmx, struct kvm_vm *vm,
487480
TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
488481

489482
while (npages--) {
490-
__tdp_pg_map(vmx, vm, nested_paddr, paddr, level);
483+
__tdp_pg_map(vm, nested_paddr, paddr, level);
491484
nested_paddr += page_size;
492485
paddr += page_size;
493486
}
494487
}
495488

496-
void tdp_map(struct vmx_pages *vmx, struct kvm_vm *vm,
497-
uint64_t nested_paddr, uint64_t paddr, uint64_t size)
489+
void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
490+
uint64_t size)
498491
{
499-
__tdp_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K);
492+
__tdp_map(vm, nested_paddr, paddr, size, PG_LEVEL_4K);
500493
}
501494

502495
/* Prepare an identity extended page table that maps all the
503496
* physical pages in VM.
504497
*/
505-
void tdp_identity_map_default_memslots(struct vmx_pages *vmx,
506-
struct kvm_vm *vm)
498+
void tdp_identity_map_default_memslots(struct kvm_vm *vm)
507499
{
508500
uint32_t s, memslot = 0;
509501
sparsebit_idx_t i, last;
@@ -520,16 +512,15 @@ void tdp_identity_map_default_memslots(struct vmx_pages *vmx,
520512
if (i > last)
521513
break;
522514

523-
tdp_map(vmx, vm, (uint64_t)i << vm->page_shift,
515+
tdp_map(vm, (uint64_t)i << vm->page_shift,
524516
(uint64_t)i << vm->page_shift, 1 << vm->page_shift);
525517
}
526518
}
527519

528520
/* Identity map a region with 1GiB Pages. */
529-
void tdp_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
530-
uint64_t addr, uint64_t size)
521+
void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size)
531522
{
532-
__tdp_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
523+
__tdp_map(vm, addr, addr, size, PG_LEVEL_1G);
533524
}
534525

535526
bool kvm_cpu_has_ept(void)

tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c

Lines changed: 4 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -80,7 +80,6 @@ void l1_guest_code(struct vmx_pages *vmx)
8080
static void test_vmx_dirty_log(bool enable_ept)
8181
{
8282
vm_vaddr_t vmx_pages_gva = 0;
83-
struct vmx_pages *vmx;
8483
unsigned long *bmap;
8584
uint64_t *host_test_mem;
8685

@@ -96,7 +95,7 @@ static void test_vmx_dirty_log(bool enable_ept)
9695
if (enable_ept)
9796
vm_enable_ept(vm);
9897

99-
vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
98+
vcpu_alloc_vmx(vm, &vmx_pages_gva);
10099
vcpu_args_set(vcpu, 1, vmx_pages_gva);
101100

102101
/* Add an extra memory slot for testing dirty logging */
@@ -120,9 +119,9 @@ static void test_vmx_dirty_log(bool enable_ept)
120119
* GPAs as the EPT enabled case.
121120
*/
122121
if (enable_ept) {
123-
tdp_identity_map_default_memslots(vmx, vm);
124-
tdp_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, PAGE_SIZE);
125-
tdp_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, PAGE_SIZE);
122+
tdp_identity_map_default_memslots(vm);
123+
tdp_map(vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, PAGE_SIZE);
124+
tdp_map(vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, PAGE_SIZE);
126125
}
127126

128127
bmap = bitmap_zalloc(TEST_MEM_PAGES);

0 commit comments

Comments (0)