Skip to content

Commit 59eef1a

Browse files
yosrym93 and sean-jc
authored and committed
KVM: selftests: Extend memstress to run on nested SVM
Add L1 SVM code and generalize the setup code to work for both VMX and SVM. This allows running 'dirty_log_perf_test -n' on AMD CPUs. Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev> Link: https://patch.msgid.link/20251230230150.4150236-20-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 6794d91 commit 59eef1a

1 file changed

Lines changed: 35 additions & 7 deletions

File tree

tools/testing/selftests/kvm/lib/x86/memstress.c

Lines changed: 35 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
#include "kvm_util.h"
1414
#include "memstress.h"
1515
#include "processor.h"
16+
#include "svm_util.h"
1617
#include "vmx.h"
1718

1819
void memstress_l2_guest_code(uint64_t vcpu_id)
@@ -29,9 +30,10 @@ __asm__(
2930
" ud2;"
3031
);
3132

32-
static void memstress_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
33-
{
3433
#define L2_GUEST_STACK_SIZE 64
34+
35+
static void l1_vmx_code(struct vmx_pages *vmx, uint64_t vcpu_id)
36+
{
3537
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
3638
unsigned long *rsp;
3739

@@ -45,10 +47,34 @@ static void memstress_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
4547
prepare_vmcs(vmx, memstress_l2_guest_entry, rsp);
4648

4749
GUEST_ASSERT(!vmlaunch());
48-
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
50+
GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_VMCALL);
51+
GUEST_DONE();
52+
}
53+
54+
static void l1_svm_code(struct svm_test_data *svm, uint64_t vcpu_id)
55+
{
56+
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
57+
unsigned long *rsp;
58+
59+
60+
rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
61+
*rsp = vcpu_id;
62+
generic_svm_setup(svm, memstress_l2_guest_entry, rsp);
63+
64+
run_guest(svm->vmcb, svm->vmcb_gpa);
65+
GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMMCALL);
4966
GUEST_DONE();
5067
}
5168

69+
70+
static void memstress_l1_guest_code(void *data, uint64_t vcpu_id)
71+
{
72+
if (this_cpu_has(X86_FEATURE_VMX))
73+
l1_vmx_code(data, vcpu_id);
74+
else
75+
l1_svm_code(data, vcpu_id);
76+
}
77+
5278
uint64_t memstress_nested_pages(int nr_vcpus)
5379
{
5480
/*
@@ -78,15 +104,17 @@ static void memstress_setup_ept_mappings(struct kvm_vm *vm)
78104
void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
79105
{
80106
struct kvm_regs regs;
81-
vm_vaddr_t vmx_gva;
107+
vm_vaddr_t nested_gva;
82108
int vcpu_id;
83109

84-
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
85110
TEST_REQUIRE(kvm_cpu_has_tdp());
86111

87112
vm_enable_tdp(vm);
88113
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
89-
vcpu_alloc_vmx(vm, &vmx_gva);
114+
if (kvm_cpu_has(X86_FEATURE_VMX))
115+
vcpu_alloc_vmx(vm, &nested_gva);
116+
else
117+
vcpu_alloc_svm(vm, &nested_gva);
90118

91119
/* The EPTs are shared across vCPUs, setup the mappings once */
92120
if (vcpu_id == 0)
@@ -99,6 +127,6 @@ void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vc
99127
vcpu_regs_get(vcpus[vcpu_id], &regs);
100128
regs.rip = (unsigned long) memstress_l1_guest_code;
101129
vcpu_regs_set(vcpus[vcpu_id], &regs);
102-
vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id);
130+
vcpu_args_set(vcpus[vcpu_id], 2, nested_gva, vcpu_id);
103131
}
104132
}

0 commit comments

Comments (0)