Skip to content

Commit 55058e3

Browse files
yosrym93 and sean-jc
authored and committed
KVM: selftests: Add a selftest for nested VMLOAD/VMSAVE
Add a test for VMLOAD/VMSAVE in an L2 guest. The test verifies that L1 intercepts for VMSAVE/VMLOAD always work regardless of VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK. Then, more interestingly, it makes sure that when L1 does not intercept VMLOAD/VMSAVE, they work as intended in L2. When VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK is enabled by L1, VMSAVE/VMLOAD from L2 should interpret the GPA as an L2 GPA and translate it through the NPT. When VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK is disabled by L1, VMSAVE/VMLOAD from L2 should interpret the GPA as an L1 GPA. To test this, put two VMCBs (0 and 1) in L1's physical address space, and have a single L2 GPA where: - L2 VMCB GPA == L1 VMCB(0) GPA - L2 VMCB GPA maps to L1 VMCB(1) via the NPT in L1. This setup allows detecting how the GPA is interpreted based on which L1 VMCB is actually accessed. In both cases, L2 sets KERNEL_GS_BASE (one of the fields handled by VMSAVE/VMLOAD), and executes VMSAVE to write its value to the VMCB. The test userspace code then checks that the write was made to the correct VMCB (based on whether VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK is set by L1), and writes a new value to that VMCB. L2 then executes VMLOAD to load the new value and makes sure it's reflected correctly in KERNEL_GS_BASE. Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev> Link: https://patch.msgid.link/20260110004821.3411245-4-yosry.ahmed@linux.dev Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent f756ed8 commit 55058e3

3 files changed

Lines changed: 199 additions & 0 deletions

File tree

tools/testing/selftests/kvm/Makefile.kvm

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,7 @@ TEST_GEN_PROGS_x86 += x86/nested_exceptions_test
9595
TEST_GEN_PROGS_x86 += x86/nested_invalid_cr3_test
9696
TEST_GEN_PROGS_x86 += x86/nested_tsc_adjust_test
9797
TEST_GEN_PROGS_x86 += x86/nested_tsc_scaling_test
98+
TEST_GEN_PROGS_x86 += x86/nested_vmsave_vmload_test
9899
TEST_GEN_PROGS_x86 += x86/platform_info_test
99100
TEST_GEN_PROGS_x86 += x86/pmu_counters_test
100101
TEST_GEN_PROGS_x86 += x86/pmu_event_filter_test

tools/testing/selftests/kvm/include/x86/processor.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -201,6 +201,7 @@ struct kvm_x86_cpu_feature {
201201
#define X86_FEATURE_TSCRATEMSR KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 4)
202202
#define X86_FEATURE_PAUSEFILTER KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 10)
203203
#define X86_FEATURE_PFTHRESHOLD KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 12)
204+
#define X86_FEATURE_V_VMSAVE_VMLOAD KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 15)
204205
#define X86_FEATURE_VGIF KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
205206
#define X86_FEATURE_IDLE_HLT KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 30)
206207
#define X86_FEATURE_SEV KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
Lines changed: 197 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,197 @@
1+
// SPDX-License-Identifier: GPL-2.0-only
2+
/*
3+
* Copyright (C) 2026, Google LLC.
4+
*/
5+
#include "kvm_util.h"
6+
#include "vmx.h"
7+
#include "svm_util.h"
8+
#include "kselftest.h"
9+
10+
/*
11+
* Allocate two VMCB pages for testing. Both pages have different GVAs (shared
12+
* by both L1 and L2) and L1 GPAs. A single L2 GPA is used such that:
13+
* - L2 GPA == L1 GPA for VMCB0.
14+
* - L2 GPA is mapped to L1 GPA for VMCB1 using NPT in L1.
15+
*
16+
* This allows testing whether the GPA used by VMSAVE/VMLOAD in L2 is
17+
* interpreted as a direct L1 GPA or translated using NPT as an L2 GPA,
18+
* depending on which VMCB is accessed.
19+
*/
20+
#define TEST_MEM_SLOT_INDEX 1
21+
#define TEST_MEM_PAGES 2
22+
#define TEST_MEM_BASE 0xc0000000
23+
24+
#define TEST_GUEST_ADDR(idx) (TEST_MEM_BASE + (idx) * PAGE_SIZE)
25+
26+
#define TEST_VMCB_L1_GPA(idx) TEST_GUEST_ADDR(idx)
27+
#define TEST_VMCB_GVA(idx) TEST_GUEST_ADDR(idx)
28+
29+
#define TEST_VMCB_L2_GPA TEST_VMCB_L1_GPA(0)
30+
31+
#define L2_GUEST_STACK_SIZE 64
32+
33+
/*
 * Execute VMSAVE in L2 against the shared L2 GPA.  Whether this hits VMCB0
 * (GPA treated as an L1 GPA) or VMCB1 (GPA translated through the NPT)
 * depends on L1's VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK setting.
 */
static void l2_guest_code_vmsave(void)
{
	asm volatile("vmsave %0" : : "a"(TEST_VMCB_L2_GPA) : "memory");
}
37+
38+
/*
 * Execute VMLOAD in L2 against the shared L2 GPA; see l2_guest_code_vmsave()
 * for how the GPA is interpreted.
 */
static void l2_guest_code_vmload(void)
{
	asm volatile("vmload %0" : : "a"(TEST_VMCB_L2_GPA) : "memory");
}
42+
43+
static void l2_guest_code_vmcb(int vmcb_idx)
44+
{
45+
wrmsr(MSR_KERNEL_GS_BASE, 0xaaaa);
46+
l2_guest_code_vmsave();
47+
48+
/* Verify the VMCB used by VMSAVE and update KERNEL_GS_BASE to 0xbbbb */
49+
GUEST_SYNC(vmcb_idx);
50+
51+
l2_guest_code_vmload();
52+
GUEST_ASSERT_EQ(rdmsr(MSR_KERNEL_GS_BASE), 0xbbbb);
53+
54+
/* Reset MSR_KERNEL_GS_BASE */
55+
wrmsr(MSR_KERNEL_GS_BASE, 0);
56+
l2_guest_code_vmsave();
57+
58+
vmmcall();
59+
}
60+
61+
/* Thunk: run the L2 test sequence expecting accesses to land in VMCB0. */
static void l2_guest_code_vmcb0(void)
{
	l2_guest_code_vmcb(0);
}
65+
66+
/* Thunk: run the L2 test sequence expecting accesses to land in VMCB1. */
static void l2_guest_code_vmcb1(void)
{
	l2_guest_code_vmcb(1);
}
70+
71+
static void l1_guest_code(struct svm_test_data *svm)
72+
{
73+
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
74+
75+
/* Each test case initializes the guest RIP below */
76+
generic_svm_setup(svm, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
77+
78+
/* Set VMSAVE/VMLOAD intercepts and make sure they work with.. */
79+
svm->vmcb->control.intercept |= (BIT_ULL(INTERCEPT_VMSAVE) |
80+
BIT_ULL(INTERCEPT_VMLOAD));
81+
82+
/* ..VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK cleared.. */
83+
svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
84+
85+
svm->vmcb->save.rip = (u64)l2_guest_code_vmsave;
86+
run_guest(svm->vmcb, svm->vmcb_gpa);
87+
GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMSAVE);
88+
89+
svm->vmcb->save.rip = (u64)l2_guest_code_vmload;
90+
run_guest(svm->vmcb, svm->vmcb_gpa);
91+
GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMLOAD);
92+
93+
/* ..and VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK set */
94+
svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
95+
96+
svm->vmcb->save.rip = (u64)l2_guest_code_vmsave;
97+
run_guest(svm->vmcb, svm->vmcb_gpa);
98+
GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMSAVE);
99+
100+
svm->vmcb->save.rip = (u64)l2_guest_code_vmload;
101+
run_guest(svm->vmcb, svm->vmcb_gpa);
102+
GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMLOAD);
103+
104+
/* Now clear the intercepts to test VMSAVE/VMLOAD behavior */
105+
svm->vmcb->control.intercept &= ~(BIT_ULL(INTERCEPT_VMSAVE) |
106+
BIT_ULL(INTERCEPT_VMLOAD));
107+
108+
/*
109+
* Without VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK, the GPA will be
110+
* interpreted as an L1 GPA, so VMCB0 should be used.
111+
*/
112+
svm->vmcb->save.rip = (u64)l2_guest_code_vmcb0;
113+
svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
114+
run_guest(svm->vmcb, svm->vmcb_gpa);
115+
GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMMCALL);
116+
117+
/*
118+
* With VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK, the GPA will be interpeted as
119+
* an L2 GPA, and translated through the NPT to VMCB1.
120+
*/
121+
svm->vmcb->save.rip = (u64)l2_guest_code_vmcb1;
122+
svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
123+
run_guest(svm->vmcb, svm->vmcb_gpa);
124+
GUEST_ASSERT_EQ(svm->vmcb->control.exit_code, SVM_EXIT_VMMCALL);
125+
126+
GUEST_DONE();
127+
}
128+
129+
int main(int argc, char *argv[])
130+
{
131+
vm_vaddr_t nested_gva = 0;
132+
struct vmcb *test_vmcb[2];
133+
struct kvm_vcpu *vcpu;
134+
struct kvm_vm *vm;
135+
int i;
136+
137+
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
138+
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_NPT));
139+
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD));
140+
141+
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
142+
vm_enable_tdp(vm);
143+
144+
vcpu_alloc_svm(vm, &nested_gva);
145+
vcpu_args_set(vcpu, 1, nested_gva);
146+
147+
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
148+
TEST_MEM_BASE, TEST_MEM_SLOT_INDEX,
149+
TEST_MEM_PAGES, 0);
150+
151+
for (i = 0; i <= 1; i++) {
152+
virt_map(vm, TEST_VMCB_GVA(i), TEST_VMCB_L1_GPA(i), 1);
153+
test_vmcb[i] = (struct vmcb *)addr_gva2hva(vm, TEST_VMCB_GVA(i));
154+
}
155+
156+
tdp_identity_map_default_memslots(vm);
157+
158+
/*
159+
* L2 GPA == L1_GPA(0), but map it to L1_GPA(1), to allow testing
160+
* whether the L2 GPA is interpreted as an L1 GPA or translated through
161+
* the NPT.
162+
*/
163+
TEST_ASSERT_EQ(TEST_VMCB_L2_GPA, TEST_VMCB_L1_GPA(0));
164+
tdp_map(vm, TEST_VMCB_L2_GPA, TEST_VMCB_L1_GPA(1), PAGE_SIZE);
165+
166+
for (;;) {
167+
struct ucall uc;
168+
169+
vcpu_run(vcpu);
170+
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
171+
172+
switch (get_ucall(vcpu, &uc)) {
173+
case UCALL_ABORT:
174+
REPORT_GUEST_ASSERT(uc);
175+
case UCALL_SYNC:
176+
i = uc.args[1];
177+
TEST_ASSERT(i == 0 || i == 1, "Unexpected VMCB idx: %d", i);
178+
179+
/*
180+
* Check that only the expected VMCB has KERNEL_GS_BASE
181+
* set to 0xaaaa, and update it to 0xbbbb.
182+
*/
183+
TEST_ASSERT_EQ(test_vmcb[i]->save.kernel_gs_base, 0xaaaa);
184+
TEST_ASSERT_EQ(test_vmcb[1-i]->save.kernel_gs_base, 0);
185+
test_vmcb[i]->save.kernel_gs_base = 0xbbbb;
186+
break;
187+
case UCALL_DONE:
188+
goto done;
189+
default:
190+
TEST_FAIL("Unknown ucall %lu", uc.cmd);
191+
}
192+
}
193+
194+
done:
195+
kvm_vm_free(vm);
196+
return 0;
197+
}

0 commit comments

Comments
 (0)