Commit e61ab42

KVM: SVM: move guest vmsave/vmload back to assembly
It is error-prone that code after vmexit cannot access percpu data
because GSBASE has not been restored yet.  It forces MSR_IA32_SPEC_CTRL
save/restore to happen very late, after the predictor untraining
sequence, and it gets in the way of return stack depth tracking
(a retbleed mitigation that is in linux-next as of 2022-11-09).

As a first step towards fixing that, move the VMCB VMSAVE/VMLOAD to
assembly, essentially undoing commit fb0c4a4 ("KVM: SVM: move
VMLOAD/VMSAVE to C code", 2021-03-15).  The reason for that commit was
that it made it simpler to use a different VMCB for VMLOAD/VMSAVE versus
VMRUN; but that is not a big hassle anymore thanks to the
kvm-asm-offsets machinery and other related cleanups.

The idea on how to number the exception tables is stolen from a
prototype patch by Peter Zijlstra.

Cc: stable@vger.kernel.org
Fixes: a149180 ("x86: Add magic AMD return-thunk")
Link: https://lore.kernel.org/all/f571e404-e625-bae1-10e9-449b2eb4cbd8@citrix.com/
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 73412df commit e61ab42

3 files changed: 39 additions and 20 deletions
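
For context on the first paragraph of the commit message: on x86-64 the kernel reaches percpu data through %gs-relative addressing, and on SVM the host's GSBASE is only restored by the VMLOAD of the host save area after VMRUN returns. A minimal sketch of that dependency, using a hypothetical percpu variable that is not part of this patch:

	#include <linux/percpu.h>

	/* Hypothetical percpu variable, for illustration only. */
	static DEFINE_PER_CPU(unsigned long, demo_counter);

	static unsigned long demo_read(void)
	{
		/*
		 * this_cpu_read() compiles down to a %gs-relative load,
		 * roughly "mov %gs:demo_counter, %rax".  Between #VMEXIT
		 * and the host VMLOAD, GSBASE still holds guest state, so
		 * code like this run in that window reads the wrong memory.
		 */
		return this_cpu_read(demo_counter);
	}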

arch/x86/kvm/kvm-asm-offsets.c

Lines changed: 1 addition & 0 deletions

@@ -16,6 +16,7 @@ static void __used common(void)
 	BLANK();
 	OFFSET(SVM_vcpu_arch_regs, vcpu_svm, vcpu.arch.regs);
 	OFFSET(SVM_current_vmcb, vcpu_svm, current_vmcb);
+	OFFSET(SVM_vmcb01, vcpu_svm, vmcb01);
 	OFFSET(KVM_VMCB_pa, kvm_vmcb_info, pa);
 }
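
The kvm-asm-offsets machinery that the commit message credits follows the standard Kbuild asm-offsets pattern: this C file is compiled to assembly, marker strings are scraped from the output, and a generated header of #define constants is included by vmenter.S. A simplified sketch of the underlying macros, condensed from include/linux/kbuild.h with the build plumbing elided:

	/* Condensed sketch of the kbuild.h helpers used above. */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

	#define OFFSET(sym, str, mem) \
		DEFINE(sym, offsetof(struct str, mem))

	/*
	 * Compiling with -S turns each OFFSET() into an "->SVM_vmcb01 <N>"
	 * marker in the .s output; a Kbuild script rewrites those markers
	 * into "#define SVM_vmcb01 <N>" lines in a generated header, which
	 * is what lets vmenter.S compute SVM_vmcb01_pa below.
	 */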

arch/x86/kvm/svm/svm.c

Lines changed: 0 additions & 9 deletions

@@ -3910,16 +3910,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 	} else {
 		struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
 
-		/*
-		 * Use a single vmcb (vmcb01 because it's always valid) for
-		 * context switching guest state via VMLOAD/VMSAVE, that way
-		 * the state doesn't need to be copied between vmcb01 and
-		 * vmcb02 when switching vmcbs for nested virtualization.
-		 */
-		vmload(svm->vmcb01.pa);
 		__svm_vcpu_run(svm);
-		vmsave(svm->vmcb01.pa);
-
 		vmload(__sme_page_pa(sd->save_area));
 	}
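
For reference, the vmload()/vmsave() helpers used by the deleted lines (and still used for the host save area) are thin inline-asm wrappers with their own exception-table fixup. A simplified sketch, condensed from arch/x86/kvm/svm/svm_ops.h of this era; details may differ by kernel version:

	/* Simplified sketch; pa is the VMCB physical address. */
	static __always_inline void vmload(unsigned long pa)
	{
		/* VMLOAD implicitly takes its VMCB physical address in RAX. */
		asm_volatile_goto("1: vmload %0\n\t"
				  _ASM_EXTABLE(1b, %l[fault])
				  : : "a" (pa) : "memory" : fault);
		return;
	fault:
		kvm_spurious_fault();
	}

Moving these operations into __svm_vcpu_run() removes two C-level call sites from the noinstr VM-entry path, which is what lets later patches in the series restore GSBASE (and thus percpu access) earlier after vmexit.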

arch/x86/kvm/svm/vmenter.S

Lines changed: 38 additions & 11 deletions

@@ -28,6 +28,8 @@
 #define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
 #endif
 
+#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)
+
 .section .noinstr.text, "ax"
 
 /**
@@ -55,6 +57,16 @@ SYM_FUNC_START(__svm_vcpu_run)
 	mov %_ASM_ARG1, %_ASM_DI
 .endif
 
+	/*
+	 * Use a single vmcb (vmcb01 because it's always valid) for
+	 * context switching guest state via VMLOAD/VMSAVE, that way
+	 * the state doesn't need to be copied between vmcb01 and
+	 * vmcb02 when switching vmcbs for nested virtualization.
+	 */
+	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
+1:	vmload %_ASM_AX
+2:
+
 	/* Get svm->current_vmcb->pa into RAX. */
 	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
 	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
@@ -80,16 +92,11 @@ SYM_FUNC_START(__svm_vcpu_run)
 	/* Enter guest mode */
 	sti
 
-1:	vmrun %_ASM_AX
-
-2:	cli
-
-#ifdef CONFIG_RETPOLINE
-	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
-#endif
+3:	vmrun %_ASM_AX
+4:
+	cli
 
-	/* "POP" @svm to RAX. */
+	/* Pop @svm to RAX while it's the only available register. */
 	pop %_ASM_AX
 
 	/* Save all guest registers. */
@@ -110,6 +117,18 @@ SYM_FUNC_START(__svm_vcpu_run)
 	mov %r15, VCPU_R15(%_ASM_AX)
 #endif
 
+	/* @svm can stay in RDI from now on. */
+	mov %_ASM_AX, %_ASM_DI
+
+	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
+5:	vmsave %_ASM_AX
+6:
+
+#ifdef CONFIG_RETPOLINE
+	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
+	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+#endif
+
 	/*
 	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
 	 * untrained as soon as we exit the VM and are back to the
@@ -159,11 +178,19 @@ SYM_FUNC_START(__svm_vcpu_run)
 	pop %_ASM_BP
 	RET
 
-3:	cmpb $0, kvm_rebooting
+10:	cmpb $0, kvm_rebooting
 	jne 2b
 	ud2
+30:	cmpb $0, kvm_rebooting
+	jne 4b
+	ud2
+50:	cmpb $0, kvm_rebooting
+	jne 6b
+	ud2
 
-	_ASM_EXTABLE(1b, 3b)
+	_ASM_EXTABLE(1b, 10b)
+	_ASM_EXTABLE(3b, 30b)
+	_ASM_EXTABLE(5b, 50b)
 
 SYM_FUNC_END(__svm_vcpu_run)
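
The exception-table numbering scheme credited to Peter Zijlstra is visible in the hunks above: each faulting instruction gets a small local label (1:, 3:, 5:) with the following label (2:, 4:, 6:) marking its resume point, and its fixup handler is that number times ten (10:, 30:, 50:), which keeps the handlers sorted and leaves room for new sites. Each _ASM_EXTABLE(insn, handler) pair becomes an entry in the __ex_table section; a simplified sketch of what is recorded and how it is consumed, condensed from the x86 extable code (details vary by kernel version):

	/*
	 * Simplified sketch of an x86 exception-table entry: offsets are
	 * stored relative to the entry itself so the table is position-
	 * independent, plus a type that selects the fixup handler.
	 */
	struct exception_table_entry {
		int insn;	/* offset of the faulting insn (1:, 3:, 5:) */
		int fixup;	/* offset of the fixup code (10:, 30:, 50:) */
		int data;	/* handler type; EX_TYPE_DEFAULT here */
	};

	/*
	 * On a fault, fixup_exception() searches the sorted __ex_table for
	 * the faulting RIP and, on a match, points regs->ip at the fixup
	 * code.  The handlers above then skip past the faulting instruction
	 * (jne 2b/4b/6b) if KVM is rebooting, and hit ud2 otherwise.
	 */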
