33#include <asm/asm.h>
44#include <asm/asm-offsets.h>
55#include <asm/bitsperlong.h>
6+ #include <asm/frame.h>
67#include <asm/kvm_vcpu_regs.h>
78#include <asm/nospec-branch.h>
89#include "kvm-asm-offsets.h"
/*
 * NOTE(review): the lines below are diff/scrape residue — each carries fused
 * old/new diff line numbers and "+"/"-" change markers, and two macro bodies
 * are truncated by hunk boundaries (the ALTERNATIVE tail of
 * RESTORE_GUEST_SPEC_CTRL_BODY above, and RESTORE_HOST_SPEC_CTRL_BODY below,
 * whose ".endm" is outside this view).  Not assemblable as-is.
 */
6768 "", X86_FEATURE_V_SPEC_CTRL
6869901:
6970.endm
/*
 * Patch intent visible here: RESTORE_HOST_SPEC_CTRL_BODY gains a required
 * macro argument naming where @spec_ctrl_intercepted lives, instead of
 * hard-coding a load from the top of the stack.
 */
70- .macro RESTORE_HOST_SPEC_CTRL_BODY
71+ .macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted: req
7172900:
7273 /* Same for after vmexit. */
7374 mov $MSR_IA32_SPEC_CTRL, %ecx
7677 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
7778 * if it was not intercepted during guest execution.
7879 */
/*
 * Old code read the flag from (%_ASM_SP); new code reads it from the
 * caller-supplied \spec_ctrl_intercepted location (stack slot or byte reg).
 */
79- cmpb $0 , (%_ASM_SP)
80+ cmpb $0 , \spec_ctrl_intercepted
8081 jnz 998f
8182 rdmsr
8283 movl %eax , SVM_spec_ctrl(%_ASM_DI)
99100 */
/*
 * NOTE(review): diff/scrape residue of __svm_vcpu_run.  Fused diff line
 * numbers and "+"/"-" markers prefix each line, and two "@@" hunk headers
 * elide most of the function body — this span documents the patch, not the
 * final file, and is not assemblable as-is.
 */
100101SYM_FUNC_START(__svm_vcpu_run)
101102 push %_ASM_BP
/*
 * Added line: copy SP into BP right after the push — presumably to set up a
 * conventional frame pointer; confirm against the commit message.
 */
103+ mov %_ASM_SP, %_ASM_BP
102104#ifdef CONFIG_X86_64
103105 push %r15
104106 push %r14
@@ -268,7 +270,7 @@ SYM_FUNC_START(__svm_vcpu_run)
268270 RET
269271
270272 RESTORE_GUEST_SPEC_CTRL_BODY
/*
 * Matches the macro change above: __svm_vcpu_run keeps its flag at the top
 * of the stack, so it instantiates the body with (%_ASM_SP) explicitly.
 */
271- RESTORE_HOST_SPEC_CTRL_BODY
273+ RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)
272274
27327510: cmpb $0 , _ASM_RIP(kvm_rebooting)
274276 jne 2b
@@ -290,66 +292,68 @@ SYM_FUNC_START(__svm_vcpu_run)
290292
291293SYM_FUNC_END(__svm_vcpu_run)
292294
#ifdef CONFIG_KVM_AMD_SEV

/*
 * Byte offsets, relative to the start of the SEV-ES host save area, at which
 * each GPR needed after #VMEXIT is stashed by __svm_sev_es_vcpu_run below.
 * SEV_ES_GPRS_BASE is the offset of the GPR array within the save area —
 * assumed to match the VMSA/host-save-area layout; TODO confirm against
 * struct sev_es_save_area.  SEV-ES is 64-bit only, hence the X86_64 guard.
 */
#ifdef CONFIG_X86_64
#define SEV_ES_GPRS_BASE	0x300
#define SEV_ES_RBX		(SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
#define SEV_ES_RBP		(SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
#define SEV_ES_RSI		(SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
#define SEV_ES_RDI		(SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
#define SEV_ES_R12		(SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
#define SEV_ES_R13		(SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
#define SEV_ES_R14		(SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
#define SEV_ES_R15		(SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
#endif
309+
/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:			struct vcpu_svm *	(RDI, SysV arg0)
 * @spec_ctrl_intercepted:	bool			(RSI, SysV arg1)
 *
 * RDX (arg2) points at the host save area; GPRs stashed there are restored
 * by hardware on #VMEXIT, so no push/pop of callee-saved registers is needed.
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	FRAME_BEGIN

	/*
	 * Save non-volatile (callee-saved) registers to the host save area.
	 * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
	 * saved on VMRUN.
	 */
	mov %rbp, SEV_ES_RBP(%rdx)
	mov %r15, SEV_ES_R15(%rdx)
	mov %r14, SEV_ES_R14(%rdx)
	mov %r13, SEV_ES_R13(%rdx)
	mov %r12, SEV_ES_R12(%rdx)
	mov %rbx, SEV_ES_RBX(%rdx)

	/*
	 * Save volatile registers that hold arguments that are needed after
	 * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
	 */
	mov %rdi, SEV_ES_RDI(%rdx)
	mov %rsi, SEV_ES_RSI(%rdx)

	/* Clobbers RAX, RCX, RDX (@hostsa). */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%rdi), %rax
	mov KVM_VMCB_pa(%rax), %rax

	/* Enter guest mode */
	sti

1:	vmrun %rax

2:	cli

#ifdef CONFIG_MITIGATION_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Untrain the return predictor (RETBleed mitigation for AMD/Hygon Zen)
	 * as soon as the VM is exited, before interrupts are re-enabled.
	 * NOTE(review): the original comment body fell inside an elided diff
	 * hunk; wording reconstructed — confirm against upstream vmenter.S.
	 */
	UNTRAIN_RET_VM

	FRAME_END
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	/* @spec_ctrl_intercepted lives in RSI here, hence the %sil byte reg. */
	RESTORE_HOST_SPEC_CTRL_BODY %sil

	/* VMRUN faulted (see _ASM_EXTABLE below): die unless KVM is rebooting. */
3:	cmpb $0, kvm_rebooting(%rip)
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
#endif /* CONFIG_KVM_AMD_SEV */
/* (page-scrape residue: "0 commit comments" — not part of the source file) */