3333
3434/**
3535 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
36- * @vmx: struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
36+ * @vmx: struct vcpu_vmx *
3737 * @regs: unsigned long * (to guest registers)
38- * @flags: VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH
38+ * @flags: VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH
39+ * VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl
3940 *
4041 * Returns:
4142 * 0 on VM-Exit, 1 on VM-Fail
@@ -54,6 +55,12 @@ SYM_FUNC_START(__vmx_vcpu_run)
5455#endif
5556 push %_ASM_BX
5657
58+ /* Save @vmx for SPEC_CTRL handling */
59+ push %_ASM_ARG1
60+
61+ /* Save @flags for SPEC_CTRL handling */
62+ push %_ASM_ARG3
63+
5764 /*
5865 * Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and
5966 * @regs is needed after VM-Exit to save the guest's register values.
@@ -149,25 +156,23 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
149156 mov %r15 , VCPU_R15(%_ASM_AX)
150157#endif
151158
152- /* IMPORTANT: RSB must be stuffed before the first return. */
153- FILL_RETURN_BUFFER %_ASM_BX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
154-
155- /* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
156- xor %eax , %eax
159+ /* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */
160+ xor %ebx , %ebx
157161
158162.Lclear_regs:
159163 /*
160- * Clear all general purpose registers except RSP and RAX to prevent
164+ * Clear all general purpose registers except RSP and RBX to prevent
161165 * speculative use of the guest's values, even those that are reloaded
162166 * via the stack. In theory, an L1 cache miss when restoring registers
163167 * could lead to speculative execution with the guest's values.
164168 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
165- * free. RSP and RAX are exempt as RSP is restored by hardware during
169+ * free. RSP and RBX are exempt as RSP is restored by hardware during
166- * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
170+ * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return
171+ * value.
167172 */
173+ xor %eax , %eax
168174 xor %ecx , %ecx
169175 xor %edx , %edx
170- xor %ebx , %ebx
171176 xor %ebp , %ebp
172177 xor %esi , %esi
173178 xor %edi , %edi
@@ -185,6 +190,28 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
185190 /* "POP" @regs. */
186191 add $WORD_SIZE, %_ASM_SP
187192
193+ /*
194+ * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
195+ * the first unbalanced RET after vmexit!
196+ *
197+ * For retpoline, RSB filling is needed to prevent poisoned RSB entries
198+ * and (in some cases) RSB underflow.
199+ *
200+ * eIBRS has its own protection against poisoned RSB, so it doesn't
201+ * need the RSB filling sequence. But it does need to be enabled
202+ * before the first unbalanced RET.
203+ */
204+
205+ FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
206+
207+ pop %_ASM_ARG2 /* @flags */
208+ pop %_ASM_ARG1 /* @vmx */
209+
210+ call vmx_spec_ctrl_restore_host
211+
212+ /* Put return value in AX */
213+ mov %_ASM_BX, %_ASM_AX
214+
188215 pop %_ASM_BX
189216#ifdef CONFIG_X86_64
190217 pop %r12
@@ -204,7 +231,7 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
204231 ud2
205232.Lvmfail:
206233 /* VM-Fail: set return value to 1 */
207- mov $1 , %eax
234+ mov $1 , %_ASM_BX
208235 jmp .Lclear_regs
209236
210237SYM_FUNC_END(__vmx_vcpu_run)
0 commit comments