
Commit e287bd0
KVM: SVM: restore host save area from assembly
Allow access to the percpu area via the GS segment base, which is
needed in order to access the saved host spec_ctrl value. In linux-next
FILL_RETURN_BUFFER also needs to access percpu data.

For simplicity, the physical address of the save area is added to
struct svm_cpu_data.

Cc: stable@vger.kernel.org
Fixes: a149180 ("x86: Add magic AMD return-thunk")
Reported-by: Nathan Chancellor <nathan@kernel.org>
Analyzed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent: e61ab42

5 files changed: 26 additions & 13 deletions
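
For context, a minimal sketch (not part of this patch) of why GSBASE matters here: percpu reads compile to %gs-relative loads, so between #VMEXIT and the vmload of the host save area they would hit guest-controlled state. The helper name below is hypothetical; this_cpu_read(), wrmsrl(), MSR_IA32_SPEC_CTRL and the percpu variable x86_spec_ctrl_current are existing kernel symbols.

        /*
         * Hypothetical sketch: restoring the host SPEC_CTRL value needs a
         * percpu read, i.e. a %gs-relative load, which is only safe once
         * host GSBASE has been restored by vmload of the host save area.
         */
        static inline void restore_host_spec_ctrl_sketch(void)
        {
                u64 host_val = this_cpu_read(x86_spec_ctrl_current);

                wrmsrl(MSR_IA32_SPEC_CTRL, host_val);
        }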


arch/x86/kvm/kvm-asm-offsets.c

Lines changed: 1 addition & 0 deletions
@@ -18,6 +18,7 @@ static void __used common(void)
                 OFFSET(SVM_current_vmcb, vcpu_svm, current_vmcb);
                 OFFSET(SVM_vmcb01, vcpu_svm, vmcb01);
                 OFFSET(KVM_VMCB_pa, kvm_vmcb_info, pa);
+                OFFSET(SD_save_area_pa, svm_cpu_data, save_area_pa);
         }

         if (IS_ENABLED(CONFIG_KVM_INTEL)) {
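
The new OFFSET() entry is what lets vmenter.S refer to the save_area_pa field symbolically. As a rough sketch of the kbuild mechanism behind it (see include/linux/kbuild.h), the macro emits a marker string that the build scrapes into a generated header of #defines:

        /* Approximately how OFFSET() works (include/linux/kbuild.h): */
        #define DEFINE(sym, val) \
                asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

        #define OFFSET(sym, str, mem) \
                DEFINE(sym, offsetof(struct str, mem))

        /*
         * The emitted string becomes, roughly:
         *   #define SD_save_area_pa <byte offset of save_area_pa in svm_cpu_data>
         * which assembly code can then use as a plain constant.
         */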

arch/x86/kvm/svm/svm.c

Lines changed: 6 additions & 8 deletions
@@ -592,7 +592,7 @@ static int svm_hardware_enable(void)

         wrmsrl(MSR_EFER, efer | EFER_SVME);

-        wrmsrl(MSR_VM_HSAVE_PA, __sme_page_pa(sd->save_area));
+        wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);

         if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                 /*
@@ -648,6 +648,7 @@ static void svm_cpu_uninit(int cpu)

         kfree(sd->sev_vmcbs);
         __free_page(sd->save_area);
+        sd->save_area_pa = 0;
         sd->save_area = NULL;
 }

@@ -665,6 +666,7 @@ static int svm_cpu_init(int cpu)
         if (ret)
                 goto free_save_area;

+        sd->save_area_pa = __sme_page_pa(sd->save_area);
         return 0;

 free_save_area:
@@ -1450,7 +1452,7 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
          * Save additional host state that will be restored on VMEXIT (sev-es)
          * or subsequent vmload of host save area.
          */
-        vmsave(__sme_page_pa(sd->save_area));
+        vmsave(sd->save_area_pa);
         if (sev_es_guest(vcpu->kvm)) {
                 struct sev_es_save_area *hostsa;
                 hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
@@ -3905,14 +3907,10 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)

         guest_state_enter_irqoff();

-        if (sev_es_guest(vcpu->kvm)) {
+        if (sev_es_guest(vcpu->kvm))
                 __svm_sev_es_vcpu_run(svm);
-        } else {
-                struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
-
+        else
                 __svm_vcpu_run(svm);
-                vmload(__sme_page_pa(sd->save_area));
-        }

         guest_state_exit_irqoff();
 }
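
The svm.c changes are plumbing: compute the physical address once in svm_cpu_init() and reuse the cached value, rather than recomputing __sme_page_pa() at every use, and drop the C-side vmload of the host save area now that __svm_vcpu_run does it. For reference, a sketch of what the cached value contains (the macro is defined in svm.h):

        /*
         * The cached value is the save area's physical address with the SME
         * encryption bit ORed in when memory encryption is active, so the
         * assembly path never has to redo this arithmetic.
         */
        #define __sme_page_pa(x)        __sme_set(page_to_pfn(x) << PAGE_SHIFT)
        /* __sme_set(x) expands to ((x) | sme_me_mask) */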

arch/x86/kvm/svm/svm.h

Lines changed: 2 additions & 0 deletions
@@ -287,6 +287,8 @@ struct svm_cpu_data {
         struct kvm_ldttss_desc *tss_desc;

         struct page *save_area;
+        unsigned long save_area_pa;
+
         struct vmcb *current_vmcb;

         /* index = sev_asid, value = vmcb pointer */
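
Because svm_data is a percpu structure, storing the address as a plain unsigned long member lets the assembly reach it with a single %gs-relative operand. A sketch of the correspondence, assuming the usual PER_CPU_VAR expansion on SMP builds:

        /* C view: */
        unsigned long pa = this_cpu_read(svm_data.save_area_pa);

        /*
         * asm view, as used by vmenter.S below; PER_CPU_VAR supplies the
         * %gs segment prefix, so this is roughly:
         *   pushq %gs:(svm_data + SD_save_area_pa)
         */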

arch/x86/kvm/svm/svm_ops.h

Lines changed: 0 additions & 5 deletions
@@ -61,9 +61,4 @@ static __always_inline void vmsave(unsigned long pa)
         svm_asm1(vmsave, "a" (pa), "memory");
 }

-static __always_inline void vmload(unsigned long pa)
-{
-        svm_asm1(vmload, "a" (pa), "memory");
-}
-
 #endif /* __KVM_X86_SVM_OPS_H */
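
The vmload() wrapper can go because its only remaining caller now issues the instruction directly in assembly. For reference, the removed helper was built on svm_asm1() from the same header, whose expansion is approximately:

        /* Approximate expansion of svm_asm1(vmload, "a" (pa), "memory"): */
        asm_volatile_goto("1: vmload %0\n\t"
                          _ASM_EXTABLE(1b, %l[fault])
                          :: "a" (pa) : "memory" : fault);
        return;
fault:
        kvm_spurious_fault();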

arch/x86/kvm/svm/vmenter.S

Lines changed: 17 additions & 0 deletions
@@ -49,6 +49,14 @@ SYM_FUNC_START(__svm_vcpu_run)
 #endif
         push %_ASM_BX

+        /*
+         * Save variables needed after vmexit on the stack, in inverse
+         * order compared to when they are needed.
+         */
+
+        /* Needed to restore access to percpu variables. */
+        __ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)
+
         /* Save @svm. */
         push %_ASM_ARG1

@@ -124,6 +132,11 @@ SYM_FUNC_START(__svm_vcpu_run)
5:        vmsave %_ASM_AX
6:

+        /* Restores GSBASE among other things, allowing access to percpu data. */
+        pop %_ASM_AX
+7:        vmload %_ASM_AX
+8:
+
 #ifdef CONFIG_RETPOLINE
         /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
         FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
@@ -187,10 +200,14 @@ SYM_FUNC_START(__svm_vcpu_run)
50:        cmpb $0, kvm_rebooting
         jne 6b
         ud2
+70:        cmpb $0, kvm_rebooting
+        jne 8b
+        ud2

         _ASM_EXTABLE(1b, 10b)
         _ASM_EXTABLE(3b, 30b)
         _ASM_EXTABLE(5b, 50b)
+        _ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)
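The 7:/8: labels and the 70: fixup follow the pattern of the existing vmrun/vmsave fault handling: a fault in the vmload sends control to 70: via the exception table, and is tolerated only while KVM is shutting SVM down. An illustrative C rendering of that flow (hypothetical helpers; the real logic must stay in assembly, because no C code may run before GSBASE is restored):

        unsigned long pa = pop_from_stack();        /* pushed at function entry */

        if (vmload_faults(pa)) {                /* 7: vmload %_ASM_AX */
                if (!kvm_rebooting)             /* 70: cmpb $0, kvm_rebooting */
                        BUG();                  /* ud2 */
                /* otherwise resume at 8: as if the vmload had succeeded */
        }
        /* 8: host state restored; percpu access (e.g. FILL_RETURN_BUFFER) is safe */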
