Skip to content

Commit 8a48214

Browse files
yosrym93 authored and bonzini committed
KVM: nSVM: Fix and simplify LBR virtualization handling with nested
The current scheme for handling LBRV when nested is used is very complicated, especially when L1 does not enable LBRV (i.e. does not set LBR_CTL_ENABLE_MASK). To avoid copying LBRs between VMCB01 and VMCB02 on every nested transition, the current implementation switches between using VMCB01 or VMCB02 as the source of truth for the LBRs while L2 is running. If L2 enables LBR, VMCB02 is used as the source of truth. When L2 disables LBR, the LBRs are copied to VMCB01 and VMCB01 is used as the source of truth. This introduces significant complexity, and incorrect behavior in some cases. For example, on a nested #VMEXIT, the LBRs are only copied from VMCB02 to VMCB01 if LBRV is enabled in VMCB01. This is because L2's writes to MSR_IA32_DEBUGCTLMSR to enable LBR are intercepted and propagated to VMCB01 instead of VMCB02. However, LBRV is only enabled in VMCB02 when L2 is running. This means that if L2 enables LBR and exits to L1, the LBRs will not be propagated from VMCB02 to VMCB01, because LBRV is disabled in VMCB01. There is no meaningful difference in CPUID rate in L2 when copying LBRs on every nested transition vs. the current approach, so do the simple and correct thing and always copy LBRs between VMCB01 and VMCB02 on nested transitions (when LBRV is disabled by L1). Drop the conditional LBR copying in __svm_{enable/disable}_lbrv() as it is now unnecessary. VMCB02 becomes the only source of truth for LBRs when L2 is running, regardless of LBRV being enabled by L1, so drop svm_get_lbr_vmcb() and use svm->vmcb directly in its place. Fixes: 1d5a1b5 ("KVM: x86: nSVM: correctly virtualize LBR msrs when L2 is running") Cc: stable@vger.kernel.org Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev> Link: https://patch.msgid.link/20251108004524.1600006-4-yosry.ahmed@linux.dev Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent fbe5e5f commit 8a48214

2 files changed

Lines changed: 17 additions & 49 deletions

File tree

arch/x86/kvm/svm/nested.c

Lines changed: 7 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -677,11 +677,10 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
677677
*/
678678
svm_copy_lbrs(vmcb02, vmcb12);
679679
vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
680-
svm_update_lbrv(&svm->vcpu);
681-
682-
} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
680+
} else {
683681
svm_copy_lbrs(vmcb02, vmcb01);
684682
}
683+
svm_update_lbrv(&svm->vcpu);
685684
}
686685

687686
static inline bool is_evtinj_soft(u32 evtinj)
@@ -833,11 +832,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
833832
svm->soft_int_next_rip = vmcb12_rip;
834833
}
835834

836-
vmcb02->control.virt_ext = vmcb01->control.virt_ext &
837-
LBR_CTL_ENABLE_MASK;
838-
if (guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV))
839-
vmcb02->control.virt_ext |=
840-
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
835+
/* LBR_CTL_ENABLE_MASK is controlled by svm_update_lbrv() */
841836

842837
if (!nested_vmcb_needs_vls_intercept(svm))
843838
vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
@@ -1189,13 +1184,12 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
11891184
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
11901185

11911186
if (unlikely(guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
1192-
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
1187+
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK)))
11931188
svm_copy_lbrs(vmcb12, vmcb02);
1194-
svm_update_lbrv(vcpu);
1195-
} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
1189+
else
11961190
svm_copy_lbrs(vmcb01, vmcb02);
1197-
svm_update_lbrv(vcpu);
1198-
}
1191+
1192+
svm_update_lbrv(vcpu);
11991193

12001194
if (vnmi) {
12011195
if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)

arch/x86/kvm/svm/svm.c

Lines changed: 10 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -808,13 +808,7 @@ void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
808808

809809
static void __svm_enable_lbrv(struct kvm_vcpu *vcpu)
810810
{
811-
struct vcpu_svm *svm = to_svm(vcpu);
812-
813-
svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
814-
815-
/* Move the LBR msrs to the vmcb02 so that the guest can see them. */
816-
if (is_guest_mode(vcpu))
817-
svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
811+
to_svm(vcpu)->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
818812
}
819813

820814
void svm_enable_lbrv(struct kvm_vcpu *vcpu)
@@ -825,35 +819,15 @@ void svm_enable_lbrv(struct kvm_vcpu *vcpu)
825819

826820
static void __svm_disable_lbrv(struct kvm_vcpu *vcpu)
827821
{
828-
struct vcpu_svm *svm = to_svm(vcpu);
829-
830822
KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
831-
svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
832-
833-
/*
834-
* Move the LBR msrs back to the vmcb01 to avoid copying them
835-
* on nested guest entries.
836-
*/
837-
if (is_guest_mode(vcpu))
838-
svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
839-
}
840-
841-
static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
842-
{
843-
/*
844-
* If LBR virtualization is disabled, the LBR MSRs are always kept in
845-
* vmcb01. If LBR virtualization is enabled and L1 is running VMs of
846-
* its own, the MSRs are moved between vmcb01 and vmcb02 as needed.
847-
*/
848-
return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ? svm->vmcb :
849-
svm->vmcb01.ptr;
823+
to_svm(vcpu)->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
850824
}
851825

852826
void svm_update_lbrv(struct kvm_vcpu *vcpu)
853827
{
854828
struct vcpu_svm *svm = to_svm(vcpu);
855829
bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
856-
bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) ||
830+
bool enable_lbrv = (svm->vmcb->save.dbgctl & DEBUGCTLMSR_LBR) ||
857831
(is_guest_mode(vcpu) && guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
858832
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));
859833

@@ -2733,19 +2707,19 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
27332707
msr_info->data = svm->tsc_aux;
27342708
break;
27352709
case MSR_IA32_DEBUGCTLMSR:
2736-
msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl;
2710+
msr_info->data = svm->vmcb->save.dbgctl;
27372711
break;
27382712
case MSR_IA32_LASTBRANCHFROMIP:
2739-
msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from;
2713+
msr_info->data = svm->vmcb->save.br_from;
27402714
break;
27412715
case MSR_IA32_LASTBRANCHTOIP:
2742-
msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to;
2716+
msr_info->data = svm->vmcb->save.br_to;
27432717
break;
27442718
case MSR_IA32_LASTINTFROMIP:
2745-
msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from;
2719+
msr_info->data = svm->vmcb->save.last_excp_from;
27462720
break;
27472721
case MSR_IA32_LASTINTTOIP:
2748-
msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to;
2722+
msr_info->data = svm->vmcb->save.last_excp_to;
27492723
break;
27502724
case MSR_VM_HSAVE_PA:
27512725
msr_info->data = svm->nested.hsave_msr;
@@ -3013,10 +2987,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
30132987
if (data & DEBUGCTL_RESERVED_BITS)
30142988
return 1;
30152989

3016-
if (svm_get_lbr_vmcb(svm)->save.dbgctl == data)
2990+
if (svm->vmcb->save.dbgctl == data)
30172991
break;
30182992

3019-
svm_get_lbr_vmcb(svm)->save.dbgctl = data;
2993+
svm->vmcb->save.dbgctl = data;
30202994
vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
30212995
svm_update_lbrv(vcpu);
30222996
break;

0 commit comments

Comments
 (0)