Skip to content

Commit 2239d13

Browse files
committed
KVM: VMX: Compartmentalize adding MSRs to host vs. guest auto-load list
Undo the bundling of the "host" and "guest" MSR auto-load list logic so that the code can be deduplicated by factoring out the logic to a separate helper. Now that "list full" situations are treated as fatal to the VM, there is no need to pre-check both lists. For all intents and purposes, this reverts the add_atomic_switch_msr() changes made by commit 3190709 ("x86/KVM/VMX: Separate the VMX AUTOLOAD guest/host number accounting"). Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com> Tested-by: Manali Shukla <manali.shukla@amd.com> Link: https://patch.msgid.link/20251206001720.468579-42-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 0c4ff08 commit 2239d13

1 file changed

Lines changed: 12 additions & 11 deletions

File tree

arch/x86/kvm/vmx/vmx.c

Lines changed: 12 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -1096,9 +1096,9 @@ static __always_inline void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
10961096
static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
10971097
u64 guest_val, u64 host_val)
10981098
{
1099-
int i, j = 0;
11001099
struct msr_autoload *m = &vmx->msr_autoload;
11011100
struct kvm *kvm = vmx->vcpu.kvm;
1101+
int i;
11021102

11031103
switch (msr) {
11041104
case MSR_EFER:
@@ -1133,25 +1133,26 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
11331133
}
11341134

11351135
i = vmx_find_loadstore_msr_slot(&m->guest, msr);
1136-
j = vmx_find_loadstore_msr_slot(&m->host, msr);
1137-
1138-
if (KVM_BUG_ON(i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS, kvm) ||
1139-
KVM_BUG_ON(j < 0 && m->host.nr == MAX_NR_LOADSTORE_MSRS, kvm))
1140-
return;
1141-
11421136
if (i < 0) {
1137+
if (KVM_BUG_ON(m->guest.nr == MAX_NR_LOADSTORE_MSRS, kvm))
1138+
return;
1139+
11431140
i = m->guest.nr++;
11441141
m->guest.val[i].index = msr;
11451142
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
11461143
}
11471144
m->guest.val[i].value = guest_val;
11481145

1149-
if (j < 0) {
1150-
j = m->host.nr++;
1151-
m->host.val[j].index = msr;
1146+
i = vmx_find_loadstore_msr_slot(&m->host, msr);
1147+
if (i < 0) {
1148+
if (KVM_BUG_ON(m->host.nr == MAX_NR_LOADSTORE_MSRS, kvm))
1149+
return;
1150+
1151+
i = m->host.nr++;
1152+
m->host.val[i].index = msr;
11521153
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
11531154
}
1154-
m->host.val[j].value = host_val;
1155+
m->host.val[i].value = host_val;
11551156
}
11561157

11571158
static bool update_transition_efer(struct vcpu_vmx *vmx)

0 commit comments

Comments (0)