Skip to content

Commit 3f06b89

Browse files
gaochaointel authored and sean-jc committed
KVM: x86: Deduplicate MSR interception enabling and disabling
Extract a common function from MSR interception disabling logic and create
disabling and enabling functions based on it. This removes most of the
duplicated code for MSR interception disabling/enabling.

No functional change intended.

Signed-off-by: Chao Gao <chao.gao@intel.com>
Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20250612081947.94081-2-chao.gao@intel.com
[sean: s/enable/set, inline the wrappers]
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 58c81bc commit 3f06b89

4 files changed

Lines changed: 29 additions & 57 deletions

File tree

arch/x86/kvm/svm/svm.c

Lines changed: 3 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -679,21 +679,21 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
679679
return svm_test_msr_bitmap_write(msrpm, msr);
680680
}
681681

682-
void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
682+
void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set)
683683
{
684684
struct vcpu_svm *svm = to_svm(vcpu);
685685
void *msrpm = svm->msrpm;
686686

687687
/* Don't disable interception for MSRs userspace wants to handle. */
688688
if (type & MSR_TYPE_R) {
689-
if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
689+
if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
690690
svm_clear_msr_bitmap_read(msrpm, msr);
691691
else
692692
svm_set_msr_bitmap_read(msrpm, msr);
693693
}
694694

695695
if (type & MSR_TYPE_W) {
696-
if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
696+
if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
697697
svm_clear_msr_bitmap_write(msrpm, msr);
698698
else
699699
svm_set_msr_bitmap_write(msrpm, msr);
@@ -703,21 +703,6 @@ void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
703703
svm->nested.force_msr_bitmap_recalc = true;
704704
}
705705

706-
void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
707-
{
708-
struct vcpu_svm *svm = to_svm(vcpu);
709-
void *msrpm = svm->msrpm;
710-
711-
if (type & MSR_TYPE_R)
712-
svm_set_msr_bitmap_read(msrpm, msr);
713-
714-
if (type & MSR_TYPE_W)
715-
svm_set_msr_bitmap_write(msrpm, msr);
716-
717-
svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
718-
svm->nested.force_msr_bitmap_recalc = true;
719-
}
720-
721706
void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask)
722707
{
723708
unsigned int order = get_order(size);

arch/x86/kvm/svm/svm.h

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -694,16 +694,18 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
694694
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
695695
int trig_mode, int vec);
696696

697-
void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
698-
void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
697+
void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set);
699698

700-
static inline void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
701-
int type, bool enable_intercept)
699+
static inline void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
700+
u32 msr, int type)
702701
{
703-
if (enable_intercept)
704-
svm_enable_intercept_for_msr(vcpu, msr, type);
705-
else
706-
svm_disable_intercept_for_msr(vcpu, msr, type);
702+
svm_set_intercept_for_msr(vcpu, msr, type, false);
703+
}
704+
705+
static inline void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
706+
u32 msr, int type)
707+
{
708+
svm_set_intercept_for_msr(vcpu, msr, type, true);
707709
}
708710

709711
/* nested.c */

arch/x86/kvm/vmx/vmx.c

Lines changed: 3 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -3963,7 +3963,7 @@ static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
39633963
vmx->nested.force_msr_bitmap_recalc = true;
39643964
}
39653965

3966-
void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
3966+
void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set)
39673967
{
39683968
struct vcpu_vmx *vmx = to_vmx(vcpu);
39693969
unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
@@ -3974,37 +3974,20 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
39743974
vmx_msr_bitmap_l01_changed(vmx);
39753975

39763976
if (type & MSR_TYPE_R) {
3977-
if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
3977+
if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
39783978
vmx_clear_msr_bitmap_read(msr_bitmap, msr);
39793979
else
39803980
vmx_set_msr_bitmap_read(msr_bitmap, msr);
39813981
}
39823982

39833983
if (type & MSR_TYPE_W) {
3984-
if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
3984+
if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
39853985
vmx_clear_msr_bitmap_write(msr_bitmap, msr);
39863986
else
39873987
vmx_set_msr_bitmap_write(msr_bitmap, msr);
39883988
}
39893989
}
39903990

3991-
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
3992-
{
3993-
struct vcpu_vmx *vmx = to_vmx(vcpu);
3994-
unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
3995-
3996-
if (!cpu_has_vmx_msr_bitmap())
3997-
return;
3998-
3999-
vmx_msr_bitmap_l01_changed(vmx);
4000-
4001-
if (type & MSR_TYPE_R)
4002-
vmx_set_msr_bitmap_read(msr_bitmap, msr);
4003-
4004-
if (type & MSR_TYPE_W)
4005-
vmx_set_msr_bitmap_write(msr_bitmap, msr);
4006-
}
4007-
40083991
static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu)
40093992
{
40103993
/*

arch/x86/kvm/vmx/vmx.h

Lines changed: 13 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -386,23 +386,25 @@ bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
386386
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
387387
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
388388

389-
void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
390-
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
389+
void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set);
390+
391+
static inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
392+
u32 msr, int type)
393+
{
394+
vmx_set_intercept_for_msr(vcpu, msr, type, false);
395+
}
396+
397+
static inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
398+
u32 msr, int type)
399+
{
400+
vmx_set_intercept_for_msr(vcpu, msr, type, true);
401+
}
391402

392403
u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
393404
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
394405

395406
gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
396407

397-
static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
398-
int type, bool value)
399-
{
400-
if (value)
401-
vmx_enable_intercept_for_msr(vcpu, msr, type);
402-
else
403-
vmx_disable_intercept_for_msr(vcpu, msr, type);
404-
}
405-
406408
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
407409

408410
u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated);

0 commit comments

Comments (0)