
Commit 8bb8b60

KVM: x86: Push acquisition of SRCU in fastpath into kvm_pmu_trigger_event()
Acquire SRCU in the VM-Exit fastpath if and only if KVM needs to check the PMU event filter, to further trim the amount of code that is executed with SRCU protection in the fastpath.

Counter-intuitively, holding SRCU can do more harm than good due to masking potential bugs, and introducing a new SRCU-protected asset to code reachable via kvm_skip_emulated_instruction() would be quite notable, i.e. definitely worth auditing. E.g. the primary user of kvm->srcu is KVM's memslots, accessing memslots all but guarantees guest memory may be accessed, accessing guest memory can fault, and page faults might sleep, which isn't allowed while IRQs are disabled. Not acquiring SRCU means the (hypothetical) illegal sleep would be flagged when running with PROVE_RCU=y, even if DEBUG_ATOMIC_SLEEP=n.

Note, performance is NOT a motivating factor, as SRCU lock/unlock only adds ~15 cycles of latency to fastpath VM-Exits. I.e. overhead isn't a concern _if_ SRCU protection needs to be extended beyond PMU events, e.g. to honor userspace MSR filters.

Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20250805190526.1453366-18-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
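For context, below is a minimal sketch of the SRCU read-side pattern the fastpath now applies only around the event-filter walk. The "demo" struct, field, and function names are purely illustrative, not KVM's actual types; srcu_read_lock(), srcu_read_unlock() and srcu_dereference() are the real kernel primitives in question.

#include <linux/srcu.h>
#include <linux/types.h>

/* Illustrative SRCU-protected filter; not KVM's actual structures. */
struct demo_filter {
        u64 allowed_event;
};

struct demo {
        struct srcu_struct srcu;
        struct demo_filter __rcu *filter;
};

static bool demo_event_allowed(struct demo *d, u64 eventsel)
{
        struct demo_filter *f;
        bool allowed = true;
        int idx;

        /*
         * Bracket only the dereference of the SRCU-protected pointer.
         * Keeping the read-side critical section this narrow means a
         * sleep elsewhere in the fastpath (e.g. a faulting guest-memory
         * access) is flagged by PROVE_RCU rather than quietly tolerated.
         */
        idx = srcu_read_lock(&d->srcu);
        f = srcu_dereference(d->filter, &d->srcu);
        if (f)
                allowed = (f->allowed_event == eventsel);
        srcu_read_unlock(&d->srcu, idx);

        return allowed;
}

In the patch itself the equivalent narrowing happens inside kvm_pmu_trigger_event(), as shown in the arch/x86/kvm/pmu.c diff below.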
1 parent 3eced8b commit 8bb8b60

2 files changed

Lines changed: 8 additions & 14 deletions


arch/x86/kvm/pmu.c

Lines changed: 3 additions & 1 deletion
@@ -960,7 +960,7 @@ static void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu,
 	DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *pmc;
-	int i;
+	int i, idx;
 
 	BUILD_BUG_ON(sizeof(pmu->global_ctrl) * BITS_PER_BYTE != X86_PMC_IDX_MAX);
 
@@ -973,12 +973,14 @@ static void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu,
 			(unsigned long *)&pmu->global_ctrl, X86_PMC_IDX_MAX))
 		return;
 
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	kvm_for_each_pmc(pmu, pmc, i, bitmap) {
 		if (!pmc_is_event_allowed(pmc) || !cpl_is_matched(pmc))
 			continue;
 
 		kvm_pmu_incr_counter(pmc);
 	}
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 }
 
 void kvm_pmu_instruction_retired(struct kvm_vcpu *vcpu)

arch/x86/kvm/x86.c

Lines changed: 5 additions & 13 deletions
@@ -2138,7 +2138,6 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
 {
 	u64 data = kvm_read_edx_eax(vcpu);
 	u32 msr = kvm_rcx_read(vcpu);
-	int r;
 
 	switch (msr) {
 	case APIC_BASE_MSR + (APIC_ICR >> 4):
@@ -2153,13 +2152,12 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
 		return EXIT_FASTPATH_NONE;
 	}
 
-	kvm_vcpu_srcu_read_lock(vcpu);
-	r = kvm_skip_emulated_instruction(vcpu);
-	kvm_vcpu_srcu_read_unlock(vcpu);
-
 	trace_kvm_msr_write(msr, data);
 
-	return r ? EXIT_FASTPATH_REENTER_GUEST : EXIT_FASTPATH_EXIT_USERSPACE;
+	if (!kvm_skip_emulated_instruction(vcpu))
+		return EXIT_FASTPATH_EXIT_USERSPACE;
+
+	return EXIT_FASTPATH_REENTER_GUEST;
 }
 EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
 
@@ -11254,13 +11252,7 @@ EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
 fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu)
 {
-	int ret;
-
-	kvm_vcpu_srcu_read_lock(vcpu);
-	ret = kvm_emulate_halt(vcpu);
-	kvm_vcpu_srcu_read_unlock(vcpu);
-
-	if (!ret)
+	if (!kvm_emulate_halt(vcpu))
 		return EXIT_FASTPATH_EXIT_USERSPACE;
 
 	if (kvm_vcpu_running(vcpu))
