Skip to content

Commit 43f5bea

Browse files
committed
KVM: x86/pmu: Add wrappers for counting emulated instructions/branches
Add wrappers for triggering instruction retired and branch retired PMU events in anticipation of reworking the internal mechanisms to track which PMCs need to be evaluated, e.g. to avoid having to walk and check every PMC. Opportunistically bury "struct kvm_pmu_emulated_event_selectors" in pmu.c.

No functional change intended.

Link: https://lore.kernel.org/r/20250805190526.1453366-11-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent a3e80bf commit 43f5bea

4 files changed

Lines changed: 24 additions & 15 deletions

File tree

arch/x86/kvm/pmu.c

Lines changed: 18 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -29,8 +29,11 @@
2929
struct x86_pmu_capability __read_mostly kvm_pmu_cap;
3030
EXPORT_SYMBOL_GPL(kvm_pmu_cap);
3131

32-
struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;
33-
EXPORT_SYMBOL_GPL(kvm_pmu_eventsel);
32+
struct kvm_pmu_emulated_event_selectors {
33+
u64 INSTRUCTIONS_RETIRED;
34+
u64 BRANCH_INSTRUCTIONS_RETIRED;
35+
};
36+
static struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;
3437

3538
/* Precise Distribution of Instructions Retired (PDIR) */
3639
static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
@@ -912,7 +915,7 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc)
912915
select_user;
913916
}
914917

915-
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
918+
static void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
916919
{
917920
DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
918921
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -949,7 +952,18 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
949952
kvm_pmu_incr_counter(pmc);
950953
}
951954
}
952-
EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
955+
956+
void kvm_pmu_instruction_retired(struct kvm_vcpu *vcpu)
957+
{
958+
kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);
959+
}
960+
EXPORT_SYMBOL_GPL(kvm_pmu_instruction_retired);
961+
962+
void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu)
963+
{
964+
kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED);
965+
}
966+
EXPORT_SYMBOL_GPL(kvm_pmu_branch_retired);
953967

954968
static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
955969
{

arch/x86/kvm/pmu.h

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -23,11 +23,6 @@
2323

2424
#define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED
2525

26-
struct kvm_pmu_emulated_event_selectors {
27-
u64 INSTRUCTIONS_RETIRED;
28-
u64 BRANCH_INSTRUCTIONS_RETIRED;
29-
};
30-
3126
struct kvm_pmu_ops {
3227
struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
3328
unsigned int idx, u64 *mask);
@@ -178,7 +173,6 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
178173
}
179174

180175
extern struct x86_pmu_capability kvm_pmu_cap;
181-
extern struct kvm_pmu_emulated_event_selectors kvm_pmu_eventsel;
182176

183177
void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops);
184178

@@ -227,7 +221,8 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu);
227221
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
228222
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
229223
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
230-
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);
224+
void kvm_pmu_instruction_retired(struct kvm_vcpu *vcpu);
225+
void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu);
231226

232227
bool is_vmware_backdoor_pmc(u32 pmc_idx);
233228

arch/x86/kvm/vmx/nested.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3690,7 +3690,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
36903690
return 1;
36913691
}
36923692

3693-
kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED);
3693+
kvm_pmu_branch_retired(vcpu);
36943694

36953695
if (CC(evmptrld_status == EVMPTRLD_VMFAIL))
36963696
return nested_vmx_failInvalid(vcpu);

arch/x86/kvm/x86.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8820,7 +8820,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
88208820
if (unlikely(!r))
88218821
return 0;
88228822

8823-
kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);
8823+
kvm_pmu_instruction_retired(vcpu);
88248824

88258825
/*
88268826
* rflags is the old, "raw" value of the flags. The new value has
@@ -9161,9 +9161,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
91619161
*/
91629162
if (!ctxt->have_exception ||
91639163
exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
9164-
kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);
9164+
kvm_pmu_instruction_retired(vcpu);
91659165
if (ctxt->is_branch)
9166-
kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED);
9166+
kvm_pmu_branch_retired(vcpu);
91679167
kvm_rip_write(vcpu, ctxt->eip);
91689168
if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
91699169
r = kvm_vcpu_do_singlestep(vcpu);

0 commit comments

Comments (0)