
Commit 2093d8c

Author: Peter Zijlstra
perf/x86/intel: Optimize PEBS extended config
Similar to enable_acr_event, avoid the branch.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Parent: 02da693

1 file changed: 14 additions, 11 deletions

arch/x86/events/intel/core.c
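The "branch" being avoided is the x86_pmu.arch_pebs test that intel_pmu_disable_event_ext and intel_pmu_enable_event_ext used to run on every event enable/disable. The commit converts both helpers to the kernel's static-call mechanism: the call target starts out NULL, the hot path uses static_call_cond() (patched to a NOP while the target is NULL), and intel_pmu_init() installs the real functions once if the feature is present. A minimal sketch of that three-step pattern, with hypothetical names (my_hook, hot_path, my_feature_present) standing in for the PEBS helpers:

#include <linux/init.h>
#include <linux/static_call.h>

static bool my_feature_present;		/* hypothetical boot-time flag */

static void my_hook(int arg)
{
        /* feature-specific work, e.g. programming an extra MSR */
}

/* Target starts out NULL; my_hook is named here only for its type. */
DEFINE_STATIC_CALL_NULL(my_hook, my_hook);

static void hot_path(int arg)
{
        /*
         * For a void-returning static call a NULL target is patched
         * to a NOP: no flag load, no compare, no conditional branch.
         */
        static_call_cond(my_hook)(arg);
}

static int __init my_pmu_init(void)
{
        if (my_feature_present)
                static_call_update(my_hook, my_hook);
        return 0;
}

The hunks below apply exactly this shape to intel_pmu_disable_event_ext and intel_pmu_enable_event_ext.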
@@ -2582,9 +2582,6 @@ static inline void __intel_pmu_update_event_ext(int idx, u64 ext)
 
 static void intel_pmu_disable_event_ext(struct perf_event *event)
 {
-        if (!x86_pmu.arch_pebs)
-                return;
-
         /*
          * Only clear CFG_C MSR for PEBS counter group events,
          * it avoids the HW counter's value to be added into
@@ -2602,6 +2599,8 @@ static void intel_pmu_disable_event_ext(struct perf_event *event)
                 __intel_pmu_update_event_ext(event->hw.idx, 0);
 }
 
+DEFINE_STATIC_CALL_NULL(intel_pmu_disable_event_ext, intel_pmu_disable_event_ext);
+
 static void intel_pmu_disable_event(struct perf_event *event)
 {
         struct hw_perf_event *hwc = &event->hw;
@@ -2610,11 +2609,11 @@ static void intel_pmu_disable_event(struct perf_event *event)
         switch (idx) {
         case 0 ... INTEL_PMC_IDX_FIXED - 1:
                 intel_clear_masks(event, idx);
-                intel_pmu_disable_event_ext(event);
+                static_call_cond(intel_pmu_disable_event_ext)(event);
                 x86_pmu_disable_event(event);
                 break;
         case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
-                intel_pmu_disable_event_ext(event);
+                static_call_cond(intel_pmu_disable_event_ext)(event);
                 fallthrough;
         case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
                 intel_pmu_disable_fixed(event);
@@ -2990,9 +2989,6 @@ static void intel_pmu_enable_event_ext(struct perf_event *event)
         struct arch_pebs_cap cap;
         u64 ext = 0;
 
-        if (!x86_pmu.arch_pebs)
-                return;
-
         cap = hybrid(cpuc->pmu, arch_pebs_cap);
 
         if (event->attr.precise_ip) {
@@ -3056,6 +3052,8 @@ static void intel_pmu_enable_event_ext(struct perf_event *event)
                 __intel_pmu_update_event_ext(hwc->idx, ext);
 }
 
+DEFINE_STATIC_CALL_NULL(intel_pmu_enable_event_ext, intel_pmu_enable_event_ext);
+
 static void intel_pmu_enable_event(struct perf_event *event)
 {
         u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE;
@@ -3071,12 +3069,12 @@ static void intel_pmu_enable_event(struct perf_event *event)
                 enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR;
                 intel_set_masks(event, idx);
                 static_call_cond(intel_pmu_enable_acr_event)(event);
-                intel_pmu_enable_event_ext(event);
+                static_call_cond(intel_pmu_enable_event_ext)(event);
                 __x86_pmu_enable_event(hwc, enable_mask);
                 break;
         case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
                 static_call_cond(intel_pmu_enable_acr_event)(event);
-                intel_pmu_enable_event_ext(event);
+                static_call_cond(intel_pmu_enable_event_ext)(event);
                 fallthrough;
         case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
                 intel_pmu_enable_fixed(event);
@@ -8106,8 +8104,13 @@ __init int intel_pmu_init(void)
         if (!is_hybrid() && boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
                 update_pmu_cap(NULL);
 
-        if (x86_pmu.arch_pebs)
+        if (x86_pmu.arch_pebs) {
+                static_call_update(intel_pmu_disable_event_ext,
+                                   intel_pmu_disable_event_ext);
+                static_call_update(intel_pmu_enable_event_ext,
+                                   intel_pmu_enable_event_ext);
                 pr_cont("Architectural PEBS, ");
+        }
 
         intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64,
                                       &x86_pmu.fixed_cntr_mask64,
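Seen from the call sites, the result behaves like a NULL-checked function pointer that intel_pmu_init() fills in once, except that static calls patch the call instruction itself, so the non-PEBS hot path carries no test at all. A standalone userspace analogue of that observable behaviour (plain C with a function pointer; illustrative only, not the kernel mechanism):

#include <stdbool.h>
#include <stdio.h>

struct perf_event;		/* opaque stand-in for the kernel type */

static void enable_event_ext(struct perf_event *event)
{
        printf("programming extended PEBS config\n");
}

/* Starts out NULL, like DEFINE_STATIC_CALL_NULL(). */
static void (*enable_event_ext_call)(struct perf_event *);

static void pmu_init(bool arch_pebs)
{
        if (arch_pebs)		/* like static_call_update() in intel_pmu_init() */
                enable_event_ext_call = enable_event_ext;
}

static void pmu_enable_event(struct perf_event *event)
{
        if (enable_event_ext_call)	/* like static_call_cond() */
                enable_event_ext_call(event);
}

int main(void)
{
        pmu_init(true);
        pmu_enable_event(NULL);	/* no real event needed for the demo */
        return 0;
}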
