Skip to content

Commit df29ddf

Browse files
Marc Zyngier authored and willdeacon committed
arm64: perf: Abstract system register accesses away
As we want to enable 32bit support, we need to distanciate the PMUv3 driver from the AArch64 system register names. This patch moves all system register accesses to an architecture specific include file, allowing the 32bit counterpart to be slotted in at a later time. Signed-off-by: Marc Zyngier <marc.zyngier@arm.com> Co-developed-by: Zaid Al-Bassam <zalbassam@google.com> Signed-off-by: Zaid Al-Bassam <zalbassam@google.com> Tested-by: Florian Fainelli <f.fainelli@gmail.com> Link: https://lore.kernel.org/r/20230317195027.3746949-3-zalbassam@google.com Signed-off-by: Will Deacon <will@kernel.org>
1 parent 7755cec commit df29ddf

3 files changed

Lines changed: 205 additions & 92 deletions

File tree

arch/arm64/include/asm/arm_pmuv3.h

Lines changed: 137 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,137 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_PMUV3_H
#define __ASM_PMUV3_H

#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/*
 * AArch64 accessors for the PMUv3 system registers.
 *
 * The PMUv3 driver goes through these wrappers instead of naming the
 * AArch64 registers directly, so that an AArch32 counterpart can be
 * slotted in later without touching the driver.
 *
 * NOTE(review): PMEVN_SWITCH()/the PMEVN_* case dispatch is not defined
 * in this header — it is assumed to come from the common PMUv3 header
 * included by the driver; confirm against the full tree.
 */

/* Read PMEVCNTR<n>_EL0 for a runtime counter index n (dispatch via PMEVN_SWITCH). */
#define RETURN_READ_PMEVCNTRN(n) \
	return read_sysreg(pmevcntr##n##_el0)
static unsigned long read_pmevcntrn(int n)
{
	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
	return 0;
}

/* Write PMEVCNTR<n>_EL0 for a runtime counter index n. */
#define WRITE_PMEVCNTRN(n) \
	write_sysreg(val, pmevcntr##n##_el0)
static void write_pmevcntrn(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
}

/* Write PMEVTYPER<n>_EL0 for a runtime counter index n. */
#define WRITE_PMEVTYPERN(n) \
	write_sysreg(val, pmevtyper##n##_el0)
static void write_pmevtypern(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
}

static inline unsigned long read_pmmir(void)
{
	return read_cpuid(PMMIR_EL1);
}

/* Extract the PMU architecture version from ID_AA64DFR0_EL1.PMUVer. */
static inline u32 read_pmuver(void)
{
	u64 dfr0 = read_sysreg(id_aa64dfr0_el1);

	return cpuid_feature_extract_unsigned_field(dfr0,
			ID_AA64DFR0_EL1_PMUVer_SHIFT);
}

static inline void write_pmcr(u32 val)
{
	write_sysreg(val, pmcr_el0);
}

static inline u32 read_pmcr(void)
{
	return read_sysreg(pmcr_el0);
}

static inline void write_pmselr(u32 val)
{
	write_sysreg(val, pmselr_el0);
}

static inline void write_pmccntr(u64 val)
{
	write_sysreg(val, pmccntr_el0);
}

static inline u64 read_pmccntr(void)
{
	return read_sysreg(pmccntr_el0);
}

static inline void write_pmxevcntr(u32 val)
{
	write_sysreg(val, pmxevcntr_el0);
}

static inline u32 read_pmxevcntr(void)
{
	return read_sysreg(pmxevcntr_el0);
}

static inline void write_pmxevtyper(u32 val)
{
	write_sysreg(val, pmxevtyper_el0);
}

static inline void write_pmcntenset(u32 val)
{
	write_sysreg(val, pmcntenset_el0);
}

static inline void write_pmcntenclr(u32 val)
{
	write_sysreg(val, pmcntenclr_el0);
}

static inline void write_pmintenset(u32 val)
{
	write_sysreg(val, pmintenset_el1);
}

static inline void write_pmintenclr(u32 val)
{
	write_sysreg(val, pmintenclr_el1);
}

static inline void write_pmccfiltr(u32 val)
{
	write_sysreg(val, pmccfiltr_el0);
}

static inline void write_pmovsclr(u32 val)
{
	write_sysreg(val, pmovsclr_el0);
}

static inline u32 read_pmovsclr(void)
{
	return read_sysreg(pmovsclr_el0);
}

static inline void write_pmuserenr(u32 val)
{
	write_sysreg(val, pmuserenr_el0);
}

static inline u32 read_pmceid0(void)
{
	return read_sysreg(pmceid0_el0);
}

static inline u32 read_pmceid1(void)
{
	return read_sysreg(pmceid1_el0);
}

#endif /* __ASM_PMUV3_H */

drivers/perf/arm_pmuv3.c

Lines changed: 23 additions & 92 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,6 @@
1010

1111
#include <asm/irq_regs.h>
1212
#include <asm/perf_event.h>
13-
#include <asm/sysreg.h>
1413
#include <asm/virt.h>
1514

1615
#include <clocksource/arm_arch_timer.h>
@@ -25,6 +24,8 @@
2524
#include <linux/sched_clock.h>
2625
#include <linux/smp.h>
2726

27+
#include <asm/arm_pmuv3.h>
28+
2829
/* ARMv8 Cortex-A53 specific event types. */
2930
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
3031

@@ -425,83 +426,16 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
425426
#define ARMV8_IDX_TO_COUNTER(x) \
426427
(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
427428

428-
/*
429-
* This code is really good
430-
*/
431-
432-
#define PMEVN_CASE(n, case_macro) \
433-
case n: case_macro(n); break
434-
435-
#define PMEVN_SWITCH(x, case_macro) \
436-
do { \
437-
switch (x) { \
438-
PMEVN_CASE(0, case_macro); \
439-
PMEVN_CASE(1, case_macro); \
440-
PMEVN_CASE(2, case_macro); \
441-
PMEVN_CASE(3, case_macro); \
442-
PMEVN_CASE(4, case_macro); \
443-
PMEVN_CASE(5, case_macro); \
444-
PMEVN_CASE(6, case_macro); \
445-
PMEVN_CASE(7, case_macro); \
446-
PMEVN_CASE(8, case_macro); \
447-
PMEVN_CASE(9, case_macro); \
448-
PMEVN_CASE(10, case_macro); \
449-
PMEVN_CASE(11, case_macro); \
450-
PMEVN_CASE(12, case_macro); \
451-
PMEVN_CASE(13, case_macro); \
452-
PMEVN_CASE(14, case_macro); \
453-
PMEVN_CASE(15, case_macro); \
454-
PMEVN_CASE(16, case_macro); \
455-
PMEVN_CASE(17, case_macro); \
456-
PMEVN_CASE(18, case_macro); \
457-
PMEVN_CASE(19, case_macro); \
458-
PMEVN_CASE(20, case_macro); \
459-
PMEVN_CASE(21, case_macro); \
460-
PMEVN_CASE(22, case_macro); \
461-
PMEVN_CASE(23, case_macro); \
462-
PMEVN_CASE(24, case_macro); \
463-
PMEVN_CASE(25, case_macro); \
464-
PMEVN_CASE(26, case_macro); \
465-
PMEVN_CASE(27, case_macro); \
466-
PMEVN_CASE(28, case_macro); \
467-
PMEVN_CASE(29, case_macro); \
468-
PMEVN_CASE(30, case_macro); \
469-
default: WARN(1, "Invalid PMEV* index\n"); \
470-
} \
471-
} while (0)
472-
473-
#define RETURN_READ_PMEVCNTRN(n) \
474-
return read_sysreg(pmevcntr##n##_el0)
475-
static unsigned long read_pmevcntrn(int n)
476-
{
477-
PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
478-
return 0;
479-
}
480-
481-
#define WRITE_PMEVCNTRN(n) \
482-
write_sysreg(val, pmevcntr##n##_el0)
483-
static void write_pmevcntrn(int n, unsigned long val)
484-
{
485-
PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
486-
}
487-
488-
#define WRITE_PMEVTYPERN(n) \
489-
write_sysreg(val, pmevtyper##n##_el0)
490-
static void write_pmevtypern(int n, unsigned long val)
491-
{
492-
PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
493-
}
494-
495429
/* Read the PMU control register via the arch accessor (post-commit state). */
static inline u32 armv8pmu_pmcr_read(void)
{
	return read_pmcr();
}
499433

500434
/*
 * Write the PMU control register via the arch accessor, masking off
 * reserved bits; the isb() orders the write against prior accesses.
 */
static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMU_PMCR_MASK;
	isb();
	write_pmcr(val);
}
506440

507441
static inline int armv8pmu_has_overflowed(u32 pmovsr)
@@ -576,7 +510,7 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
576510
u64 value;
577511

578512
if (idx == ARMV8_IDX_CYCLE_COUNTER)
579-
value = read_sysreg(pmccntr_el0);
513+
value = read_pmccntr();
580514
else
581515
value = armv8pmu_read_hw_counter(event);
582516

@@ -611,7 +545,7 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
611545
value = armv8pmu_bias_long_counter(event, value);
612546

613547
if (idx == ARMV8_IDX_CYCLE_COUNTER)
614-
write_sysreg(value, pmccntr_el0);
548+
write_pmccntr(value);
615549
else
616550
armv8pmu_write_hw_counter(event, value);
617551
}
@@ -642,7 +576,7 @@ static inline void armv8pmu_write_event_type(struct perf_event *event)
642576
armv8pmu_write_evtype(idx, chain_evt);
643577
} else {
644578
if (idx == ARMV8_IDX_CYCLE_COUNTER)
645-
write_sysreg(hwc->config_base, pmccfiltr_el0);
579+
write_pmccfiltr(hwc->config_base);
646580
else
647581
armv8pmu_write_evtype(idx, hwc->config_base);
648582
}
@@ -665,7 +599,7 @@ static inline void armv8pmu_enable_counter(u32 mask)
665599
* enable the counter.
666600
*/
667601
isb();
668-
write_sysreg(mask, pmcntenset_el0);
602+
write_pmcntenset(mask);
669603
}
670604

671605
static inline void armv8pmu_enable_event_counter(struct perf_event *event)
@@ -682,7 +616,7 @@ static inline void armv8pmu_enable_event_counter(struct perf_event *event)
682616

683617
static inline void armv8pmu_disable_counter(u32 mask)
684618
{
685-
write_sysreg(mask, pmcntenclr_el0);
619+
write_pmcntenclr(mask);
686620
/*
687621
* Make sure the effects of disabling the counter are visible before we
688622
* start configuring the event.
@@ -704,7 +638,7 @@ static inline void armv8pmu_disable_event_counter(struct perf_event *event)
704638

705639
static inline void armv8pmu_enable_intens(u32 mask)
706640
{
707-
write_sysreg(mask, pmintenset_el1);
641+
write_pmintenset(mask);
708642
}
709643

710644
static inline void armv8pmu_enable_event_irq(struct perf_event *event)
@@ -715,10 +649,10 @@ static inline void armv8pmu_enable_event_irq(struct perf_event *event)
715649

716650
/*
 * Disable overflow interrupts for the counters in @mask, then clear any
 * overflow flag so a pending interrupt is not delivered after disable.
 */
static inline void armv8pmu_disable_intens(u32 mask)
{
	write_pmintenclr(mask);
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	write_pmovsclr(mask);
	isb();
}
724658

@@ -733,18 +667,18 @@ static inline u32 armv8pmu_getreset_flags(void)
733667
u32 value;
734668

735669
/* Read */
736-
value = read_sysreg(pmovsclr_el0);
670+
value = read_pmovsclr();
737671

738672
/* Write to clear flags */
739673
value &= ARMV8_PMU_OVSR_MASK;
740-
write_sysreg(value, pmovsclr_el0);
674+
write_pmovsclr(value);
741675

742676
return value;
743677
}
744678

745679
/* Revoke EL0 (userspace) access to the PMU by clearing PMUSERENR. */
static void armv8pmu_disable_user_access(void)
{
	write_pmuserenr(0);
}
749683

750684
static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
@@ -755,13 +689,13 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
755689
/* Clear any unused counters to avoid leaking their contents */
756690
for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) {
757691
if (i == ARMV8_IDX_CYCLE_COUNTER)
758-
write_sysreg(0, pmccntr_el0);
692+
write_pmccntr(0);
759693
else
760694
armv8pmu_write_evcntr(i, 0);
761695
}
762696

763-
write_sysreg(0, pmuserenr_el0);
764-
write_sysreg(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR, pmuserenr_el0);
697+
write_pmuserenr(0);
698+
write_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR);
765699
}
766700

767701
static void armv8pmu_enable_event(struct perf_event *event)
@@ -1145,14 +1079,11 @@ static void __armv8pmu_probe_pmu(void *info)
11451079
{
11461080
struct armv8pmu_probe_info *probe = info;
11471081
struct arm_pmu *cpu_pmu = probe->pmu;
1148-
u64 dfr0;
11491082
u64 pmceid_raw[2];
11501083
u32 pmceid[2];
11511084
int pmuver;
11521085

1153-
dfr0 = read_sysreg(id_aa64dfr0_el1);
1154-
pmuver = cpuid_feature_extract_unsigned_field(dfr0,
1155-
ID_AA64DFR0_EL1_PMUVer_SHIFT);
1086+
pmuver = read_pmuver();
11561087
if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF ||
11571088
pmuver == ID_AA64DFR0_EL1_PMUVer_NI)
11581089
return;
@@ -1167,8 +1098,8 @@ static void __armv8pmu_probe_pmu(void *info)
11671098
/* Add the CPU cycles counter */
11681099
cpu_pmu->num_events += 1;
11691100

1170-
pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
1171-
pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);
1101+
pmceid[0] = pmceid_raw[0] = read_pmceid0();
1102+
pmceid[1] = pmceid_raw[1] = read_pmceid1();
11721103

11731104
bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
11741105
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
@@ -1179,9 +1110,9 @@ static void __armv8pmu_probe_pmu(void *info)
11791110
bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
11801111
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
11811112

1182-
/* store PMMIR_EL1 register for sysfs */
1113+
/* store PMMIR register for sysfs */
11831114
if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31)))
1184-
cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
1115+
cpu_pmu->reg_pmmir = read_pmmir();
11851116
else
11861117
cpu_pmu->reg_pmmir = 0;
11871118
}

0 commit comments

Comments
 (0)