1010
1111#include <asm/irq_regs.h>
1212#include <asm/perf_event.h>
13- #include <asm/sysreg.h>
1413#include <asm/virt.h>
1514
1615#include <clocksource/arm_arch_timer.h>
2524#include <linux/sched_clock.h>
2625#include <linux/smp.h>
2726
27+ #include <asm/arm_pmuv3.h>
28+
2829/* ARMv8 Cortex-A53 specific event types. */
2930#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
3031
@@ -425,83 +426,16 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
/*
 * Map a driver-internal counter index onto the architectural event
 * counter number (the internal index space reserves the low indices,
 * starting at ARMV8_IDX_COUNTER0, for fixed counters).
 * Must be a function-like macro: no space before the parameter list.
 */
#define ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
427428
/*
 * Accessors for the banked per-event registers PMEVCNTR<n>_EL0 and
 * PMEVTYPER<n>_EL0. The register number must be a compile-time literal
 * in the sysreg name, hence the switch-based dispatch below.
 */
431-
/* Expand to one "case n:" arm that invokes case_macro(n). */
#define PMEVN_CASE(n, case_macro) \
	case n: case_macro(n); break

/*
 * Dispatch a run-time event-counter index x (0..30) to a
 * compile-time-constant system-register access. The register number is
 * token-pasted into the sysreg name by case_macro (e.g.
 * pmevcntr##n##_el0), so it must be a literal; a switch over every
 * possible index is the only way to select the register at run time.
 * An out-of-range index triggers a WARN and falls through (no access).
 */
#define PMEVN_SWITCH(x, case_macro) \
	do { \
		switch (x) { \
		PMEVN_CASE(0, case_macro); \
		PMEVN_CASE(1, case_macro); \
		PMEVN_CASE(2, case_macro); \
		PMEVN_CASE(3, case_macro); \
		PMEVN_CASE(4, case_macro); \
		PMEVN_CASE(5, case_macro); \
		PMEVN_CASE(6, case_macro); \
		PMEVN_CASE(7, case_macro); \
		PMEVN_CASE(8, case_macro); \
		PMEVN_CASE(9, case_macro); \
		PMEVN_CASE(10, case_macro); \
		PMEVN_CASE(11, case_macro); \
		PMEVN_CASE(12, case_macro); \
		PMEVN_CASE(13, case_macro); \
		PMEVN_CASE(14, case_macro); \
		PMEVN_CASE(15, case_macro); \
		PMEVN_CASE(16, case_macro); \
		PMEVN_CASE(17, case_macro); \
		PMEVN_CASE(18, case_macro); \
		PMEVN_CASE(19, case_macro); \
		PMEVN_CASE(20, case_macro); \
		PMEVN_CASE(21, case_macro); \
		PMEVN_CASE(22, case_macro); \
		PMEVN_CASE(23, case_macro); \
		PMEVN_CASE(24, case_macro); \
		PMEVN_CASE(25, case_macro); \
		PMEVN_CASE(26, case_macro); \
		PMEVN_CASE(27, case_macro); \
		PMEVN_CASE(28, case_macro); \
		PMEVN_CASE(29, case_macro); \
		PMEVN_CASE(30, case_macro); \
		default: WARN(1, "Invalid PMEV* index\n"); \
		} \
	} while (0)
472-
/* Case body for PMEVN_SWITCH: return the counter value for index n. */
#define RETURN_READ_PMEVCNTRN(n) \
	return read_sysreg(pmevcntr##n##_el0)
/* Read PMEVCNTR<n>_EL0 for a run-time counter index n (0..30). */
static unsigned long read_pmevcntrn(int n)
{
	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
	/* Only reached for an invalid index (after the WARN). */
	return 0;
}
480-
/* Case body for PMEVN_SWITCH: write val (captured from the caller). */
#define WRITE_PMEVCNTRN(n) \
	write_sysreg(val, pmevcntr##n##_el0)
/* Write val to PMEVCNTR<n>_EL0 for a run-time counter index n (0..30). */
static void write_pmevcntrn(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
}
487-
/* Case body for PMEVN_SWITCH: write val (captured from the caller). */
#define WRITE_PMEVTYPERN(n) \
	write_sysreg(val, pmevtyper##n##_el0)
/* Write val to PMEVTYPER<n>_EL0 for a run-time counter index n (0..30). */
static void write_pmevtypern(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
}
494-
495429static inline u32 armv8pmu_pmcr_read (void )
496430{
497- return read_sysreg ( pmcr_el0 );
431+ return read_pmcr ( );
498432}
499433
500434static inline void armv8pmu_pmcr_write (u32 val )
501435{
502436 val &= ARMV8_PMU_PMCR_MASK ;
503437 isb ();
504- write_sysreg (val , pmcr_el0 );
438+ write_pmcr (val );
505439}
506440
507441static inline int armv8pmu_has_overflowed (u32 pmovsr )
@@ -576,7 +510,7 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
576510 u64 value ;
577511
578512 if (idx == ARMV8_IDX_CYCLE_COUNTER )
579- value = read_sysreg ( pmccntr_el0 );
513+ value = read_pmccntr ( );
580514 else
581515 value = armv8pmu_read_hw_counter (event );
582516
@@ -611,7 +545,7 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
611545 value = armv8pmu_bias_long_counter (event , value );
612546
613547 if (idx == ARMV8_IDX_CYCLE_COUNTER )
614- write_sysreg (value , pmccntr_el0 );
548+ write_pmccntr (value );
615549 else
616550 armv8pmu_write_hw_counter (event , value );
617551}
@@ -642,7 +576,7 @@ static inline void armv8pmu_write_event_type(struct perf_event *event)
642576 armv8pmu_write_evtype (idx , chain_evt );
643577 } else {
644578 if (idx == ARMV8_IDX_CYCLE_COUNTER )
645- write_sysreg (hwc -> config_base , pmccfiltr_el0 );
579+ write_pmccfiltr (hwc -> config_base );
646580 else
647581 armv8pmu_write_evtype (idx , hwc -> config_base );
648582 }
@@ -665,7 +599,7 @@ static inline void armv8pmu_enable_counter(u32 mask)
665599 * enable the counter.
666600 * */
667601 isb ();
668- write_sysreg (mask , pmcntenset_el0 );
602+ write_pmcntenset (mask );
669603}
670604
671605static inline void armv8pmu_enable_event_counter (struct perf_event * event )
@@ -682,7 +616,7 @@ static inline void armv8pmu_enable_event_counter(struct perf_event *event)
682616
683617static inline void armv8pmu_disable_counter (u32 mask )
684618{
685- write_sysreg (mask , pmcntenclr_el0 );
619+ write_pmcntenclr (mask );
686620 /*
687621 * Make sure the effects of disabling the counter are visible before we
688622 * start configuring the event.
@@ -704,7 +638,7 @@ static inline void armv8pmu_disable_event_counter(struct perf_event *event)
704638
705639static inline void armv8pmu_enable_intens (u32 mask )
706640{
707- write_sysreg (mask , pmintenset_el1 );
641+ write_pmintenset (mask );
708642}
709643
710644static inline void armv8pmu_enable_event_irq (struct perf_event * event )
@@ -715,10 +649,10 @@ static inline void armv8pmu_enable_event_irq(struct perf_event *event)
715649
716650static inline void armv8pmu_disable_intens (u32 mask )
717651{
718- write_sysreg (mask , pmintenclr_el1 );
652+ write_pmintenclr (mask );
719653 isb ();
720654 /* Clear the overflow flag in case an interrupt is pending. */
721- write_sysreg (mask , pmovsclr_el0 );
655+ write_pmovsclr (mask );
722656 isb ();
723657}
724658
@@ -733,18 +667,18 @@ static inline u32 armv8pmu_getreset_flags(void)
733667 u32 value ;
734668
735669 /* Read */
736- value = read_sysreg ( pmovsclr_el0 );
670+ value = read_pmovsclr ( );
737671
738672 /* Write to clear flags */
739673 value &= ARMV8_PMU_OVSR_MASK ;
740- write_sysreg (value , pmovsclr_el0 );
674+ write_pmovsclr (value );
741675
742676 return value ;
743677}
744678
/* Revoke EL0 (userspace) access to the PMU by clearing PMUSERENR. */
static void armv8pmu_disable_user_access(void)
{
	write_pmuserenr(0);
}
749683
750684static void armv8pmu_enable_user_access (struct arm_pmu * cpu_pmu )
@@ -755,13 +689,13 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
755689 /* Clear any unused counters to avoid leaking their contents */
756690 for_each_clear_bit (i , cpuc -> used_mask , cpu_pmu -> num_events ) {
757691 if (i == ARMV8_IDX_CYCLE_COUNTER )
758- write_sysreg ( 0 , pmccntr_el0 );
692+ write_pmccntr ( 0 );
759693 else
760694 armv8pmu_write_evcntr (i , 0 );
761695 }
762696
763- write_sysreg ( 0 , pmuserenr_el0 );
764- write_sysreg (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR , pmuserenr_el0 );
697+ write_pmuserenr ( 0 );
698+ write_pmuserenr (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR );
765699}
766700
767701static void armv8pmu_enable_event (struct perf_event * event )
@@ -1145,14 +1079,11 @@ static void __armv8pmu_probe_pmu(void *info)
11451079{
11461080 struct armv8pmu_probe_info * probe = info ;
11471081 struct arm_pmu * cpu_pmu = probe -> pmu ;
1148- u64 dfr0 ;
11491082 u64 pmceid_raw [2 ];
11501083 u32 pmceid [2 ];
11511084 int pmuver ;
11521085
1153- dfr0 = read_sysreg (id_aa64dfr0_el1 );
1154- pmuver = cpuid_feature_extract_unsigned_field (dfr0 ,
1155- ID_AA64DFR0_EL1_PMUVer_SHIFT );
1086+ pmuver = read_pmuver ();
11561087 if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF ||
11571088 pmuver == ID_AA64DFR0_EL1_PMUVer_NI )
11581089 return ;
@@ -1167,8 +1098,8 @@ static void __armv8pmu_probe_pmu(void *info)
11671098 /* Add the CPU cycles counter */
11681099 cpu_pmu -> num_events += 1 ;
11691100
1170- pmceid [0 ] = pmceid_raw [0 ] = read_sysreg ( pmceid0_el0 );
1171- pmceid [1 ] = pmceid_raw [1 ] = read_sysreg ( pmceid1_el0 );
1101+ pmceid [0 ] = pmceid_raw [0 ] = read_pmceid0 ( );
1102+ pmceid [1 ] = pmceid_raw [1 ] = read_pmceid1 ( );
11721103
11731104 bitmap_from_arr32 (cpu_pmu -> pmceid_bitmap ,
11741105 pmceid , ARMV8_PMUV3_MAX_COMMON_EVENTS );
@@ -1179,9 +1110,9 @@ static void __armv8pmu_probe_pmu(void *info)
11791110 bitmap_from_arr32 (cpu_pmu -> pmceid_ext_bitmap ,
11801111 pmceid , ARMV8_PMUV3_MAX_COMMON_EVENTS );
11811112
1182- /* store PMMIR_EL1 register for sysfs */
1113+ /* store PMMIR register for sysfs */
11831114 if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw [1 ] & BIT (31 )))
1184- cpu_pmu -> reg_pmmir = read_cpuid ( PMMIR_EL1 );
1115+ cpu_pmu -> reg_pmmir = read_pmmir ( );
11851116 else
11861117 cpu_pmu -> reg_pmmir = 0 ;
11871118}
0 commit comments