@@ -113,69 +113,6 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
113113 [C (ITLB )][C (OP_WRITE )][C (RESULT_MISS )] = ARMV6_PERFCTR_ITLB_MISS ,
114114};
115115
/*
 * Hardware event numbers for the ARM11 MPCore performance monitor.
 * These are the raw event codes programmed into the PMU control
 * register; 0x9 has no assigned event, and 0xFF selects the
 * dedicated cycle counter.
 */
enum armv6mpcore_perf_types {
	ARMV6MPCORE_PERFCTR_ICACHE_MISS		= 0x0,
	ARMV6MPCORE_PERFCTR_IBUF_STALL		= 0x1,
	ARMV6MPCORE_PERFCTR_DDEP_STALL		= 0x2,
	ARMV6MPCORE_PERFCTR_ITLB_MISS		= 0x3,
	ARMV6MPCORE_PERFCTR_DTLB_MISS		= 0x4,
	ARMV6MPCORE_PERFCTR_BR_EXEC		= 0x5,
	ARMV6MPCORE_PERFCTR_BR_NOTPREDICT	= 0x6,
	ARMV6MPCORE_PERFCTR_BR_MISPREDICT	= 0x7,
	ARMV6MPCORE_PERFCTR_INSTR_EXEC		= 0x8,
	ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS	= 0xA,
	ARMV6MPCORE_PERFCTR_DCACHE_RDMISS	= 0xB,
	ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS	= 0xC,
	ARMV6MPCORE_PERFCTR_DCACHE_WRMISS	= 0xD,
	ARMV6MPCORE_PERFCTR_DCACHE_EVICTION	= 0xE,
	ARMV6MPCORE_PERFCTR_SW_PC_CHANGE	= 0xF,
	ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS	= 0x10,
	ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS	= 0x11,
	ARMV6MPCORE_PERFCTR_LSU_FULL_STALL	= 0x12,
	ARMV6MPCORE_PERFCTR_WBUF_DRAINED	= 0x13,
	ARMV6MPCORE_PERFCTR_CPU_CYCLES		= 0xFF,
};
138-
139- /*
140- * The hardware events that we support. We do support cache operations but
141- * we have harvard caches and no way to combine instruction and data
142- * accesses/misses in hardware.
143- */
144- static const unsigned armv6mpcore_perf_map [PERF_COUNT_HW_MAX ] = {
145- PERF_MAP_ALL_UNSUPPORTED ,
146- [PERF_COUNT_HW_CPU_CYCLES ] = ARMV6MPCORE_PERFCTR_CPU_CYCLES ,
147- [PERF_COUNT_HW_INSTRUCTIONS ] = ARMV6MPCORE_PERFCTR_INSTR_EXEC ,
148- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS ] = ARMV6MPCORE_PERFCTR_BR_EXEC ,
149- [PERF_COUNT_HW_BRANCH_MISSES ] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT ,
150- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND ] = ARMV6MPCORE_PERFCTR_IBUF_STALL ,
151- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND ] = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL ,
152- };
153-
154- static const unsigned armv6mpcore_perf_cache_map [PERF_COUNT_HW_CACHE_MAX ]
155- [PERF_COUNT_HW_CACHE_OP_MAX ]
156- [PERF_COUNT_HW_CACHE_RESULT_MAX ] = {
157- PERF_CACHE_MAP_ALL_UNSUPPORTED ,
158-
159- [C (L1D )][C (OP_READ )][C (RESULT_ACCESS )] = ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS ,
160- [C (L1D )][C (OP_READ )][C (RESULT_MISS )] = ARMV6MPCORE_PERFCTR_DCACHE_RDMISS ,
161- [C (L1D )][C (OP_WRITE )][C (RESULT_ACCESS )] = ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS ,
162- [C (L1D )][C (OP_WRITE )][C (RESULT_MISS )] = ARMV6MPCORE_PERFCTR_DCACHE_WRMISS ,
163-
164- [C (L1I )][C (OP_READ )][C (RESULT_MISS )] = ARMV6MPCORE_PERFCTR_ICACHE_MISS ,
165-
166- /*
167- * The ARM performance counters can count micro DTLB misses, micro ITLB
168- * misses and main TLB misses. There isn't an event for TLB misses, so
169- * use the micro misses here and if users want the main TLB misses they
170- * can use a raw counter.
171- */
172- [C (DTLB )][C (OP_READ )][C (RESULT_MISS )] = ARMV6MPCORE_PERFCTR_DTLB_MISS ,
173- [C (DTLB )][C (OP_WRITE )][C (RESULT_MISS )] = ARMV6MPCORE_PERFCTR_DTLB_MISS ,
174-
175- [C (ITLB )][C (OP_READ )][C (RESULT_MISS )] = ARMV6MPCORE_PERFCTR_ITLB_MISS ,
176- [C (ITLB )][C (OP_WRITE )][C (RESULT_MISS )] = ARMV6MPCORE_PERFCTR_ITLB_MISS ,
177- };
178-
179116static inline unsigned long
180117armv6_pmcr_read (void )
181118{
@@ -452,37 +389,6 @@ static void armv6pmu_disable_event(struct perf_event *event)
452389 raw_spin_unlock_irqrestore (& events -> pmu_lock , flags );
453390}
454391
455- static void armv6mpcore_pmu_disable_event (struct perf_event * event )
456- {
457- unsigned long val , mask , flags , evt = 0 ;
458- struct arm_pmu * cpu_pmu = to_arm_pmu (event -> pmu );
459- struct hw_perf_event * hwc = & event -> hw ;
460- struct pmu_hw_events * events = this_cpu_ptr (cpu_pmu -> hw_events );
461- int idx = hwc -> idx ;
462-
463- if (ARMV6_CYCLE_COUNTER == idx ) {
464- mask = ARMV6_PMCR_CCOUNT_IEN ;
465- } else if (ARMV6_COUNTER0 == idx ) {
466- mask = ARMV6_PMCR_COUNT0_IEN ;
467- } else if (ARMV6_COUNTER1 == idx ) {
468- mask = ARMV6_PMCR_COUNT1_IEN ;
469- } else {
470- WARN_ONCE (1 , "invalid counter number (%d)\n" , idx );
471- return ;
472- }
473-
474- /*
475- * Unlike UP ARMv6, we don't have a way of stopping the counters. We
476- * simply disable the interrupt reporting.
477- */
478- raw_spin_lock_irqsave (& events -> pmu_lock , flags );
479- val = armv6_pmcr_read ();
480- val &= ~mask ;
481- val |= evt ;
482- armv6_pmcr_write (val );
483- raw_spin_unlock_irqrestore (& events -> pmu_lock , flags );
484- }
485-
486392static int armv6_map_event (struct perf_event * event )
487393{
488394 return armpmu_map_event (event , & armv6_perf_map ,
0 commit comments