@@ -2614,6 +2614,57 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
26142614 }
26152615}
26162616
2617+ static __always_inline void
2618+ __intel_pmu_handle_pebs_record (struct pt_regs * iregs ,
2619+ struct pt_regs * regs ,
2620+ struct perf_sample_data * data ,
2621+ void * at , u64 pebs_status ,
2622+ short * counts , void * * last ,
2623+ setup_fn setup_sample )
2624+ {
2625+ struct cpu_hw_events * cpuc = this_cpu_ptr (& cpu_hw_events );
2626+ struct perf_event * event ;
2627+ int bit ;
2628+
2629+ for_each_set_bit (bit , (unsigned long * )& pebs_status , X86_PMC_IDX_MAX ) {
2630+ event = cpuc -> events [bit ];
2631+
2632+ if (WARN_ON_ONCE (!event ) ||
2633+ WARN_ON_ONCE (!event -> attr .precise_ip ))
2634+ continue ;
2635+
2636+ if (counts [bit ]++ ) {
2637+ __intel_pmu_pebs_event (event , iregs , regs , data ,
2638+ last [bit ], setup_sample );
2639+ }
2640+
2641+ last [bit ] = at ;
2642+ }
2643+ }
2644+
2645+ static __always_inline void
2646+ __intel_pmu_handle_last_pebs_record (struct pt_regs * iregs ,
2647+ struct pt_regs * regs ,
2648+ struct perf_sample_data * data ,
2649+ u64 mask , short * counts , void * * last ,
2650+ setup_fn setup_sample )
2651+ {
2652+ struct cpu_hw_events * cpuc = this_cpu_ptr (& cpu_hw_events );
2653+ struct perf_event * event ;
2654+ int bit ;
2655+
2656+ for_each_set_bit (bit , (unsigned long * )& mask , X86_PMC_IDX_MAX ) {
2657+ if (!counts [bit ])
2658+ continue ;
2659+
2660+ event = cpuc -> events [bit ];
2661+
2662+ __intel_pmu_pebs_last_event (event , iregs , regs , data , last [bit ],
2663+ counts [bit ], setup_sample );
2664+ }
2665+
2666+ }
2667+
/*
 * Drain the adaptive PEBS buffer (Ice Lake and later record format).
 * Records whose format_size matches the expected pebs_record_size are fed to
 * __intel_pmu_handle_pebs_record(); after the walk, the pending last record
 * of each counter is flushed via __intel_pmu_handle_last_pebs_record().
 *
 * NOTE(review): this span is a diff fragment, not plain source -- lines
 * starting with "@@" are hunk headers and "-"/"+" mark removed/added lines;
 * context lines between hunks are not visible here.
 */
26172668static void intel_pmu_drain_pebs_icl (struct pt_regs * iregs , struct perf_sample_data * data )
26182669{
26192670 short counts [INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS ] = {};
@@ -2623,9 +2674,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
26232674 struct x86_perf_regs perf_regs ;
26242675 struct pt_regs * regs = & perf_regs .regs ;
26252676 struct pebs_basic * basic ;
/* 'event' and 'bit' locals move into the factored-out helpers below. */
2626- struct perf_event * event ;
26272677 void * base , * at , * top ;
2628- int bit ;
26292678 u64 mask ;
26302679
26312680 if (!x86_pmu .pebs_active )
@@ -2638,6 +2687,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
26382687
26392688 mask = hybrid (cpuc -> pmu , pebs_events_mask ) |
26402689 (hybrid (cpuc -> pmu , fixed_cntr_mask64 ) << INTEL_PMC_IDX_FIXED );
/*
 * New: restrict the mask to PEBS-enabled counters once up front, instead of
 * ANDing cpuc->pebs_enabled into pebs_status for every record (removed line
 * 2658 below).
 */
2690+ mask &= cpuc -> pebs_enabled ;
26412691
26422692 if (unlikely (base >= top )) {
26432693 intel_pmu_pebs_event_update_no_drain (cpuc , mask );
@@ -2655,31 +2705,14 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
26552705 if (basic -> format_size != cpuc -> pebs_record_size )
26562706 continue ;
26572707
2658- pebs_status = basic -> applicable_counters & cpuc -> pebs_enabled & mask ;
2659- for_each_set_bit (bit , (unsigned long * )& pebs_status , X86_PMC_IDX_MAX ) {
2660- event = cpuc -> events [bit ];
2661-
2662- if (WARN_ON_ONCE (!event ) ||
2663- WARN_ON_ONCE (!event -> attr .precise_ip ))
2664- continue ;
2665-
2666- if (counts [bit ]++ ) {
2667- __intel_pmu_pebs_event (event , iregs , regs , data , last [bit ],
2668- setup_pebs_adaptive_sample_data );
2669- }
2670- last [bit ] = at ;
2671- }
/* Per-record processing now delegates to the shared helper. */
2708+ pebs_status = mask & basic -> applicable_counters ;
2709+ __intel_pmu_handle_pebs_record (iregs , regs , data , at ,
2710+ pebs_status , counts , last ,
2711+ setup_pebs_adaptive_sample_data );
26722712 }
26732713
2674- for_each_set_bit (bit , (unsigned long * )& mask , X86_PMC_IDX_MAX ) {
2675- if (!counts [bit ])
2676- continue ;
2677-
2678- event = cpuc -> events [bit ];
2679-
2680- __intel_pmu_pebs_last_event (event , iregs , regs , data , last [bit ],
2681- counts [bit ], setup_pebs_adaptive_sample_data );
2682- }
/* Final-record flush likewise moves to the shared helper. */
2714+ __intel_pmu_handle_last_pebs_record (iregs , regs , data , mask , counts , last ,
2715+ setup_pebs_adaptive_sample_data );
26832716}
26842717
26852718static void __init intel_arch_pebs_init (void )