@@ -94,15 +94,40 @@ void __init intel_pmu_pebs_data_source_nhm(void)
9494 pebs_data_source [0x07 ] = OP_LH | P (LVL , L3 ) | LEVEL (L3 ) | P (SNOOP , HITM );
9595}
9696
97- void __init intel_pmu_pebs_data_source_skl (bool pmem )
97+ static void __init __intel_pmu_pebs_data_source_skl (bool pmem , u64 * data_source )
9898{
9999 u64 pmem_or_l4 = pmem ? LEVEL (PMEM ) : LEVEL (L4 );
100100
101- pebs_data_source [0x08 ] = OP_LH | pmem_or_l4 | P (SNOOP , HIT );
102- pebs_data_source [0x09 ] = OP_LH | pmem_or_l4 | REM | P (SNOOP , HIT );
103- pebs_data_source [0x0b ] = OP_LH | LEVEL (RAM ) | REM | P (SNOOP , NONE );
104- pebs_data_source [0x0c ] = OP_LH | LEVEL (ANY_CACHE ) | REM | P (SNOOPX , FWD );
105- pebs_data_source [0x0d ] = OP_LH | LEVEL (ANY_CACHE ) | REM | P (SNOOP , HITM );
101+ data_source [0x08 ] = OP_LH | pmem_or_l4 | P (SNOOP , HIT );
102+ data_source [0x09 ] = OP_LH | pmem_or_l4 | REM | P (SNOOP , HIT );
103+ data_source [0x0b ] = OP_LH | LEVEL (RAM ) | REM | P (SNOOP , NONE );
104+ data_source [0x0c ] = OP_LH | LEVEL (ANY_CACHE ) | REM | P (SNOOPX , FWD );
105+ data_source [0x0d ] = OP_LH | LEVEL (ANY_CACHE ) | REM | P (SNOOP , HITM );
106+ }
107+
108+ void __init intel_pmu_pebs_data_source_skl (bool pmem )
109+ {
110+ __intel_pmu_pebs_data_source_skl (pmem , pebs_data_source );
111+ }
112+
113+ static void __init intel_pmu_pebs_data_source_grt (u64 * data_source )
114+ {
115+ data_source [0x05 ] = OP_LH | P (LVL , L3 ) | LEVEL (L3 ) | P (SNOOP , HIT );
116+ data_source [0x06 ] = OP_LH | P (LVL , L3 ) | LEVEL (L3 ) | P (SNOOP , HITM );
117+ data_source [0x08 ] = OP_LH | P (LVL , L3 ) | LEVEL (L3 ) | P (SNOOPX , FWD );
118+ }
119+
120+ void __init intel_pmu_pebs_data_source_adl (void )
121+ {
122+ u64 * data_source ;
123+
124+ data_source = x86_pmu .hybrid_pmu [X86_HYBRID_PMU_CORE_IDX ].pebs_data_source ;
125+ memcpy (data_source , pebs_data_source , sizeof (pebs_data_source ));
126+ __intel_pmu_pebs_data_source_skl (false, data_source );
127+
128+ data_source = x86_pmu .hybrid_pmu [X86_HYBRID_PMU_ATOM_IDX ].pebs_data_source ;
129+ memcpy (data_source , pebs_data_source , sizeof (pebs_data_source ));
130+ intel_pmu_pebs_data_source_grt (data_source );
106131}
107132
108133static u64 precise_store_data (u64 status )
@@ -198,7 +223,7 @@ u64 adl_latency_data_small(struct perf_event *event, u64 status)
198223
199224 dse .val = status ;
200225
201- val = pebs_data_source [dse .ld_dse ];
226+ val = hybrid_var ( event -> pmu , pebs_data_source ) [dse .ld_dse ];
202227
203228 /*
204229 * For the atom core on ADL,
@@ -214,7 +239,7 @@ u64 adl_latency_data_small(struct perf_event *event, u64 status)
214239 return val ;
215240}
216241
217- static u64 load_latency_data (u64 status )
242+ static u64 load_latency_data (struct perf_event * event , u64 status )
218243{
219244 union intel_x86_pebs_dse dse ;
220245 u64 val ;
@@ -224,7 +249,7 @@ static u64 load_latency_data(u64 status)
224249 /*
225250 * use the mapping table for bit 0-3
226251 */
227- val = pebs_data_source [dse .ld_dse ];
252+ val = hybrid_var ( event -> pmu , pebs_data_source ) [dse .ld_dse ];
228253
229254 /*
230255 * Nehalem models do not support TLB, Lock infos
@@ -263,7 +288,7 @@ static u64 load_latency_data(u64 status)
263288 return val ;
264289}
265290
266- static u64 store_latency_data (u64 status )
291+ static u64 store_latency_data (struct perf_event * event , u64 status )
267292{
268293 union intel_x86_pebs_dse dse ;
269294 u64 val ;
@@ -273,7 +298,7 @@ static u64 store_latency_data(u64 status)
273298 /*
274299 * use the mapping table for bit 0-3
275300 */
276- val = pebs_data_source [dse .st_lat_dse ];
301+ val = hybrid_var ( event -> pmu , pebs_data_source ) [dse .st_lat_dse ];
277302
278303 pebs_set_tlb_lock (& val , dse .st_lat_stlb_miss , dse .st_lat_locked );
279304
@@ -1459,9 +1484,9 @@ static u64 get_data_src(struct perf_event *event, u64 aux)
14591484 bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC );
14601485
14611486 if (fl & PERF_X86_EVENT_PEBS_LDLAT )
1462- val = load_latency_data (aux );
1487+ val = load_latency_data (event , aux );
14631488 else if (fl & PERF_X86_EVENT_PEBS_STLAT )
1464- val = store_latency_data (aux );
1489+ val = store_latency_data (event , aux );
14651490 else if (fl & PERF_X86_EVENT_PEBS_LAT_HYBRID )
14661491 val = x86_pmu .pebs_latency_data (event , aux );
14671492 else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC ))