 * MSR_CORE_C1_RES: CORE C1 Residency Counter
 *		    perf code: 0x00
 *		    Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL,
 *				     MTL,SRF
 *		    Scope: Core (each processor core has a MSR)
 * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *			  perf code: 0x01
 *			  perf code: 0x02
 *			  Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *					   SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
 *					   TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF
 *			  Scope: Core
 * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *			  perf code: 0x03
 *			  perf code: 0x02
 *			  Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *					   SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
 *					   TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF
 *			  Scope: Package (physical package)
 * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
 *			 perf code: 0x03
 *			 Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
 *					  TNT,RKL,ADL,RPL,MTL
 *			 Scope: Package (physical package)
 * MSR_MODULE_C6_RES_MS: Module C6 Residency Counter.
 *			 perf code: 0x00
 *			 Available model: SRF
 *			 Scope: A cluster of cores that share an L2 cache
 *
 */

@@ -130,6 +134,7 @@ static ssize_t cstate_get_attr_cpumask(struct device *dev,
/*
 * Per-CPU-model description of the available C-state residency counters.
 * Each *_events field is a bitmask indexed by the corresponding
 * PERF_CSTATE_*_RES enum; a set bit means the model exposes that
 * residency MSR at that scope.
 */
struct cstate_model {
	unsigned long		core_events;	/* per-core residency counters */
	unsigned long		pkg_events;	/* per-package residency counters */
	unsigned long		module_events;	/* per-module (cluster) residency counters */
	unsigned long		quirks;		/* model-specific quirk flags */
};
135140
@@ -270,6 +275,28 @@ static struct perf_msr pkg_msr[] = {
270275
271276static cpumask_t cstate_pkg_cpu_mask ;
272277
278+ /* cstate_module PMU */
279+ static struct pmu cstate_module_pmu ;
280+ static bool has_cstate_module ;
281+
282+ enum perf_cstate_module_events {
283+ PERF_CSTATE_MODULE_C6_RES = 0 ,
284+
285+ PERF_CSTATE_MODULE_EVENT_MAX ,
286+ };
287+
288+ PMU_EVENT_ATTR_STRING (c6 - residency , attr_cstate_module_c6 , "event=0x00" );
289+
290+ static unsigned long module_msr_mask ;
291+
292+ PMU_EVENT_GROUP (events , cstate_module_c6 );
293+
294+ static struct perf_msr module_msr [] = {
295+ [PERF_CSTATE_MODULE_C6_RES ] = { MSR_MODULE_C6_RES_MS , & group_cstate_module_c6 , test_msr },
296+ };
297+
298+ static cpumask_t cstate_module_cpu_mask ;
299+
273300static ssize_t cstate_get_attr_cpumask (struct device * dev ,
274301 struct device_attribute * attr ,
275302 char * buf )
@@ -280,6 +307,8 @@ static ssize_t cstate_get_attr_cpumask(struct device *dev,
280307 return cpumap_print_to_pagebuf (true, buf , & cstate_core_cpu_mask );
281308 else if (pmu == & cstate_pkg_pmu )
282309 return cpumap_print_to_pagebuf (true, buf , & cstate_pkg_cpu_mask );
310+ else if (pmu == & cstate_module_pmu )
311+ return cpumap_print_to_pagebuf (true, buf , & cstate_module_cpu_mask );
283312 else
284313 return 0 ;
285314}
@@ -320,6 +349,15 @@ static int cstate_pmu_event_init(struct perf_event *event)
320349 event -> hw .event_base = pkg_msr [cfg ].msr ;
321350 cpu = cpumask_any_and (& cstate_pkg_cpu_mask ,
322351 topology_die_cpumask (event -> cpu ));
352+ } else if (event -> pmu == & cstate_module_pmu ) {
353+ if (cfg >= PERF_CSTATE_MODULE_EVENT_MAX )
354+ return - EINVAL ;
355+ cfg = array_index_nospec ((unsigned long )cfg , PERF_CSTATE_MODULE_EVENT_MAX );
356+ if (!(module_msr_mask & (1 << cfg )))
357+ return - EINVAL ;
358+ event -> hw .event_base = module_msr [cfg ].msr ;
359+ cpu = cpumask_any_and (& cstate_module_cpu_mask ,
360+ topology_cluster_cpumask (event -> cpu ));
323361 } else {
324362 return - ENOENT ;
325363 }
@@ -407,6 +445,17 @@ static int cstate_cpu_exit(unsigned int cpu)
407445 perf_pmu_migrate_context (& cstate_pkg_pmu , cpu , target );
408446 }
409447 }
448+
449+ if (has_cstate_module &&
450+ cpumask_test_and_clear_cpu (cpu , & cstate_module_cpu_mask )) {
451+
452+ target = cpumask_any_but (topology_cluster_cpumask (cpu ), cpu );
453+ /* Migrate events if there is a valid target */
454+ if (target < nr_cpu_ids ) {
455+ cpumask_set_cpu (target , & cstate_module_cpu_mask );
456+ perf_pmu_migrate_context (& cstate_module_pmu , cpu , target );
457+ }
458+ }
410459 return 0 ;
411460}
412461
@@ -433,6 +482,15 @@ static int cstate_cpu_init(unsigned int cpu)
433482 if (has_cstate_pkg && target >= nr_cpu_ids )
434483 cpumask_set_cpu (cpu , & cstate_pkg_cpu_mask );
435484
485+ /*
486+ * If this is the first online thread of that cluster, set it
487+ * in the cluster cpu mask as the designated reader.
488+ */
489+ target = cpumask_any_and (& cstate_module_cpu_mask ,
490+ topology_cluster_cpumask (cpu ));
491+ if (has_cstate_module && target >= nr_cpu_ids )
492+ cpumask_set_cpu (cpu , & cstate_module_cpu_mask );
493+
436494 return 0 ;
437495}
438496
@@ -455,6 +513,11 @@ static const struct attribute_group *pkg_attr_update[] = {
455513 NULL ,
456514};
457515
516+ static const struct attribute_group * module_attr_update [] = {
517+ & group_cstate_module_c6 ,
518+ NULL
519+ };
520+
458521static struct pmu cstate_core_pmu = {
459522 .attr_groups = cstate_attr_groups ,
460523 .attr_update = core_attr_update ,
@@ -485,6 +548,21 @@ static struct pmu cstate_pkg_pmu = {
485548 .module = THIS_MODULE ,
486549};
487550
551+ static struct pmu cstate_module_pmu = {
552+ .attr_groups = cstate_attr_groups ,
553+ .attr_update = module_attr_update ,
554+ .name = "cstate_module" ,
555+ .task_ctx_nr = perf_invalid_context ,
556+ .event_init = cstate_pmu_event_init ,
557+ .add = cstate_pmu_event_add ,
558+ .del = cstate_pmu_event_del ,
559+ .start = cstate_pmu_event_start ,
560+ .stop = cstate_pmu_event_stop ,
561+ .read = cstate_pmu_event_update ,
562+ .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE ,
563+ .module = THIS_MODULE ,
564+ };
565+
488566static const struct cstate_model nhm_cstates __initconst = {
489567 .core_events = BIT (PERF_CSTATE_CORE_C3_RES ) |
490568 BIT (PERF_CSTATE_CORE_C6_RES ),
@@ -599,6 +677,15 @@ static const struct cstate_model glm_cstates __initconst = {
599677 BIT (PERF_CSTATE_PKG_C10_RES ),
600678};
601679
680+ static const struct cstate_model srf_cstates __initconst = {
681+ .core_events = BIT (PERF_CSTATE_CORE_C1_RES ) |
682+ BIT (PERF_CSTATE_CORE_C6_RES ),
683+
684+ .pkg_events = BIT (PERF_CSTATE_PKG_C6_RES ),
685+
686+ .module_events = BIT (PERF_CSTATE_MODULE_C6_RES ),
687+ };
688+
602689
603690static const struct x86_cpu_id intel_cstates_match [] __initconst = {
604691 X86_MATCH_INTEL_FAM6_MODEL (NEHALEM , & nhm_cstates ),
@@ -651,6 +738,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
651738 X86_MATCH_INTEL_FAM6_MODEL (ATOM_TREMONT , & glm_cstates ),
652739 X86_MATCH_INTEL_FAM6_MODEL (ATOM_TREMONT_L , & glm_cstates ),
653740 X86_MATCH_INTEL_FAM6_MODEL (ATOM_GRACEMONT , & adl_cstates ),
741+ X86_MATCH_INTEL_FAM6_MODEL (ATOM_CRESTMONT_X , & srf_cstates ),
654742
655743 X86_MATCH_INTEL_FAM6_MODEL (ICELAKE_L , & icl_cstates ),
656744 X86_MATCH_INTEL_FAM6_MODEL (ICELAKE , & icl_cstates ),
@@ -692,10 +780,14 @@ static int __init cstate_probe(const struct cstate_model *cm)
692780 pkg_msr_mask = perf_msr_probe (pkg_msr , PERF_CSTATE_PKG_EVENT_MAX ,
693781 true, (void * ) & cm -> pkg_events );
694782
783+ module_msr_mask = perf_msr_probe (module_msr , PERF_CSTATE_MODULE_EVENT_MAX ,
784+ true, (void * ) & cm -> module_events );
785+
695786 has_cstate_core = !!core_msr_mask ;
696787 has_cstate_pkg = !!pkg_msr_mask ;
788+ has_cstate_module = !!module_msr_mask ;
697789
698- return (has_cstate_core || has_cstate_pkg ) ? 0 : - ENODEV ;
790+ return (has_cstate_core || has_cstate_pkg || has_cstate_module ) ? 0 : - ENODEV ;
699791}
700792
701793static inline void cstate_cleanup (void )
@@ -708,6 +800,9 @@ static inline void cstate_cleanup(void)
708800
709801 if (has_cstate_pkg )
710802 perf_pmu_unregister (& cstate_pkg_pmu );
803+
804+ if (has_cstate_module )
805+ perf_pmu_unregister (& cstate_module_pmu );
711806}
712807
713808static int __init cstate_init (void )
@@ -744,6 +839,16 @@ static int __init cstate_init(void)
744839 return err ;
745840 }
746841 }
842+
843+ if (has_cstate_module ) {
844+ err = perf_pmu_register (& cstate_module_pmu , cstate_module_pmu .name , -1 );
845+ if (err ) {
846+ has_cstate_module = false;
847+ pr_info ("Failed to register cstate cluster pmu\n" );
848+ cstate_cleanup ();
849+ return err ;
850+ }
851+ }
747852 return 0 ;
748853}
749854
0 commit comments