22/*
33 * Performance event support - Processor Activity Instrumentation Facility
44 *
5- * Copyright IBM Corp. 2022
5+ * Copyright IBM Corp. 2022, 2026
66 * Author(s): Thomas Richter <tmricht@linux.ibm.com>
77 */
8- #define KMSG_COMPONENT "pai_crypto"
8+ #define KMSG_COMPONENT "pai"
99#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
1010
1111#include <linux/kernel.h>
@@ -49,7 +49,7 @@ struct pai_mapptr {
4949static struct pai_root { /* Anchor to per CPU data */
5050 refcount_t refcnt ; /* Overall active events */
5151 struct pai_mapptr __percpu * mapptr ;
52- } pai_root ;
52+ } pai_root [ PAI_PMU_MAX ] ;
5353
5454/* This table defines the different parameters of the PAI PMUs. During
5555 * initialization the machine dependent values are extracted and saved.
@@ -74,14 +74,14 @@ struct pai_pmu { /* Define PAI PMU characteristics */
7474static struct pai_pmu pai_pmu []; /* Forward declaration */
7575
7676/* Free per CPU data when the last event is removed. */
77- static void pai_root_free (void )
77+ static void pai_root_free (int idx )
7878{
79- if (refcount_dec_and_test (& pai_root .refcnt )) {
80- free_percpu (pai_root .mapptr );
81- pai_root .mapptr = NULL ;
79+ if (refcount_dec_and_test (& pai_root [ idx ] .refcnt )) {
80+ free_percpu (pai_root [ idx ] .mapptr );
81+ pai_root [ idx ] .mapptr = NULL ;
8282 }
83- debug_sprintf_event (paidbg , 5 , "%s root.refcount %d\n" , __func__ ,
84- refcount_read (& pai_root .refcnt ));
83+ debug_sprintf_event (paidbg , 5 , "%s root[%d].refcount %d\n" , __func__ ,
84+ idx , refcount_read (& pai_root [ idx ] .refcnt ));
8585}
8686
8787/*
@@ -90,14 +90,14 @@ static void pai_root_free(void)
9090 * CPUs possible, which might be larger than the number of CPUs currently
9191 * online.
9292 */
93- static int pai_root_alloc (void )
93+ static int pai_root_alloc (int idx )
9494{
95- if (!refcount_inc_not_zero (& pai_root .refcnt )) {
95+ if (!refcount_inc_not_zero (& pai_root [ idx ] .refcnt )) {
9696 /* The memory is already zeroed. */
97- pai_root .mapptr = alloc_percpu (struct pai_mapptr );
98- if (!pai_root .mapptr )
97+ pai_root [ idx ] .mapptr = alloc_percpu (struct pai_mapptr );
98+ if (!pai_root [ idx ] .mapptr )
9999 return - ENOMEM ;
100- refcount_set (& pai_root .refcnt , 1 );
100+ refcount_set (& pai_root [ idx ] .refcnt , 1 );
101101 }
102102 return 0 ;
103103}
@@ -119,17 +119,18 @@ static void pai_free(struct pai_mapptr *mp)
119119 */
120120static void pai_event_destroy_cpu (struct perf_event * event , int cpu )
121121{
122- struct pai_mapptr * mp = per_cpu_ptr (pai_root .mapptr , cpu );
122+ int idx = PAI_PMU_IDX (event );
123+ struct pai_mapptr * mp = per_cpu_ptr (pai_root [idx ].mapptr , cpu );
123124 struct pai_map * cpump = mp -> mapptr ;
124125
125126 mutex_lock (& pai_reserve_mutex );
126- debug_sprintf_event (paidbg , 5 , "%s event %#llx cpu %d users %d "
127- "refcnt %u\n" , __func__ , event -> attr .config ,
127+ debug_sprintf_event (paidbg , 5 , "%s event %#llx idx %d cpu %d users %d "
128+ "refcnt %u\n" , __func__ , event -> attr .config , idx ,
128129 event -> cpu , cpump -> active_events ,
129130 refcount_read (& cpump -> refcnt ));
130131 if (refcount_dec_and_test (& cpump -> refcnt ))
131132 pai_free (mp );
132- pai_root_free ();
133+ pai_root_free (idx );
133134 mutex_unlock (& pai_reserve_mutex );
134135}
135136
@@ -162,10 +163,10 @@ static u64 pai_getctr(unsigned long *page, int nr, unsigned long offset)
162163 */
163164static u64 pai_getdata (struct perf_event * event , bool kernel )
164165{
165- struct pai_mapptr * mp = this_cpu_ptr (pai_root .mapptr );
166- struct pai_map * cpump = mp -> mapptr ;
167166 int idx = PAI_PMU_IDX (event );
167+ struct pai_mapptr * mp = this_cpu_ptr (pai_root [idx ].mapptr );
168168 struct pai_pmu * pp = & pai_pmu [idx ];
169+ struct pai_map * cpump = mp -> mapptr ;
169170 unsigned int i ;
170171 u64 sum = 0 ;
171172
@@ -213,12 +214,12 @@ static int pai_alloc_cpu(struct perf_event *event, int cpu)
213214
214215 mutex_lock (& pai_reserve_mutex );
215216 /* Allocate root node */
216- rc = pai_root_alloc ();
217+ rc = pai_root_alloc (idx );
217218 if (rc )
218219 goto unlock ;
219220
220221 /* Allocate node for this event */
221- mp = per_cpu_ptr (pai_root .mapptr , cpu );
222+ mp = per_cpu_ptr (pai_root [ idx ] .mapptr , cpu );
222223 cpump = mp -> mapptr ;
223224 if (!cpump ) { /* Paicrypt_map allocated? */
224225 rc = - ENOMEM ;
@@ -250,7 +251,7 @@ static int pai_alloc_cpu(struct perf_event *event, int cpu)
250251 * the event in not created, its destroy() function is never
251252 * invoked. Adjust the reference counter for the anchor.
252253 */
253- pai_root_free ();
254+ pai_root_free (idx );
254255 }
255256unlock :
256257 mutex_unlock (& pai_reserve_mutex );
@@ -387,7 +388,7 @@ static void pai_start(struct perf_event *event, int flags,
387388{
388389 int idx = PAI_PMU_IDX (event );
389390 struct pai_pmu * pp = & pai_pmu [idx ];
390- struct pai_mapptr * mp = this_cpu_ptr (pai_root .mapptr );
391+ struct pai_mapptr * mp = this_cpu_ptr (pai_root [ idx ] .mapptr );
391392 struct pai_map * cpump = mp -> mapptr ;
392393 u64 sum ;
393394
@@ -413,9 +414,9 @@ static void paicrypt_start(struct perf_event *event, int flags)
413414
414415static int pai_add (struct perf_event * event , int flags )
415416{
416- struct pai_mapptr * mp = this_cpu_ptr (pai_root .mapptr );
417- struct pai_map * cpump = mp -> mapptr ;
418417 int idx = PAI_PMU_IDX (event );
418+ struct pai_mapptr * mp = this_cpu_ptr (pai_root [idx ].mapptr );
419+ struct pai_map * cpump = mp -> mapptr ;
419420 unsigned long ccd ;
420421
421422 if (++ cpump -> active_events == 1 ) {
@@ -437,9 +438,9 @@ static int paicrypt_add(struct perf_event *event, int flags)
437438static void pai_have_sample (struct perf_event * , struct pai_map * );
438439static void pai_stop (struct perf_event * event , int flags )
439440{
440- struct pai_mapptr * mp = this_cpu_ptr (pai_root .mapptr );
441- struct pai_map * cpump = mp -> mapptr ;
442441 int idx = PAI_PMU_IDX (event );
442+ struct pai_mapptr * mp = this_cpu_ptr (pai_root [idx ].mapptr );
443+ struct pai_map * cpump = mp -> mapptr ;
443444
444445 if (!event -> attr .sample_period ) { /* Counting */
445446 pai_pmu [idx ].pmu -> read (event );
@@ -462,9 +463,9 @@ static void paicrypt_stop(struct perf_event *event, int flags)
462463
463464static void pai_del (struct perf_event * event , int flags )
464465{
465- struct pai_mapptr * mp = this_cpu_ptr (pai_root .mapptr );
466- struct pai_map * cpump = mp -> mapptr ;
467466 int idx = PAI_PMU_IDX (event );
467+ struct pai_mapptr * mp = this_cpu_ptr (pai_root [idx ].mapptr );
468+ struct pai_map * cpump = mp -> mapptr ;
468469
469470 pai_pmu [idx ].pmu -> stop (event , PERF_EF_UPDATE );
470471 if (-- cpump -> active_events == 0 ) {
@@ -587,9 +588,9 @@ static void pai_have_sample(struct perf_event *event, struct pai_map *cpump)
587588}
588589
589590/* Check if there is data to be saved on schedule out of a task. */
590- static void pai_have_samples (void )
591+ static void pai_have_samples (int idx )
591592{
592- struct pai_mapptr * mp = this_cpu_ptr (pai_root .mapptr );
593+ struct pai_mapptr * mp = this_cpu_ptr (pai_root [ idx ] .mapptr );
593594 struct pai_map * cpump = mp -> mapptr ;
594595 struct perf_event * event ;
595596
@@ -607,7 +608,7 @@ static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx,
607608 * results on schedule_out and if page was dirty, save old values.
608609 */
609610 if (!sched_in )
610- pai_have_samples ();
611+ pai_have_samples (PAI_PMU_CRYPTO );
611612}
612613
613614/* Attribute definitions for paicrypt interface. As with other CPU
@@ -982,7 +983,7 @@ static int __init paipmu_setup(void)
982983 return install_ok ;
983984}
984985
985- static int __init paicrypt_init (void )
986+ static int __init pai_init (void )
986987{
987988 /* Setup s390dbf facility */
988989 paidbg = debug_register (KMSG_COMPONENT , 32 , 256 , 128 );
@@ -1001,4 +1002,4 @@ static int __init paicrypt_init(void)
10011002 return 0 ;
10021003}
10031004
1004- device_initcall (paicrypt_init );
1005+ device_initcall (pai_init );
0 commit comments