@@ -189,14 +189,14 @@ static void group_init(struct psi_group *group)
189189 INIT_DELAYED_WORK (& group -> avgs_work , psi_avgs_work );
190190 mutex_init (& group -> avgs_lock );
191191 /* Init trigger-related members */
192- atomic_set (& group -> poll_scheduled , 0 );
193- mutex_init (& group -> trigger_lock );
194- INIT_LIST_HEAD (& group -> triggers );
195- group -> poll_min_period = U32_MAX ;
196- group -> polling_next_update = ULLONG_MAX ;
197- init_waitqueue_head (& group -> poll_wait );
198- timer_setup (& group -> poll_timer , poll_timer_fn , 0 );
199- rcu_assign_pointer (group -> poll_task , NULL );
192+ atomic_set (& group -> rtpoll_scheduled , 0 );
193+ mutex_init (& group -> rtpoll_trigger_lock );
194+ INIT_LIST_HEAD (& group -> rtpoll_triggers );
195+ group -> rtpoll_min_period = U32_MAX ;
196+ group -> rtpoll_next_update = ULLONG_MAX ;
197+ init_waitqueue_head (& group -> rtpoll_wait );
198+ timer_setup (& group -> rtpoll_timer , poll_timer_fn , 0 );
199+ rcu_assign_pointer (group -> rtpoll_task , NULL );
200200}
201201
202202void __init psi_init (void )
@@ -440,11 +440,11 @@ static u64 update_triggers(struct psi_group *group, u64 now)
440440 * On subsequent updates, calculate growth deltas and let
441441 * watchers know when their specified thresholds are exceeded.
442442 */
443- list_for_each_entry (t , & group -> triggers , node ) {
443+ list_for_each_entry (t , & group -> rtpoll_triggers , node ) {
444444 u64 growth ;
445445 bool new_stall ;
446446
447- new_stall = group -> polling_total [t -> state ] != total [t -> state ];
447+ new_stall = group -> rtpoll_total [t -> state ] != total [t -> state ];
448448
449449 /* Check for stall activity or a previous threshold breach */
450450 if (!new_stall && !t -> pending_event )
@@ -486,10 +486,10 @@ static u64 update_triggers(struct psi_group *group, u64 now)
486486 }
487487
488488 if (update_total )
489- memcpy (group -> polling_total , total ,
490- sizeof (group -> polling_total ));
489+ memcpy (group -> rtpoll_total , total ,
490+ sizeof (group -> rtpoll_total ));
491491
492- return now + group -> poll_min_period ;
492+ return now + group -> rtpoll_min_period ;
493493}
494494
495495static u64 update_averages (struct psi_group * group , u64 now )
@@ -582,53 +582,53 @@ static void init_triggers(struct psi_group *group, u64 now)
582582{
583583 struct psi_trigger * t ;
584584
585- list_for_each_entry (t , & group -> triggers , node )
585+ list_for_each_entry (t , & group -> rtpoll_triggers , node )
586586 window_reset (& t -> win , now ,
587587 group -> total [PSI_POLL ][t -> state ], 0 );
588- memcpy (group -> polling_total , group -> total [PSI_POLL ],
589- sizeof (group -> polling_total ));
590- group -> polling_next_update = now + group -> poll_min_period ;
588+ memcpy (group -> rtpoll_total , group -> total [PSI_POLL ],
589+ sizeof (group -> rtpoll_total ));
590+ group -> rtpoll_next_update = now + group -> rtpoll_min_period ;
591591}
592592
593593/* Schedule polling if it's not already scheduled or forced. */
594- static void psi_schedule_poll_work (struct psi_group * group , unsigned long delay ,
594+ static void psi_schedule_rtpoll_work (struct psi_group * group , unsigned long delay ,
595595 bool force )
596596{
597597 struct task_struct * task ;
598598
599599 /*
600600 * atomic_xchg should be called even when !force to provide a
601- * full memory barrier (see the comment inside psi_poll_work ).
601+ * full memory barrier (see the comment inside psi_rtpoll_work ).
602602 */
603- if (atomic_xchg (& group -> poll_scheduled , 1 ) && !force )
603+ if (atomic_xchg (& group -> rtpoll_scheduled , 1 ) && !force )
604604 return ;
605605
606606 rcu_read_lock ();
607607
608- task = rcu_dereference (group -> poll_task );
608+ task = rcu_dereference (group -> rtpoll_task );
609609 /*
610610 * kworker might be NULL in case psi_trigger_destroy races with
611611 * psi_task_change (hotpath) which can't use locks
612612 */
613613 if (likely (task ))
614- mod_timer (& group -> poll_timer , jiffies + delay );
614+ mod_timer (& group -> rtpoll_timer , jiffies + delay );
615615 else
616- atomic_set (& group -> poll_scheduled , 0 );
616+ atomic_set (& group -> rtpoll_scheduled , 0 );
617617
618618 rcu_read_unlock ();
619619}
620620
621- static void psi_poll_work (struct psi_group * group )
621+ static void psi_rtpoll_work (struct psi_group * group )
622622{
623623 bool force_reschedule = false;
624624 u32 changed_states ;
625625 u64 now ;
626626
627- mutex_lock (& group -> trigger_lock );
627+ mutex_lock (& group -> rtpoll_trigger_lock );
628628
629629 now = sched_clock ();
630630
631- if (now > group -> polling_until ) {
631+ if (now > group -> rtpoll_until ) {
632632 /*
633633 * We are either about to start or might stop polling if no
634634 * state change was recorded. Resetting poll_scheduled leaves
@@ -638,7 +638,7 @@ static void psi_poll_work(struct psi_group *group)
638638 * should be negligible and polling_next_update still keeps
639639 * updates correctly on schedule.
640640 */
641- atomic_set (& group -> poll_scheduled , 0 );
641+ atomic_set (& group -> rtpoll_scheduled , 0 );
642642 /*
643643 * A task change can race with the poll worker that is supposed to
644644 * report on it. To avoid missing events, ensure ordering between
@@ -667,60 +667,60 @@ static void psi_poll_work(struct psi_group *group)
667667
668668 collect_percpu_times (group , PSI_POLL , & changed_states );
669669
670- if (changed_states & group -> poll_states ) {
670+ if (changed_states & group -> rtpoll_states ) {
671671 /* Initialize trigger windows when entering polling mode */
672- if (now > group -> polling_until )
672+ if (now > group -> rtpoll_until )
673673 init_triggers (group , now );
674674
675675 /*
676676 * Keep the monitor active for at least the duration of the
677677 * minimum tracking window as long as monitor states are
678678 * changing.
679679 */
680- group -> polling_until = now +
681- group -> poll_min_period * UPDATES_PER_WINDOW ;
680+ group -> rtpoll_until = now +
681+ group -> rtpoll_min_period * UPDATES_PER_WINDOW ;
682682 }
683683
684- if (now > group -> polling_until ) {
685- group -> polling_next_update = ULLONG_MAX ;
684+ if (now > group -> rtpoll_until ) {
685+ group -> rtpoll_next_update = ULLONG_MAX ;
686686 goto out ;
687687 }
688688
689- if (now >= group -> polling_next_update )
690- group -> polling_next_update = update_triggers (group , now );
689+ if (now >= group -> rtpoll_next_update )
690+ group -> rtpoll_next_update = update_triggers (group , now );
691691
692- psi_schedule_poll_work (group ,
693- nsecs_to_jiffies (group -> polling_next_update - now ) + 1 ,
692+ psi_schedule_rtpoll_work (group ,
693+ nsecs_to_jiffies (group -> rtpoll_next_update - now ) + 1 ,
694694 force_reschedule );
695695
696696out :
697- mutex_unlock (& group -> trigger_lock );
697+ mutex_unlock (& group -> rtpoll_trigger_lock );
698698}
699699
700- static int psi_poll_worker (void * data )
700+ static int psi_rtpoll_worker (void * data )
701701{
702702 struct psi_group * group = (struct psi_group * )data ;
703703
704704 sched_set_fifo_low (current );
705705
706706 while (true) {
707- wait_event_interruptible (group -> poll_wait ,
708- atomic_cmpxchg (& group -> poll_wakeup , 1 , 0 ) ||
707+ wait_event_interruptible (group -> rtpoll_wait ,
708+ atomic_cmpxchg (& group -> rtpoll_wakeup , 1 , 0 ) ||
709709 kthread_should_stop ());
710710 if (kthread_should_stop ())
711711 break ;
712712
713- psi_poll_work (group );
713+ psi_rtpoll_work (group );
714714 }
715715 return 0 ;
716716}
717717
718718static void poll_timer_fn (struct timer_list * t )
719719{
720- struct psi_group * group = from_timer (group , t , poll_timer );
720+ struct psi_group * group = from_timer (group , t , rtpoll_timer );
721721
722- atomic_set (& group -> poll_wakeup , 1 );
723- wake_up_interruptible (& group -> poll_wait );
722+ atomic_set (& group -> rtpoll_wakeup , 1 );
723+ wake_up_interruptible (& group -> rtpoll_wait );
724724}
725725
726726static void record_times (struct psi_group_cpu * groupc , u64 now )
@@ -851,8 +851,8 @@ static void psi_group_change(struct psi_group *group, int cpu,
851851
852852 write_seqcount_end (& groupc -> seq );
853853
854- if (state_mask & group -> poll_states )
855- psi_schedule_poll_work (group , 1 , false);
854+ if (state_mask & group -> rtpoll_states )
855+ psi_schedule_rtpoll_work (group , 1 , false);
856856
857857 if (wake_clock && !delayed_work_pending (& group -> avgs_work ))
858858 schedule_delayed_work (& group -> avgs_work , PSI_FREQ );
@@ -1005,8 +1005,8 @@ void psi_account_irqtime(struct task_struct *task, u32 delta)
10051005
10061006 write_seqcount_end (& groupc -> seq );
10071007
1008- if (group -> poll_states & (1 << PSI_IRQ_FULL ))
1009- psi_schedule_poll_work (group , 1 , false);
1008+ if (group -> rtpoll_states & (1 << PSI_IRQ_FULL ))
1009+ psi_schedule_rtpoll_work (group , 1 , false);
10101010 } while ((group = group -> parent ));
10111011}
10121012#endif
@@ -1101,7 +1101,7 @@ void psi_cgroup_free(struct cgroup *cgroup)
11011101 cancel_delayed_work_sync (& cgroup -> psi -> avgs_work );
11021102 free_percpu (cgroup -> psi -> pcpu );
11031103 /* All triggers must be removed by now */
1104- WARN_ONCE (cgroup -> psi -> poll_states , "psi: trigger leak\n" );
1104+ WARN_ONCE (cgroup -> psi -> rtpoll_states , "psi: trigger leak\n" );
11051105 kfree (cgroup -> psi );
11061106}
11071107
@@ -1302,29 +1302,29 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
13021302 init_waitqueue_head (& t -> event_wait );
13031303 t -> pending_event = false;
13041304
1305- mutex_lock (& group -> trigger_lock );
1305+ mutex_lock (& group -> rtpoll_trigger_lock );
13061306
1307- if (!rcu_access_pointer (group -> poll_task )) {
1307+ if (!rcu_access_pointer (group -> rtpoll_task )) {
13081308 struct task_struct * task ;
13091309
1310- task = kthread_create (psi_poll_worker , group , "psimon" );
1310+ task = kthread_create (psi_rtpoll_worker , group , "psimon" );
13111311 if (IS_ERR (task )) {
13121312 kfree (t );
1313- mutex_unlock (& group -> trigger_lock );
1313+ mutex_unlock (& group -> rtpoll_trigger_lock );
13141314 return ERR_CAST (task );
13151315 }
1316- atomic_set (& group -> poll_wakeup , 0 );
1316+ atomic_set (& group -> rtpoll_wakeup , 0 );
13171317 wake_up_process (task );
1318- rcu_assign_pointer (group -> poll_task , task );
1318+ rcu_assign_pointer (group -> rtpoll_task , task );
13191319 }
13201320
1321- list_add (& t -> node , & group -> triggers );
1322- group -> poll_min_period = min (group -> poll_min_period ,
1321+ list_add (& t -> node , & group -> rtpoll_triggers );
1322+ group -> rtpoll_min_period = min (group -> rtpoll_min_period ,
13231323 div_u64 (t -> win .size , UPDATES_PER_WINDOW ));
1324- group -> nr_triggers [t -> state ]++ ;
1325- group -> poll_states |= (1 << t -> state );
1324+ group -> rtpoll_nr_triggers [t -> state ]++ ;
1325+ group -> rtpoll_states |= (1 << t -> state );
13261326
1327- mutex_unlock (& group -> trigger_lock );
1327+ mutex_unlock (& group -> rtpoll_trigger_lock );
13281328
13291329 return t ;
13301330}
@@ -1349,51 +1349,52 @@ void psi_trigger_destroy(struct psi_trigger *t)
13491349 */
13501350 wake_up_pollfree (& t -> event_wait );
13511351
1352- mutex_lock (& group -> trigger_lock );
1352+ mutex_lock (& group -> rtpoll_trigger_lock );
13531353
13541354 if (!list_empty (& t -> node )) {
13551355 struct psi_trigger * tmp ;
13561356 u64 period = ULLONG_MAX ;
13571357
13581358 list_del (& t -> node );
1359- group -> nr_triggers [t -> state ]-- ;
1360- if (!group -> nr_triggers [t -> state ])
1361- group -> poll_states &= ~(1 << t -> state );
1359+ group -> rtpoll_nr_triggers [t -> state ]-- ;
1360+ if (!group -> rtpoll_nr_triggers [t -> state ])
1361+ group -> rtpoll_states &= ~(1 << t -> state );
13621362 /* reset min update period for the remaining triggers */
1363- list_for_each_entry (tmp , & group -> triggers , node )
1363+ list_for_each_entry (tmp , & group -> rtpoll_triggers , node )
13641364 period = min (period , div_u64 (tmp -> win .size ,
13651365 UPDATES_PER_WINDOW ));
1366- group -> poll_min_period = period ;
1367- /* Destroy poll_task when the last trigger is destroyed */
1368- if (group -> poll_states == 0 ) {
1369- group -> polling_until = 0 ;
1366+ group -> rtpoll_min_period = period ;
1367+ /* Destroy rtpoll_task when the last trigger is destroyed */
1368+ if (group -> rtpoll_states == 0 ) {
1369+ group -> rtpoll_until = 0 ;
13701370 task_to_destroy = rcu_dereference_protected (
1371- group -> poll_task ,
1372- lockdep_is_held (& group -> trigger_lock ));
1373- rcu_assign_pointer (group -> poll_task , NULL );
1374- del_timer (& group -> poll_timer );
1371+ group -> rtpoll_task ,
1372+ lockdep_is_held (& group -> rtpoll_trigger_lock ));
1373+ rcu_assign_pointer (group -> rtpoll_task , NULL );
1374+ del_timer (& group -> rtpoll_timer );
13751375 }
13761376 }
13771377
1378- mutex_unlock (& group -> trigger_lock );
1378+ mutex_unlock (& group -> rtpoll_trigger_lock );
13791379
13801380 /*
1381- * Wait for psi_schedule_poll_work RCU to complete its read-side
1381+ * Wait for psi_schedule_rtpoll_work RCU to complete its read-side
13821382 * critical section before destroying the trigger and optionally the
1383- * poll_task .
1383+ * rtpoll_task .
13841384 */
13851385 synchronize_rcu ();
13861386 /*
1387- * Stop kthread 'psimon' after releasing trigger_lock to prevent a
1388- * deadlock while waiting for psi_poll_work to acquire trigger_lock
1387+ * Stop kthread 'psimon' after releasing rtpoll_trigger_lock to prevent
1388+ * a deadlock while waiting for psi_rtpoll_work to acquire
1389+ * rtpoll_trigger_lock
13891390 */
13901391 if (task_to_destroy ) {
13911392 /*
13921393 * After the RCU grace period has expired, the worker
1393- * can no longer be found through group->poll_task .
1394+ * can no longer be found through group->rtpoll_task .
13941395 */
13951396 kthread_stop (task_to_destroy );
1396- atomic_set (& group -> poll_scheduled , 0 );
1397+ atomic_set (& group -> rtpoll_scheduled , 0 );
13971398 }
13981399 kfree (t );
13991400}