@@ -323,6 +323,46 @@ void tick_nohz_full_kick_cpu(int cpu)
323323 irq_work_queue_on (& per_cpu (nohz_full_kick_work , cpu ), cpu );
324324}
325325
/*
 * Review note: this hunk introduces tick_nohz_kick_task(), a targeted
 * replacement for kicking every nohz_full CPU.  It IPIs only the CPU the
 * task runs on so that CPU re-evaluates its tick dependency.  The kick is
 * skipped entirely when the task is not on a runqueue, and preemption is
 * disabled around the cpu_online()/kick pair so the online check and the
 * IPI are issued from a stable local CPU.  The two comment diagrams below
 * (from the patch itself) pair the atomic_fetch_or() full barrier in the
 * caller against the rq->lock barriers in __schedule().
 */
326+ static void tick_nohz_kick_task (struct task_struct * tsk )
327+ {
328+ int cpu ;
329+
330+ /*
331+ * If the task is not running, run_posix_cpu_timers()
332+ * has nothing to elapse, IPI can then be spared.
333+ *
334+ * activate_task() STORE p->tick_dep_mask
335+ * STORE p->on_rq
336+ * __schedule() (switch to task 'p') smp_mb() (atomic_fetch_or())
337+ * LOCK rq->lock LOAD p->on_rq
338+ * smp_mb__after_spin_lock()
339+ * tick_nohz_task_switch()
340+ * LOAD p->tick_dep_mask
341+ */
342+ if (!sched_task_on_rq (tsk ))
343+ return ;
344+
345+ /*
346+ * If the task concurrently migrates to another CPU,
347+ * we guarantee it sees the new tick dependency upon
348+ * schedule.
349+ *
350+ * set_task_cpu(p, cpu);
351+ * STORE p->cpu = @cpu
352+ * __schedule() (switch to task 'p')
353+ * LOCK rq->lock
354+ * smp_mb__after_spin_lock() STORE p->tick_dep_mask
355+ * tick_nohz_task_switch() smp_mb() (atomic_fetch_or())
356+ * LOAD p->tick_dep_mask LOAD p->cpu
357+ */
358+ cpu = task_cpu (tsk );
359+
360+ preempt_disable ();
361+ if (cpu_online (cpu ))
362+ tick_nohz_full_kick_cpu (cpu );
363+ preempt_enable ();
364+ }
365+
326366/*
327367 * Kick all full dynticks CPUs in order to force these to re-evaluate
328368 * their dependency on the tick and restart it if necessary.
@@ -405,19 +445,8 @@ EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);
405445 */
/*
 * Review note: the open-coded kick logic (kick self for 'current',
 * tick_nohz_full_kick_all() for any other task — the very fallback the
 * removed comment flagged as needing optimization) is replaced with a
 * single call to the new targeted tick_nohz_kick_task().  The kick still
 * happens only on the 0 -> nonzero transition of tick_dep_mask, as
 * guarded by the atomic_fetch_or() return value.
 */
406446void tick_nohz_dep_set_task (struct task_struct * tsk , enum tick_dep_bits bit )
407447{
408- if (!atomic_fetch_or (BIT (bit ), & tsk -> tick_dep_mask )) {
409- if (tsk == current ) {
410- preempt_disable ();
411- tick_nohz_full_kick ();
412- preempt_enable ();
413- } else {
414- /*
415- * Some future tick_nohz_full_kick_task()
416- * should optimize this.
417- */
418- tick_nohz_full_kick_all ();
419- }
420- }
448+ if (!atomic_fetch_or (BIT (bit ), & tsk -> tick_dep_mask ))
449+ tick_nohz_kick_task (tsk );
421450}
422451EXPORT_SYMBOL_GPL (tick_nohz_dep_set_task );
423452
@@ -431,9 +460,20 @@ EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task);
431460 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
432461 * per process timers.
433462 */
/*
 * Review note: the signature changes from taking a signal_struct to taking
 * a task_struct (sig is derived via tsk->signal), so the function can
 * assert the locking context.  Instead of the old blanket
 * tick_nohz_dep_set_all(), the dependency bit is set with
 * atomic_fetch_or() and, only on the 0 -> nonzero transition, each thread
 * of the group is kicked individually via tick_nohz_kick_task().  The
 * lockdep_assert_held() documents that callers must hold
 * tsk->sighand->siglock — presumably what makes the __for_each_thread()
 * walk safe here; confirm against the callers of this function.
 */
434- void tick_nohz_dep_set_signal (struct signal_struct * sig , enum tick_dep_bits bit )
463+ void tick_nohz_dep_set_signal (struct task_struct * tsk ,
464+ enum tick_dep_bits bit )
435465{
436- tick_nohz_dep_set_all (& sig -> tick_dep_mask , bit );
466+ int prev ;
467+ struct signal_struct * sig = tsk -> signal ;
468+
469+ prev = atomic_fetch_or (BIT (bit ), & sig -> tick_dep_mask );
470+ if (!prev ) {
471+ struct task_struct * t ;
472+
473+ lockdep_assert_held (& tsk -> sighand -> siglock );
474+ __for_each_thread (sig , t )
475+ tick_nohz_kick_task (t );
476+ }
477+ }
437477}
438478
439479void tick_nohz_dep_clear_signal (struct signal_struct * sig , enum tick_dep_bits bit )
@@ -448,13 +488,10 @@ void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bi
448488 */
/*
 * Review note: the local_irq_save()/local_irq_restore() pair and the
 * 'out:' label are dropped, and the early-exit becomes a plain return.
 * NOTE(review): this only looks safe if the function is now guaranteed to
 * run with interrupts already disabled (it is invoked from the context
 * switch path) — that guarantee is not visible in this hunk; confirm at
 * the tick_nohz_task_switch() call site.
 */
449489void __tick_nohz_task_switch (void )
450490{
451- unsigned long flags ;
452491 struct tick_sched * ts ;
453492
454- local_irq_save (flags );
455-
456493 if (!tick_nohz_full_cpu (smp_processor_id ()))
457- goto out ;
494+ return ;
458495
459496 ts = this_cpu_ptr (& tick_cpu_sched );
460497
@@ -463,8 +500,6 @@ void __tick_nohz_task_switch(void)
463500 atomic_read (& current -> signal -> tick_dep_mask ))
464501 tick_nohz_full_kick ();
465502 }
466- out :
467- local_irq_restore (flags );
468503}
469504
470505/* Get the boot-time nohz CPU list from the kernel parameters. */
@@ -922,27 +957,31 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
922957 * Cancel the scheduled timer and restore the tick
923958 */
924959 ts -> tick_stopped = 0 ;
925- ts -> idle_exittime = now ;
926-
927960 tick_nohz_restart (ts , now );
928961}
929962
/*
 * Review note: tick_nohz_full_update_tick() is split in two.  The new
 * __tick_nohz_full_update_tick(ts, now) holds the core stop/restart
 * decision (can_stop_full_tick() -> stop, else restart if stopped) and
 * takes the timestamp as a parameter so other callers can pass their own
 * 'now' instead of forcing a fresh ktime_get().  The outer wrapper keeps
 * the original name and guards (nohz_full CPU check, nothing-to-do check
 * when the tick isn't stopped and nohz is inactive) before delegating
 * with ktime_get().
 */
930- static void tick_nohz_full_update_tick (struct tick_sched * ts )
963+ static void __tick_nohz_full_update_tick (struct tick_sched * ts ,
964+ ktime_t now )
931965{
932966#ifdef CONFIG_NO_HZ_FULL
933967 int cpu = smp_processor_id ();
934968
935- if (!tick_nohz_full_cpu (cpu ))
969+ if (can_stop_full_tick (cpu , ts ))
970+ tick_nohz_stop_sched_tick (ts , cpu );
971+ else if (ts -> tick_stopped )
972+ tick_nohz_restart_sched_tick (ts , now );
973+ #endif
974+ }
975+
976+ static void tick_nohz_full_update_tick (struct tick_sched * ts )
977+ {
978+ if (!tick_nohz_full_cpu (smp_processor_id ()))
936979 return ;
937980
938981 if (!ts -> tick_stopped && ts -> nohz_mode == NOHZ_MODE_INACTIVE )
939982 return ;
940983
941- if (can_stop_full_tick (cpu , ts ))
942- tick_nohz_stop_sched_tick (ts , cpu );
943- else if (ts -> tick_stopped )
944- tick_nohz_restart_sched_tick (ts , ktime_get ());
945- #endif
984+ __tick_nohz_full_update_tick (ts , ktime_get ());
946985}
947986
948987static bool can_stop_idle_tick (int cpu , struct tick_sched * ts )
@@ -1189,11 +1228,13 @@ unsigned long tick_nohz_get_idle_calls(void)
11891228 return ts -> idle_calls ;
11901229}
11911230
/*
 * Review note: tick_nohz_account_idle_ticks() is renamed to
 * tick_nohz_account_idle_time() and now takes 'now', recording
 * ts->idle_exittime here (that store was removed from
 * tick_nohz_restart_sched_tick() earlier in this patch).  The
 * compile-time #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE guard is
 * dropped in favor of the existing runtime
 * vtime_accounting_enabled_this_cpu() early return — NOTE(review):
 * this assumes that check covers the native-accounting case; confirm
 * against the vtime accounting definitions.
 */
1192- static void tick_nohz_account_idle_ticks (struct tick_sched * ts )
1231+ static void tick_nohz_account_idle_time (struct tick_sched * ts ,
1232+ ktime_t now )
11931233{
1194- #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
11951234 unsigned long ticks ;
11961235
1236+ ts -> idle_exittime = now ;
1237+
11971238 if (vtime_accounting_enabled_this_cpu ())
11981239 return ;
11991240 /*
@@ -1207,21 +1248,27 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
12071248 */
12081249 if (ticks && ticks < LONG_MAX )
12091250 account_idle_ticks (ticks );
1210- #endif
12111251}
12121252
/*
 * Review note: __tick_nohz_idle_restart_tick() is removed.
 * tick_nohz_idle_restart_tick() now performs the ts->tick_stopped check
 * itself before restarting and accounting, using a single ktime_get()
 * timestamp for both.  The new tick_nohz_idle_update_tick() is the idle
 * exit path: on a nohz_full CPU it delegates to
 * __tick_nohz_full_update_tick(now) — which may keep the tick stopped
 * when can_stop_full_tick() still holds — whereas other CPUs
 * unconditionally restart via tick_nohz_restart_sched_tick(); idle time
 * is accounted in both cases with the same 'now'.
 */
1213- static void __tick_nohz_idle_restart_tick ( struct tick_sched * ts , ktime_t now )
1253+ void tick_nohz_idle_restart_tick ( void )
12141254{
1215- tick_nohz_restart_sched_tick (ts , now );
1216- tick_nohz_account_idle_ticks (ts );
1255+ struct tick_sched * ts = this_cpu_ptr (& tick_cpu_sched );
1256+
1257+ if (ts -> tick_stopped ) {
1258+ ktime_t now = ktime_get ();
1259+ tick_nohz_restart_sched_tick (ts , now );
1260+ tick_nohz_account_idle_time (ts , now );
1261+ }
12171262}
12181263
1219- void tick_nohz_idle_restart_tick ( void )
1264+ static void tick_nohz_idle_update_tick ( struct tick_sched * ts , ktime_t now )
12201265{
1221- struct tick_sched * ts = this_cpu_ptr (& tick_cpu_sched );
1266+ if (tick_nohz_full_cpu (smp_processor_id ()))
1267+ __tick_nohz_full_update_tick (ts , now );
1268+ else
1269+ tick_nohz_restart_sched_tick (ts , now );
12221270
1223- if (ts -> tick_stopped )
1224- __tick_nohz_idle_restart_tick (ts , ktime_get ());
1271+ tick_nohz_account_idle_time (ts , now );
12251272}
12261273
12271274/**
@@ -1253,7 +1300,7 @@ void tick_nohz_idle_exit(void)
12531300 tick_nohz_stop_idle (ts , now );
12541301
12551302 if (tick_stopped )
1256- __tick_nohz_idle_restart_tick (ts , now );
1303+ tick_nohz_idle_update_tick (ts , now );
12571304
12581305 local_irq_enable ();
12591306}
0 commit comments