@@ -96,7 +96,7 @@ static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
9696
9797static DEFINE_PER_CPU_SHARED_ALIGNED (struct llist_head , call_single_queue ) ;
9898
99- static void flush_smp_call_function_queue (bool warn_cpu_offline );
99+ static void __flush_smp_call_function_queue (bool warn_cpu_offline );
100100
101101int smpcfd_prepare_cpu (unsigned int cpu )
102102{
@@ -141,7 +141,7 @@ int smpcfd_dying_cpu(unsigned int cpu)
141141 * ensure that the outgoing CPU doesn't go offline with work
142142 * still pending.
143143 */
144- flush_smp_call_function_queue (false);
144+ __flush_smp_call_function_queue (false);
145145 irq_work_run ();
146146 return 0 ;
147147}
@@ -541,11 +541,11 @@ void generic_smp_call_function_single_interrupt(void)
541541{
542542 cfd_seq_store (this_cpu_ptr (& cfd_seq_local )-> gotipi , CFD_SEQ_NOCPU ,
543543 smp_processor_id (), CFD_SEQ_GOTIPI );
544- flush_smp_call_function_queue (true);
544+ __flush_smp_call_function_queue (true);
545545}
546546
547547/**
548- * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
548+ * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
549549 *
550550 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
551551 * offline CPU. Skip this check if set to 'false'.
@@ -558,7 +558,7 @@ void generic_smp_call_function_single_interrupt(void)
558558 * Loop through the call_single_queue and run all the queued callbacks.
559559 * Must be called with interrupts disabled.
560560 */
561- static void flush_smp_call_function_queue (bool warn_cpu_offline )
561+ static void __flush_smp_call_function_queue (bool warn_cpu_offline )
562562{
563563 call_single_data_t * csd , * csd_next ;
564564 struct llist_node * entry , * prev ;
@@ -681,7 +681,20 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
681681 smp_processor_id (), CFD_SEQ_HDLEND );
682682}
683683
684- void flush_smp_call_function_from_idle (void )
684+
685+ /**
686+ * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
687+ * from task context (idle, migration thread)
688+ *
689+ * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
690+ * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
691+ * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
692+ * handle queued SMP function calls before scheduling.
693+ *
694+ * The migration thread has to ensure that a possibly pending wakeup has
695+ * been handled before it migrates a task.
696+ */
697+ void flush_smp_call_function_queue (void )
685698{
686699 unsigned long flags ;
687700
@@ -691,7 +704,7 @@ void flush_smp_call_function_from_idle(void)
691704 cfd_seq_store (this_cpu_ptr (& cfd_seq_local )-> idle , CFD_SEQ_NOCPU ,
692705 smp_processor_id (), CFD_SEQ_IDLE );
693706 local_irq_save (flags );
694- flush_smp_call_function_queue (true);
707+ __flush_smp_call_function_queue (true);
695708 if (local_softirq_pending ())
696709 do_softirq ();
697710
0 commit comments