@@ -2695,6 +2695,16 @@ static void unbind_worker(struct worker *worker)
 	WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
 }
 
+
+static void detach_worker(struct worker *worker)
+{
+	lockdep_assert_held(&wq_pool_attach_mutex);
+
+	unbind_worker(worker);
+	list_del(&worker->node);
+	worker->pool = NULL;
+}
+
 /**
  * worker_detach_from_pool() - detach a worker from its pool
  * @worker: worker which is attached to its pool
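The new detach_worker() helper gathers the three detachment steps that were previously open-coded at each site — unbinding the task from the pool's cpumask, unlinking it from pool->workers, and clearing worker->pool — and encodes the locking contract with lockdep_assert_held(). Every caller must hold wq_pool_attach_mutex across the call; a minimal sketch of that convention (illustrative only, not an additional call site introduced by this patch):

    mutex_lock(&wq_pool_attach_mutex);
    detach_worker(worker);	/* unbind cpumask, unlink node, clear worker->pool */
    mutex_unlock(&wq_pool_attach_mutex);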
@@ -2711,11 +2721,7 @@ static void worker_detach_from_pool(struct worker *worker)
 	WARN_ON_ONCE(pool->flags & POOL_BH);
 
 	mutex_lock(&wq_pool_attach_mutex);
-
-	unbind_worker(worker);
-	list_del(&worker->node);
-	worker->pool = NULL;
-
+	detach_worker(worker);
 	mutex_unlock(&wq_pool_attach_mutex);
 
 	/* clear leftover flags without pool->lock after it is detached */
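With the helper in place, worker_detach_from_pool() shrinks to a locked call to detach_worker(). For orientation, roughly what the whole function reduces to after this hunk — a sketch reconstructed from the context lines; the final flag-clearing statement is assumed from the surrounding code rather than shown in this diff:

    static void worker_detach_from_pool(struct worker *worker)
    {
            struct worker_pool *pool = worker->pool;

            /* should never be called for BH pools */
            WARN_ON_ONCE(pool->flags & POOL_BH);

            mutex_lock(&wq_pool_attach_mutex);
            detach_worker(worker);
            mutex_unlock(&wq_pool_attach_mutex);

            /* clear leftover flags without pool->lock after it is detached */
            worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);	/* assumed tail */
    }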
@@ -2807,24 +2813,12 @@ static struct worker *create_worker(struct worker_pool *pool)
 	return NULL;
 }
 
-static void wake_dying_workers(struct list_head *cull_list)
+static void detach_dying_workers(struct list_head *cull_list)
 {
 	struct worker *worker;
 
-	list_for_each_entry(worker, cull_list, entry) {
-		unbind_worker(worker);
-		/*
-		 * If the worker was somehow already running, then it had to be
-		 * in pool->idle_list when set_worker_dying() happened or we
-		 * wouldn't have gotten here.
-		 *
-		 * Thus, the worker must either have observed the WORKER_DIE
-		 * flag, or have set its state to TASK_IDLE. Either way, the
-		 * below will be observed by the worker and is safe to do
-		 * outside of pool->lock.
-		 */
-		wake_up_process(worker->task);
-	}
+	list_for_each_entry(worker, cull_list, entry)
+		detach_worker(worker);
 }
 
 static void reap_dying_workers(struct list_head *cull_list)
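The rename is not cosmetic: culled workers are now fully detached here, under wq_pool_attach_mutex, and the wake_up_process() call together with its careful memory-ordering justification disappears. The wake-up is not lost — reap_dying_workers() (visible in the trailing context) stops each dying worker with kthread_stop(), which both wakes the task and waits for it to exit, so the old comment's reasoning is no longer needed at this point. A plausible shape for that reaper, assuming it unlinks and frees each worker after stopping it (its body is not part of this diff):

    static void reap_dying_workers(struct list_head *cull_list)
    {
            struct worker *worker, *tmp;

            list_for_each_entry_safe(worker, tmp, cull_list, entry) {
                    list_del_init(&worker->entry);
                    /* kthread_stop_put() wakes the worker and waits for it to exit */
                    kthread_stop_put(worker->task);
                    kfree(worker);
            }
    }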
@@ -2930,9 +2924,9 @@ static void idle_cull_fn(struct work_struct *work)
 
 	/*
 	 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
-	 * cannot proceed beyong worker_detach_from_pool() in its self-destruct
-	 * path. This is required as a previously-preempted worker could run after
-	 * set_worker_dying() has happened but before wake_dying_workers() did.
+	 * cannot proceed beyond set_pf_worker() in its self-destruct path.
+	 * This is required as a previously-preempted worker could run after
+	 * set_worker_dying() has happened but before detach_dying_workers() did.
 	 */
 	mutex_lock(&wq_pool_attach_mutex);
 	raw_spin_lock_irq(&pool->lock);
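The comment now names set_pf_worker() as the choke point because, after the worker_thread() hunk below, the self-destruct path no longer calls worker_detach_from_pool(); set_pf_worker() is the last step on that path that takes wq_pool_attach_mutex. An illustrative interleaving of the race the mutex closes (assumed timeline, for exposition only):

    idle_cull_fn()                          preempted dying worker
    --------------                          ----------------------
    mutex_lock(&wq_pool_attach_mutex)
    raw_spin_lock_irq(&pool->lock)
    set_worker_dying(worker, &cull_list)
    raw_spin_unlock_irq(&pool->lock)
                                            resumes, sees WORKER_DIE
                                            set_pf_worker(false) blocks on
                                              wq_pool_attach_mutex
    detach_dying_workers(&cull_list)
    mutex_unlock(&wq_pool_attach_mutex)
                                            acquires the mutex, proceeds,
                                            and exits once reaped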
@@ -2953,7 +2947,7 @@ static void idle_cull_fn(struct work_struct *work)
 	}
 
 	raw_spin_unlock_irq(&pool->lock);
-	wake_dying_workers(&cull_list);
+	detach_dying_workers(&cull_list);
 	mutex_unlock(&wq_pool_attach_mutex);
 
 	reap_dying_workers(&cull_list);
@@ -3336,7 +3330,6 @@ static int worker_thread(void *__worker)
 
 		set_task_comm(worker->task, "kworker/dying");
 		ida_free(&pool->worker_ida, worker->id);
-		worker_detach_from_pool(worker);
 		WARN_ON_ONCE(!list_empty(&worker->entry));
 		return 0;
 	}
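Dropping worker_detach_from_pool() here is what the idle_cull_fn() comment above refers to: the culler has already detached this worker, so the die path only has bookkeeping left. A sketch of the resulting block — the enclosing WORKER_DIE check, the unlock, and the set_pf_worker(false) call are assumed from the surrounding function, not shown in this hunk:

    if (unlikely(worker->flags & WORKER_DIE)) {
            raw_spin_unlock_irq(&pool->lock);
            set_pf_worker(false);	/* serialized against the culler by wq_pool_attach_mutex */

            set_task_comm(worker->task, "kworker/dying");
            ida_free(&pool->worker_ida, worker->id);
            /* the cull/reap path should have unlinked us already */
            WARN_ON_ONCE(!list_empty(&worker->entry));
            return 0;
    }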
@@ -4921,7 +4914,7 @@ static void put_unbound_pool(struct worker_pool *pool)
 	WARN_ON(pool->nr_workers || pool->nr_idle);
 	raw_spin_unlock_irq(&pool->lock);
 
-	wake_dying_workers(&cull_list);
+	detach_dying_workers(&cull_list);
 
 	mutex_unlock(&wq_pool_attach_mutex);
 
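put_unbound_pool() follows the same discipline as idle_cull_fn(): collect every remaining worker onto cull_list under pool->lock, detach them all while still holding wq_pool_attach_mutex, and only then reap them. A condensed sketch of that ordering, assumed from the context here and in the idle_cull_fn() hunks above (intervening teardown steps elided):

    mutex_lock(&wq_pool_attach_mutex);
    raw_spin_lock_irq(&pool->lock);
    /* ... move every worker onto cull_list via set_worker_dying() ... */
    raw_spin_unlock_irq(&pool->lock);

    detach_dying_workers(&cull_list);	/* still under the attach mutex */
    mutex_unlock(&wq_pool_attach_mutex);

    reap_dying_workers(&cull_list);	/* kthread_stop() each worker and free it */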