@@ -5108,6 +5108,19 @@ static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
 	return pwq;
 }
 
+static void apply_wqattrs_lock(void)
+{
+	/* CPUs should stay stable across pwq creations and installations */
+	cpus_read_lock();
+	mutex_lock(&wq_pool_mutex);
+}
+
+static void apply_wqattrs_unlock(void)
+{
+	mutex_unlock(&wq_pool_mutex);
+	cpus_read_unlock();
+}
+
 /**
  * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod
  * @attrs: the wq_attrs of the default pwq of the target workqueue
@@ -5419,6 +5432,9 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 	bool highpri = wq->flags & WQ_HIGHPRI;
 	int cpu, ret;
 
+	lockdep_assert_cpus_held();
+	lockdep_assert_held(&wq_pool_mutex);
+
 	wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
 	if (!wq->cpu_pwq)
 		goto enomem;
@@ -5452,20 +5468,18 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 		return 0;
 	}
 
-	cpus_read_lock();
 	if (wq->flags & __WQ_ORDERED) {
 		struct pool_workqueue *dfl_pwq;
 
-		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
+		ret = apply_workqueue_attrs_locked(wq, ordered_wq_attrs[highpri]);
 		/* there should only be single pwq for ordering guarantee */
 		dfl_pwq = rcu_access_pointer(wq->dfl_pwq);
 		WARN(!ret && (wq->pwqs.next != &dfl_pwq->pwqs_node ||
 			      wq->pwqs.prev != &dfl_pwq->pwqs_node),
 		     "ordering guarantee broken for workqueue %s\n", wq->name);
 	} else {
-		ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
+		ret = apply_workqueue_attrs_locked(wq, unbound_std_wq_attrs[highpri]);
 	}
-	cpus_read_unlock();
 
 	return ret;
 
@@ -5672,23 +5686,23 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 		goto err_unreg_lockdep;
 	}
 
-	if (alloc_and_link_pwqs(wq) < 0)
-		goto err_free_node_nr_active;
-
 	/*
-	 * wq_pool_mutex protects global freeze state and workqueues list.
-	 * Grab it, adjust max_active and add the new @wq to workqueues
-	 * list.
+	 * wq_pool_mutex protects the workqueues list, allocations of PWQs,
+	 * and the global freeze state. alloc_and_link_pwqs() also requires
+	 * cpus_read_lock() for PWQs' affinities.
 	 */
-	mutex_lock(&wq_pool_mutex);
+	apply_wqattrs_lock();
+
+	if (alloc_and_link_pwqs(wq) < 0)
+		goto err_unlock_free_node_nr_active;
 
 	mutex_lock(&wq->mutex);
 	wq_adjust_max_active(wq);
 	mutex_unlock(&wq->mutex);
 
 	list_add_tail_rcu(&wq->list, &workqueues);
 
-	mutex_unlock(&wq_pool_mutex);
+	apply_wqattrs_unlock();
 
 	if (wq_online && init_rescuer(wq) < 0)
 		goto err_destroy;
@@ -5698,7 +5712,8 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 
 	return wq;
 
-err_free_node_nr_active:
+err_unlock_free_node_nr_active:
+	apply_wqattrs_unlock();
 	/*
 	 * Failed alloc_and_link_pwqs() may leave pending pwq->release_work,
 	 * flushing the pwq_release_worker ensures that the pwq_release_workfn()
@@ -6987,19 +7002,6 @@ static struct attribute *wq_sysfs_attrs[] = {
 };
 ATTRIBUTE_GROUPS(wq_sysfs);
 
-static void apply_wqattrs_lock(void)
-{
-	/* CPUs should stay stable across pwq creations and installations */
-	cpus_read_lock();
-	mutex_lock(&wq_pool_mutex);
-}
-
-static void apply_wqattrs_unlock(void)
-{
-	mutex_unlock(&wq_pool_mutex);
-	cpus_read_unlock();
-}
-
 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
 			    char *buf)
 {