@@ -147,6 +147,9 @@ enum wq_internal_consts {
  *
  * WR: wq->mutex protected for writes. RCU protected for reads.
  *
+ * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read
+ *     with READ_ONCE() without locking.
+ *
  * MD: wq_mayday_lock protected.
  *
  * WD: Used internally by the watchdog.
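The new "WO" rule pairs a mutex-serialized writer with lockless readers. Below is a small userspace sketch in plain C11 (not kernel code) of the same pattern, with relaxed atomics standing in for WRITE_ONCE()/READ_ONCE(); the field and helper names are illustrative only.

/* Userspace model of the WO rule; not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t wq_mutex = PTHREAD_MUTEX_INITIALIZER;
static _Atomic int max_active;		/* stands in for wq->max_active (WO) */

/* Writer side: serialized by wq_mutex, publishes with a single store. */
static void set_max_active(int v)
{
	pthread_mutex_lock(&wq_mutex);
	atomic_store_explicit(&max_active, v, memory_order_relaxed);
	pthread_mutex_unlock(&wq_mutex);
}

/* Reader side: no lock, a single load cannot observe a torn value. */
static int get_max_active(void)
{
	return atomic_load_explicit(&max_active, memory_order_relaxed);
}

int main(void)
{
	set_max_active(16);
	printf("max_active=%d\n", get_max_active());
	return 0;
}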
@@ -254,7 +257,6 @@ struct pool_workqueue {
 	 * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
 	 */
 	int			nr_active;	/* L: nr of active works */
-	int			max_active;	/* L: max active works */
 	struct list_head	inactive_works;	/* L: inactive works */
 	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
 	struct list_head	mayday_node;	/* MD: node on wq->maydays */
@@ -302,7 +304,8 @@ struct workqueue_struct {
 	struct worker		*rescuer;	/* MD: rescue worker */
 
 	int			nr_drainers;	/* WQ: drain in progress */
-	int			saved_max_active; /* WQ: saved pwq max_active */
+	int			max_active;	/* WO: max active works */
+	int			saved_max_active; /* WQ: saved max_active */
 
 	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
 	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */
@@ -1496,7 +1499,7 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_
 	pwq->nr_active--;
 	if (!list_empty(&pwq->inactive_works)) {
 		/* one down, submit an inactive one */
-		if (pwq->nr_active < pwq->max_active)
+		if (pwq->nr_active < READ_ONCE(pwq->wq->max_active))
 			pwq_activate_first_inactive(pwq);
 	}
 }
@@ -1797,7 +1800,13 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	pwq->nr_in_flight[pwq->work_color]++;
 	work_flags = work_color_to_flags(pwq->work_color);
 
-	if (likely(pwq->nr_active < pwq->max_active)) {
+	/*
+	 * Limit the number of concurrently active work items to max_active.
+	 * @work must also queue behind existing inactive work items to maintain
+	 * ordering when max_active changes. See wq_adjust_max_active().
+	 */
+	if (list_empty(&pwq->inactive_works) &&
+	    pwq->nr_active < READ_ONCE(pwq->wq->max_active)) {
 		if (list_empty(&pool->worklist))
 			pool->watchdog_ts = jiffies;
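For illustration, here is a self-contained userspace sketch (not kernel code) of the activation decision __queue_work() now makes: a new item goes active only when no earlier inactive items exist and the pwq is under the wq-wide limit; otherwise it queues as inactive, preserving ordering. fake_pwq and enqueue_work are hypothetical names.

/* Userspace model of the activation check; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct fake_pwq {
	int nr_active;		/* currently active work items */
	int nr_inactive;	/* items parked on inactive_works */
	int max_active;		/* stands in for READ_ONCE(pwq->wq->max_active) */
};

/* Returns true if the new work item runs now, false if it waits as inactive. */
static bool enqueue_work(struct fake_pwq *pwq)
{
	if (pwq->nr_inactive == 0 && pwq->nr_active < pwq->max_active) {
		pwq->nr_active++;	/* under the limit, nothing queued ahead */
		return true;
	}
	pwq->nr_inactive++;		/* queue behind existing inactive items */
	return false;
}

int main(void)
{
	struct fake_pwq pwq = { .max_active = 2 };

	for (int i = 0; i < 4; i++)
		printf("work %d -> %s\n", i,
		       enqueue_work(&pwq) ? "active" : "inactive");
	return 0;
}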
@@ -4146,50 +4155,6 @@ static void pwq_release_workfn(struct kthread_work *work)
 	}
 }
 
-/**
- * pwq_adjust_max_active - update a pwq's max_active to the current setting
- * @pwq: target pool_workqueue
- *
- * If @pwq isn't freezing, set @pwq->max_active to the associated
- * workqueue's saved_max_active and activate inactive work items
- * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
- */
-static void pwq_adjust_max_active(struct pool_workqueue *pwq)
-{
-	struct workqueue_struct *wq = pwq->wq;
-	bool freezable = wq->flags & WQ_FREEZABLE;
-	unsigned long flags;
-
-	/* for @wq->saved_max_active */
-	lockdep_assert_held(&wq->mutex);
-
-	/* fast exit for non-freezable wqs */
-	if (!freezable && pwq->max_active == wq->saved_max_active)
-		return;
-
-	/* this function can be called during early boot w/ irq disabled */
-	raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-
-	/*
-	 * During [un]freezing, the caller is responsible for ensuring that
-	 * this function is called at least once after @workqueue_freezing
-	 * is updated and visible.
-	 */
-	if (!freezable || !workqueue_freezing) {
-		pwq->max_active = wq->saved_max_active;
-
-		while (!list_empty(&pwq->inactive_works) &&
-		       pwq->nr_active < pwq->max_active)
-			pwq_activate_first_inactive(pwq);
-
-		kick_pool(pwq->pool);
-	} else {
-		pwq->max_active = 0;
-	}
-
-	raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
-}
-
 /* initialize newly allocated @pwq which is associated with @wq and @pool */
 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
 		     struct worker_pool *pool)
@@ -4222,9 +4187,6 @@ static void link_pwq(struct pool_workqueue *pwq)
 	/* set the matching work_color */
 	pwq->work_color = wq->work_color;
 
-	/* sync max_active to the current setting */
-	pwq_adjust_max_active(pwq);
-
 	/* link in @pwq */
 	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
 }
@@ -4665,14 +4627,59 @@ static int init_rescuer(struct workqueue_struct *wq)
 	return 0;
 }
 
+/**
+ * wq_adjust_max_active - update a wq's max_active to the current setting
+ * @wq: target workqueue
+ *
+ * If @wq isn't freezing, set @wq->max_active to the saved_max_active and
+ * activate inactive work items accordingly. If @wq is freezing, clear
+ * @wq->max_active to zero.
+ */
+static void wq_adjust_max_active(struct workqueue_struct *wq)
+{
+	struct pool_workqueue *pwq;
+
+	lockdep_assert_held(&wq->mutex);
+
+	if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) {
+		WRITE_ONCE(wq->max_active, 0);
+		return;
+	}
+
+	if (wq->max_active == wq->saved_max_active)
+		return;
+
+	/*
+	 * Update @wq->max_active and then kick inactive work items if more
+	 * active work items are allowed. This doesn't break work item ordering
+	 * because new work items are always queued behind existing inactive
+	 * work items if there are any.
+	 */
+	WRITE_ONCE(wq->max_active, wq->saved_max_active);
+
+	for_each_pwq(pwq, wq) {
+		unsigned long flags;
+
+		/* this function can be called during early boot w/ irq disabled */
+		raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+
+		while (!list_empty(&pwq->inactive_works) &&
+		       pwq->nr_active < wq->max_active)
+			pwq_activate_first_inactive(pwq);
+
+		kick_pool(pwq->pool);
+
+		raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+	}
+}
+
 __printf(1, 4)
 struct workqueue_struct *alloc_workqueue(const char *fmt,
 					 unsigned int flags,
 					 int max_active, ...)
 {
 	va_list args;
 	struct workqueue_struct *wq;
-	struct pool_workqueue *pwq;
 	int len;
 
 	/*
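To illustrate what wq_adjust_max_active() does when the limit is raised (or the wq is thawed), here is a hedged userspace sketch, not kernel code: publish the new limit once, then drain inactive items on each pwq until nr_active reaches it. The struct and function names below are stand-ins.

/* Userspace model of raising max_active; not kernel code. */
#include <stdio.h>

struct fake_pwq {
	int nr_active;
	int nr_inactive;
};

static void adjust_max_active(struct fake_pwq *pwqs, int n, int new_max)
{
	/* models WRITE_ONCE(wq->max_active, wq->saved_max_active) */
	for (int i = 0; i < n; i++) {
		struct fake_pwq *pwq = &pwqs[i];

		/* activate queued items until the new limit is reached */
		while (pwq->nr_inactive > 0 && pwq->nr_active < new_max) {
			pwq->nr_inactive--;
			pwq->nr_active++;
		}
	}
}

int main(void)
{
	struct fake_pwq pwqs[2] = {
		{ .nr_active = 1, .nr_inactive = 3 },
		{ .nr_active = 0, .nr_inactive = 1 },
	};

	adjust_max_active(pwqs, 2, 2);		/* bump the limit from 1 to 2 */
	for (int i = 0; i < 2; i++)
		printf("pwq%d: active=%d inactive=%d\n",
		       i, pwqs[i].nr_active, pwqs[i].nr_inactive);
	return 0;
}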
@@ -4711,6 +4718,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 
 	/* init wq */
 	wq->flags = flags;
+	wq->max_active = max_active;
 	wq->saved_max_active = max_active;
 	mutex_init(&wq->mutex);
 	atomic_set(&wq->nr_pwqs_to_flush, 0);
@@ -4739,8 +4747,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 	mutex_lock(&wq_pool_mutex);
 
 	mutex_lock(&wq->mutex);
-	for_each_pwq(pwq, wq)
-		pwq_adjust_max_active(pwq);
+	wq_adjust_max_active(wq);
 	mutex_unlock(&wq->mutex);
 
 	list_add_tail_rcu(&wq->list, &workqueues);
@@ -4878,8 +4885,6 @@ EXPORT_SYMBOL_GPL(destroy_workqueue);
  */
 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 {
-	struct pool_workqueue *pwq;
-
 	/* disallow meddling with max_active for ordered workqueues */
 	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
 		return;
@@ -4890,9 +4895,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
 	wq->flags &= ~__WQ_ORDERED;
 	wq->saved_max_active = max_active;
-
-	for_each_pwq(pwq, wq)
-		pwq_adjust_max_active(pwq);
+	wq_adjust_max_active(wq);
 
 	mutex_unlock(&wq->mutex);
 }
@@ -5139,8 +5142,8 @@ static void show_pwq(struct pool_workqueue *pwq)
 	pr_info(" pwq %d:", pool->id);
 	pr_cont_pool_info(pool);
 
-	pr_cont(" active=%d/%d refcnt=%d%s\n",
-		pwq->nr_active, pwq->max_active, pwq->refcnt,
+	pr_cont(" active=%d refcnt=%d%s\n",
+		pwq->nr_active, pwq->refcnt,
 		!list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
 
 	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
@@ -5688,7 +5691,6 @@ EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
 void freeze_workqueues_begin(void)
 {
 	struct workqueue_struct *wq;
-	struct pool_workqueue *pwq;
 
 	mutex_lock(&wq_pool_mutex);
 
@@ -5697,8 +5699,7 @@ void freeze_workqueues_begin(void)
 
 	list_for_each_entry(wq, &workqueues, list) {
 		mutex_lock(&wq->mutex);
-		for_each_pwq(pwq, wq)
-			pwq_adjust_max_active(pwq);
+		wq_adjust_max_active(wq);
 		mutex_unlock(&wq->mutex);
 	}
 
@@ -5763,7 +5764,6 @@ bool freeze_workqueues_busy(void)
 void thaw_workqueues(void)
 {
 	struct workqueue_struct *wq;
-	struct pool_workqueue *pwq;
 
 	mutex_lock(&wq_pool_mutex);
 
@@ -5775,8 +5775,7 @@ void thaw_workqueues(void)
 	/* restore max_active and repopulate worklist */
 	list_for_each_entry(wq, &workqueues, list) {
 		mutex_lock(&wq->mutex);
-		for_each_pwq(pwq, wq)
-			pwq_adjust_max_active(pwq);
+		wq_adjust_max_active(wq);
 		mutex_unlock(&wq->mutex);
 	}
 
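The caller-facing workqueue API is untouched by this series; the new wq->max_active plumbing is internal. For orientation, a minimal, untested module sketch of the unchanged entry points follows (the "demo_wq" name is hypothetical).

/* Untested sketch of the unchanged caller-facing API. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int __init demo_init(void)
{
	/* start with at most 1 in-flight work item */
	demo_wq = alloc_workqueue("demo_wq", WQ_FREEZABLE, 1);
	if (!demo_wq)
		return -ENOMEM;

	/* later, allow up to 4 concurrent work items */
	workqueue_set_max_active(demo_wq, 4);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");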