@@ -375,6 +375,7 @@ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
375375static int worker_thread (void * __worker );
376376static void workqueue_sysfs_unregister (struct workqueue_struct * wq );
377377static void show_pwq (struct pool_workqueue * pwq );
378+ static void show_one_worker_pool (struct worker_pool * pool );
378379
379380#define CREATE_TRACE_POINTS
380381#include <trace/events/workqueue.h>
@@ -4447,7 +4448,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
44474448 raw_spin_unlock_irq (& pwq -> pool -> lock );
44484449 mutex_unlock (& wq -> mutex );
44494450 mutex_unlock (& wq_pool_mutex );
4450- show_workqueue_state ( );
4451+ show_one_workqueue ( wq );
44514452 return ;
44524453 }
44534454 raw_spin_unlock_irq (& pwq -> pool -> lock );
@@ -4797,97 +4798,116 @@ static void show_pwq(struct pool_workqueue *pwq)
47974798}
47984799
47994800/**
4800- * show_workqueue_state - dump workqueue state
4801- *
4802- * Called from a sysrq handler or try_to_freeze_tasks() and prints out
4803- * all busy workqueues and pools.
4801+ * show_one_workqueue - dump state of specified workqueue
4802+ * @wq: workqueue whose state will be printed
48044803 */
4805- void show_workqueue_state ( void )
4804+ void show_one_workqueue ( struct workqueue_struct * wq )
48064805{
4807- struct workqueue_struct * wq ;
4808- struct worker_pool * pool ;
4806+ struct pool_workqueue * pwq ;
4807+ bool idle = true ;
48094808 unsigned long flags ;
4810- int pi ;
4811-
4812- rcu_read_lock ();
4813-
4814- pr_info ("Showing busy workqueues and worker pools:\n" );
4815-
4816- list_for_each_entry_rcu (wq , & workqueues , list ) {
4817- struct pool_workqueue * pwq ;
4818- bool idle = true;
48194809
4820- for_each_pwq (pwq , wq ) {
4821- if (pwq -> nr_active || !list_empty (& pwq -> inactive_works )) {
4822- idle = false;
4823- break ;
4824- }
4810+ for_each_pwq (pwq , wq ) {
4811+ if (pwq -> nr_active || !list_empty (& pwq -> inactive_works )) {
4812+ idle = false;
4813+ break ;
48254814 }
4826- if (idle )
4827- continue ;
4815+ }
4816+ if (idle ) /* Nothing to print for idle workqueue */
4817+ return ;
48284818
4829- pr_info ("workqueue %s: flags=0x%x\n" , wq -> name , wq -> flags );
4819+ pr_info ("workqueue %s: flags=0x%x\n" , wq -> name , wq -> flags );
48304820
4831- for_each_pwq (pwq , wq ) {
4832- raw_spin_lock_irqsave (& pwq -> pool -> lock , flags );
4833- if (pwq -> nr_active || !list_empty (& pwq -> inactive_works )) {
4834- /*
4835- * Defer printing to avoid deadlocks in console
4836- * drivers that queue work while holding locks
4837- * also taken in their write paths.
4838- */
4839- printk_deferred_enter ();
4840- show_pwq (pwq );
4841- printk_deferred_exit ();
4842- }
4843- raw_spin_unlock_irqrestore (& pwq -> pool -> lock , flags );
4821+ for_each_pwq (pwq , wq ) {
4822+ raw_spin_lock_irqsave (& pwq -> pool -> lock , flags );
4823+ if (pwq -> nr_active || !list_empty (& pwq -> inactive_works )) {
48444824 /*
4845- * We could be printing a lot from atomic context, e.g.
4846- * sysrq-t -> show_workqueue_state(). Avoid triggering
4847- * hard lockup .
4825+ * Defer printing to avoid deadlocks in console
4826+ * drivers that queue work while holding locks
4827+ * also taken in their write paths .
48484828 */
4849- touch_nmi_watchdog ();
4850- }
4851- }
4852-
4853- for_each_pool (pool , pi ) {
4854- struct worker * worker ;
4855- bool first = true;
4856-
4857- raw_spin_lock_irqsave (& pool -> lock , flags );
4858- if (pool -> nr_workers == pool -> nr_idle )
4859- goto next_pool ;
4860- /*
4861- * Defer printing to avoid deadlocks in console drivers that
4862- * queue work while holding locks also taken in their write
4863- * paths.
4864- */
4865- printk_deferred_enter ();
4866- pr_info ("pool %d:" , pool -> id );
4867- pr_cont_pool_info (pool );
4868- pr_cont (" hung=%us workers=%d" ,
4869- jiffies_to_msecs (jiffies - pool -> watchdog_ts ) / 1000 ,
4870- pool -> nr_workers );
4871- if (pool -> manager )
4872- pr_cont (" manager: %d" ,
4873- task_pid_nr (pool -> manager -> task ));
4874- list_for_each_entry (worker , & pool -> idle_list , entry ) {
4875- pr_cont (" %s%d" , first ? "idle: " : "" ,
4876- task_pid_nr (worker -> task ));
4877- first = false;
4829+ printk_deferred_enter ();
4830+ show_pwq (pwq );
4831+ printk_deferred_exit ();
48784832 }
4879- pr_cont ("\n" );
4880- printk_deferred_exit ();
4881- next_pool :
4882- raw_spin_unlock_irqrestore (& pool -> lock , flags );
4833+ raw_spin_unlock_irqrestore (& pwq -> pool -> lock , flags );
48834834 /*
48844835 * We could be printing a lot from atomic context, e.g.
4885- * sysrq-t -> show_workqueue_state (). Avoid triggering
4836+ * sysrq-t -> show_all_workqueues (). Avoid triggering
48864837 * hard lockup.
48874838 */
48884839 touch_nmi_watchdog ();
48894840 }
48904841
4842+ }
4843+
4844+ /**
4845+ * show_one_worker_pool - dump state of specified worker pool
4846+ * @pool: worker pool whose state will be printed
4847+ */
4848+ static void show_one_worker_pool (struct worker_pool * pool )
4849+ {
4850+ struct worker * worker ;
4851+ bool first = true;
4852+ unsigned long flags ;
4853+
4854+ raw_spin_lock_irqsave (& pool -> lock , flags );
4855+ if (pool -> nr_workers == pool -> nr_idle )
4856+ goto next_pool ;
4857+ /*
4858+ * Defer printing to avoid deadlocks in console drivers that
4859+ * queue work while holding locks also taken in their write
4860+ * paths.
4861+ */
4862+ printk_deferred_enter ();
4863+ pr_info ("pool %d:" , pool -> id );
4864+ pr_cont_pool_info (pool );
4865+ pr_cont (" hung=%us workers=%d" ,
4866+ jiffies_to_msecs (jiffies - pool -> watchdog_ts ) / 1000 ,
4867+ pool -> nr_workers );
4868+ if (pool -> manager )
4869+ pr_cont (" manager: %d" ,
4870+ task_pid_nr (pool -> manager -> task ));
4871+ list_for_each_entry (worker , & pool -> idle_list , entry ) {
4872+ pr_cont (" %s%d" , first ? "idle: " : "" ,
4873+ task_pid_nr (worker -> task ));
4874+ first = false;
4875+ }
4876+ pr_cont ("\n" );
4877+ printk_deferred_exit ();
4878+ next_pool :
4879+ raw_spin_unlock_irqrestore (& pool -> lock , flags );
4880+ /*
4881+ * We could be printing a lot from atomic context, e.g.
4882+ * sysrq-t -> show_all_workqueues(). Avoid triggering
4883+ * hard lockup.
4884+ */
4885+ touch_nmi_watchdog ();
4886+
4887+ }
4888+
4889+ /**
4890+ * show_all_workqueues - dump workqueue state
4891+ *
4892+ * Called from a sysrq handler or try_to_freeze_tasks() and prints out
4893+ * all busy workqueues and pools.
4894+ */
4895+ void show_all_workqueues (void )
4896+ {
4897+ struct workqueue_struct * wq ;
4898+ struct worker_pool * pool ;
4899+ int pi ;
4900+
4901+ rcu_read_lock ();
4902+
4903+ pr_info ("Showing busy workqueues and worker pools:\n" );
4904+
4905+ list_for_each_entry_rcu (wq , & workqueues , list )
4906+ show_one_workqueue (wq );
4907+
4908+ for_each_pool (pool , pi )
4909+ show_one_worker_pool (pool );
4910+
48914911 rcu_read_unlock ();
48924912}
48934913
@@ -5384,16 +5404,22 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
53845404 int ret = - EINVAL ;
53855405 cpumask_var_t saved_cpumask ;
53865406
5387- if (!zalloc_cpumask_var (& saved_cpumask , GFP_KERNEL ))
5388- return - ENOMEM ;
5389-
53905407 /*
53915408 * Not excluding isolated cpus on purpose.
53925409 * If the user wishes to include them, we allow that.
53935410 */
53945411 cpumask_and (cpumask , cpumask , cpu_possible_mask );
53955412 if (!cpumask_empty (cpumask )) {
53965413 apply_wqattrs_lock ();
5414+ if (cpumask_equal (cpumask , wq_unbound_cpumask )) {
5415+ ret = 0 ;
5416+ goto out_unlock ;
5417+ }
5418+
5419+ if (!zalloc_cpumask_var (& saved_cpumask , GFP_KERNEL )) {
5420+ ret = - ENOMEM ;
5421+ goto out_unlock ;
5422+ }
53975423
53985424 /* save the old wq_unbound_cpumask. */
53995425 cpumask_copy (saved_cpumask , wq_unbound_cpumask );
@@ -5406,10 +5432,11 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
54065432 if (ret < 0 )
54075433 cpumask_copy (wq_unbound_cpumask , saved_cpumask );
54085434
5435+ free_cpumask_var (saved_cpumask );
5436+ out_unlock :
54095437 apply_wqattrs_unlock ();
54105438 }
54115439
5412- free_cpumask_var (saved_cpumask );
54135440 return ret ;
54145441}
54155442
@@ -5869,7 +5896,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
58695896 rcu_read_unlock ();
58705897
58715898 if (lockup_detected )
5872- show_workqueue_state ();
5899+ show_all_workqueues ();
58735900
58745901 wq_watchdog_reset_touched ();
58755902 mod_timer (& wq_watchdog_timer , jiffies + thresh );