@@ -1306,10 +1306,10 @@ static void set_load_weight(struct task_struct *p, bool update_load)
13061306static DEFINE_MUTEX (uclamp_mutex );
13071307
13081308/* Max allowed minimum utilization */
1309- static unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE ;
1309+ static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE ;
13101310
13111311/* Max allowed maximum utilization */
1312- static unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE ;
1312+ static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE ;
13131313
13141314/*
13151315 * By default RT tasks run at the maximum performance point/capacity of the
@@ -1456,33 +1456,6 @@ static void uclamp_update_util_min_rt_default(struct task_struct *p)
14561456 task_rq_unlock (rq , p , & rf );
14571457}
14581458
- /*
-  * NOTE(review): removed-side copy. This patch relocates the function
-  * (body unchanged) below uclamp_update_root_tg(), under the new
-  * CONFIG_SYSCTL guard, so it is only built when sysctl support is on.
-  */
1459- static void uclamp_sync_util_min_rt_default (void )
1460- {
1461- struct task_struct * g , * p ;
1462-
1463- /*
1464- * copy_process() sysctl_uclamp
1465- * uclamp_min_rt = X;
1466- * write_lock(&tasklist_lock) read_lock(&tasklist_lock)
1467- * // link thread smp_mb__after_spinlock()
1468- * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock);
1469- * sched_post_fork() for_each_process_thread()
1470- * __uclamp_sync_rt() __uclamp_sync_rt()
1471- *
1472- * Ensures that either sched_post_fork() will observe the new
1473- * uclamp_min_rt or for_each_process_thread() will observe the new
1474- * task.
1475- */
1476- read_lock (& tasklist_lock );
1477- smp_mb__after_spinlock ();
1478- read_unlock (& tasklist_lock );
1479-
1480- rcu_read_lock ();
1481- for_each_process_thread (g , p )
1482- uclamp_update_util_min_rt_default (p );
1483- rcu_read_unlock ();
1484- }
1485-
14861459static inline struct uclamp_se
14871460uclamp_tg_restrict (struct task_struct * p , enum uclamp_id clamp_id )
14881461{
@@ -1762,6 +1735,11 @@ uclamp_update_active_tasks(struct cgroup_subsys_state *css)
17621735}
17631736
17641737static void cpu_util_update_eff (struct cgroup_subsys_state * css );
1738+ #endif
1739+
1740+ #ifdef CONFIG_SYSCTL
1741+ #ifdef CONFIG_UCLAMP_TASK
1742+ #ifdef CONFIG_UCLAMP_TASK_GROUP
17651743static void uclamp_update_root_tg (void )
17661744{
17671745 struct task_group * tg = & root_task_group ;
@@ -1779,6 +1757,33 @@ static void uclamp_update_root_tg(void)
17791757static void uclamp_update_root_tg (void ) { }
17801758#endif
17811759
+ /*
+  * Propagate a changed RT-default uclamp.min to every existing task by
+  * walking all threads and re-syncing each one.
+  * NOTE(review): added-side copy of the function removed above — moved
+  * here (body unchanged) so it compiles only under CONFIG_SYSCTL.
+  */
1760+ static void uclamp_sync_util_min_rt_default (void )
1761+ {
1762+ struct task_struct * g , * p ;
1763+
1764+ /*
1765+ * copy_process() sysctl_uclamp
1766+ * uclamp_min_rt = X;
1767+ * write_lock(&tasklist_lock) read_lock(&tasklist_lock)
1768+ * // link thread smp_mb__after_spinlock()
1769+ * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock);
1770+ * sched_post_fork() for_each_process_thread()
1771+ * __uclamp_sync_rt() __uclamp_sync_rt()
1772+ *
1773+ * Ensures that either sched_post_fork() will observe the new
1774+ * uclamp_min_rt or for_each_process_thread() will observe the new
1775+ * task.
1776+ */
1777+ read_lock (& tasklist_lock );
1778+ smp_mb__after_spinlock ();
1779+ read_unlock (& tasklist_lock );
1780+
1781+ rcu_read_lock ();
1782+ for_each_process_thread (g , p )
1783+ uclamp_update_util_min_rt_default (p );
1784+ rcu_read_unlock ();
1785+ }
1786+
17821787static int sysctl_sched_uclamp_handler (struct ctl_table * table , int write ,
17831788 void * buffer , size_t * lenp , loff_t * ppos )
17841789{
@@ -1843,6 +1848,8 @@ static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
18431848
18441849 return result ;
18451850}
1851+ #endif
1852+ #endif
18461853
18471854static int uclamp_validate (struct task_struct * p ,
18481855 const struct sched_attr * attr )
0 commit comments