@@ -1922,12 +1922,6 @@ static void compute_partition_effective_cpumask(struct cpuset *cs,
19221922 rcu_read_unlock ();
19231923}
19241924
1925- /*
1926- * update_cpumasks_hier() flags
1927- */
1928- #define HIER_CHECKALL 0x01 /* Check all cpusets with no skipping */
1929- #define HIER_NO_SD_REBUILD 0x02 /* Don't rebuild sched domains */
1930-
19311925/*
19321926 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
19331927 * @cs: the cpuset to consider
@@ -1942,7 +1936,7 @@ static void compute_partition_effective_cpumask(struct cpuset *cs,
19421936 * Called with cpuset_mutex held
19431937 */
19441938static void update_cpumasks_hier (struct cpuset * cs , struct tmpmasks * tmp ,
1945- int flags )
1939+ bool force )
19461940{
19471941 struct cpuset * cp ;
19481942 struct cgroup_subsys_state * pos_css ;
@@ -2007,10 +2001,10 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
20072001 * Skip the whole subtree if
20082002 * 1) the cpumask remains the same,
20092003 * 2) has no partition root state,
2010- * 3) HIER_CHECKALL flag not set, and
2004+ * 3) force flag not set, and
20112005 * 4) for v2 load balance state same as its parent.
20122006 */
2013- if (!cp -> partition_root_state && !( flags & HIER_CHECKALL ) &&
2007+ if (!cp -> partition_root_state && !force &&
20142008 cpumask_equal (tmp -> new_cpus , cp -> effective_cpus ) &&
20152009 (!cgroup_subsys_on_dfl (cpuset_cgrp_subsys ) ||
20162010 (is_sched_load_balance (parent ) == is_sched_load_balance (cp )))) {
@@ -2112,8 +2106,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
21122106 }
21132107 rcu_read_unlock ();
21142108
2115- if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD ) &&
2116- !force_sd_rebuild )
2109+ if (need_rebuild_sched_domains && !force_sd_rebuild )
21172110 rebuild_sched_domains_locked ();
21182111}
21192112
@@ -2141,9 +2134,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
21412134 * directly.
21422135 *
21432136 * The update_cpumasks_hier() function may sleep. So we have to
2144- * release the RCU read lock before calling it. HIER_NO_SD_REBUILD
2145- * flag is used to suppress rebuild of sched domains as the callers
2146- * will take care of that.
2137+ * release the RCU read lock before calling it.
21472138 */
21482139 rcu_read_lock ();
21492140 cpuset_for_each_child (sibling , pos_css , parent ) {
@@ -2159,7 +2150,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
21592150 continue ;
21602151
21612152 rcu_read_unlock ();
2162- update_cpumasks_hier (sibling , tmp , HIER_NO_SD_REBUILD );
2153+ update_cpumasks_hier (sibling , tmp , false );
21632154 rcu_read_lock ();
21642155 css_put (& sibling -> css );
21652156 }
@@ -2179,7 +2170,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
21792170 struct tmpmasks tmp ;
21802171 struct cpuset * parent = parent_cs (cs );
21812172 bool invalidate = false;
2182- int hier_flags = 0 ;
2173+ bool force = false ;
21832174 int old_prs = cs -> partition_root_state ;
21842175
21852176 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
@@ -2240,8 +2231,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
22402231 * Check all the descendants in update_cpumasks_hier() if
22412232 * effective_xcpus is to be changed.
22422233 */
2243- if (!cpumask_equal (cs -> effective_xcpus , trialcs -> effective_xcpus ))
2244- hier_flags = HIER_CHECKALL ;
2234+ force = !cpumask_equal (cs -> effective_xcpus , trialcs -> effective_xcpus );
22452235
22462236 retval = validate_change (cs , trialcs );
22472237
@@ -2309,7 +2299,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
23092299 spin_unlock_irq (& callback_lock );
23102300
23112301 /* effective_cpus/effective_xcpus will be updated here */
2312- update_cpumasks_hier (cs , & tmp , hier_flags );
2302+ update_cpumasks_hier (cs , & tmp , force );
23132303
23142304 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
23152305 if (cs -> partition_root_state )
@@ -2334,7 +2324,7 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
23342324 struct tmpmasks tmp ;
23352325 struct cpuset * parent = parent_cs (cs );
23362326 bool invalidate = false;
2337- int hier_flags = 0 ;
2327+ bool force = false ;
23382328 int old_prs = cs -> partition_root_state ;
23392329
23402330 if (!* buf ) {
@@ -2357,8 +2347,7 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
23572347 * Check all the descendants in update_cpumasks_hier() if
23582348 * effective_xcpus is to be changed.
23592349 */
2360- if (!cpumask_equal (cs -> effective_xcpus , trialcs -> effective_xcpus ))
2361- hier_flags = HIER_CHECKALL ;
2350+ force = !cpumask_equal (cs -> effective_xcpus , trialcs -> effective_xcpus );
23622351
23632352 retval = validate_change (cs , trialcs );
23642353 if (retval )
@@ -2411,8 +2400,8 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
24112400 * of the subtree when it is a valid partition root or effective_xcpus
24122401 * is updated.
24132402 */
2414- if (is_partition_valid (cs ) || hier_flags )
2415- update_cpumasks_hier (cs , & tmp , hier_flags );
2403+ if (is_partition_valid (cs ) || force )
2404+ update_cpumasks_hier (cs , & tmp , force );
24162405
24172406 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
24182407 if (cs -> partition_root_state )
@@ -2853,7 +2842,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
28532842 update_unbound_workqueue_cpumask (new_xcpus_state );
28542843
28552844 /* Force update if switching back to member */
2856- update_cpumasks_hier (cs , & tmpmask , !new_prs ? HIER_CHECKALL : 0 );
2845+ update_cpumasks_hier (cs , & tmpmask , !new_prs );
28572846
28582847 /* Update sched domains and load balance flag */
28592848 update_partition_sd_lb (cs , old_prs );