
Commit 3324b21

pdxChen authored and Peter Zijlstra committed
sched/fair: Skip sched_balance_running cmpxchg when balance is not due
The NUMA sched domain sets the SD_SERIALIZE flag by default, allowing
only one NUMA load balancing operation to run system-wide at a time.
Currently, each sched group leader directly under the NUMA domain
attempts to acquire the global sched_balance_running flag via cmpxchg()
before checking whether load balancing is due or whether it is the
designated load balancer for that NUMA domain. On systems with a large
number of cores, this causes significant cache contention on the shared
sched_balance_running flag.

This patch reduces unnecessary cmpxchg() operations by first checking,
via should_we_balance(), that this CPU is the designated balancer for
the NUMA domain, and that the balance interval has expired, before
trying to acquire sched_balance_running to load balance that domain.

On a 2-socket Granite Rapids system with sub-NUMA clustering enabled,
running an OLTP workload, 7.8% of total CPU cycles were spent in
sched_balance_domains() contending on sched_balance_running before this
change:

         :    104   static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
         :    105   {
         :    106           return arch_cmpxchg(&v->counter, old, new);
    0.00 :   ffffffff81326e6c: xor    %eax,%eax
    0.00 :   ffffffff81326e6e: mov    $0x1,%ecx
    0.00 :   ffffffff81326e73: lock cmpxchg %ecx,0x2394195(%rip)  # ffffffff836bb010 <sched_balance_running>
         :    110   sched_balance_domains():
         :    12234         if (atomic_cmpxchg_acquire(&sched_balance_running, 0, 1))
   99.39 :   ffffffff81326e7b: test   %eax,%eax
    0.00 :   ffffffff81326e7d: jne    ffffffff81326e99 <sched_balance_domains+0x209>
         :    12238         if (time_after_eq(jiffies, sd->last_balance + interval)) {
    0.00 :   ffffffff81326e7f: mov    0x14e2b3a(%rip),%rax  # ffffffff828099c0 <jiffies_64>
    0.00 :   ffffffff81326e86: sub    0x48(%r14),%rax
    0.00 :   ffffffff81326e8a: cmp    %rdx,%rax

After applying this fix, sched_balance_domains() is gone from the
profile and there is a 5% throughput improvement.

[peterz: made it so that redo retains the 'lock' and split out the
 CPU_NEWLY_IDLE change to a separate patch]

Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Chen Yu <yu.c.chen@intel.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Reviewed-by: K Prateek Nayak <kprateek.nayak@amd.com>
Reviewed-by: Srikar Dronamraju <srikar@linux.ibm.com>
Tested-by: Mohini Narkhede <mohini.narkhede@intel.com>
Tested-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Link: https://patch.msgid.link/6fed119b723c71552943bfe5798c93851b30a361.1762800251.git.tim.c.chen@linux.intel.com
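To make the new ordering concrete before the diff, here is a minimal userspace sketch in C11 (the names balance_running, is_designated_balancer(), balance_is_due() and try_serialized_balance() are invented stand-ins, not the kernel implementation): the cheap per-domain checks run first, and only a CPU whose balance is actually due ever issues the contended compare-and-exchange on the shared flag.

#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

static atomic_int balance_running;      /* stand-in for the shared sched_balance_running flag */

/* Hypothetical stand-ins for should_we_balance() and the interval check. */
static bool is_designated_balancer(int cpu)
{
        return cpu == 0;
}

static bool balance_is_due(time_t last_balance, time_t interval)
{
        return time(NULL) >= last_balance + interval;
}

static bool try_serialized_balance(int cpu, time_t last_balance, time_t interval)
{
        int zero = 0;

        /*
         * Cheap, mostly-local checks first: CPUs whose balance is not due,
         * or that are not the designated balancer, return here and never
         * touch the shared cacheline at all.
         */
        if (!is_designated_balancer(cpu) || !balance_is_due(last_balance, interval))
                return false;

        /* Only now contend for the global serialization token (acquire). */
        if (!atomic_compare_exchange_strong_explicit(&balance_running, &zero, 1,
                                                     memory_order_acquire,
                                                     memory_order_relaxed))
                return false;   /* someone else is balancing; skip, do not retry */

        /* ... perform the load-balancing pass here ... */

        /* Drop the token (release), pairing with the acquire above. */
        atomic_store_explicit(&balance_running, 0, memory_order_release);
        return true;
}

The patch below achieves the same effect in the kernel by moving the serialization into sched_balance_rq(), after the should_we_balance() check, while sched_balance_domains() only calls sched_balance_rq() once time_after_eq(jiffies, sd->last_balance + interval) holds.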
1 parent 65177ea · commit 3324b21

1 file changed

Lines changed: 28 additions & 26 deletions

kernel/sched/fair.c

@@ -11680,6 +11680,21 @@ static void update_lb_imbalance_stat(struct lb_env *env, struct sched_domain *sd
         }
 }
 
+/*
+ * This flag serializes load-balancing passes over large domains
+ * (above the NODE topology level) - only one load-balancing instance
+ * may run at a time, to reduce overhead on very large systems with
+ * lots of CPUs and large NUMA distances.
+ *
+ * - Note that load-balancing passes triggered while another one
+ *   is executing are skipped and not re-tried.
+ *
+ * - Also note that this does not serialize rebalance_domains()
+ *   execution, as non-SD_SERIALIZE domains will still be
+ *   load-balanced in parallel.
+ */
+static atomic_t sched_balance_running = ATOMIC_INIT(0);
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
@@ -11705,6 +11720,7 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
                 .fbq_type       = all,
                 .tasks          = LIST_HEAD_INIT(env.tasks),
         };
+        bool need_unlock = false;
 
         cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
 
@@ -11716,6 +11732,14 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
                 goto out_balanced;
         }
 
+        if (!need_unlock && (sd->flags & SD_SERIALIZE) && idle != CPU_NEWLY_IDLE) {
+                int zero = 0;
+                if (!atomic_try_cmpxchg_acquire(&sched_balance_running, &zero, 1))
+                        goto out_balanced;
+
+                need_unlock = true;
+        }
+
         group = sched_balance_find_src_group(&env);
         if (!group) {
                 schedstat_inc(sd->lb_nobusyg[idle]);
@@ -11956,6 +11980,9 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
             sd->balance_interval < sd->max_interval)
                 sd->balance_interval *= 2;
 out:
+        if (need_unlock)
+                atomic_set_release(&sched_balance_running, 0);
+
         return ld_moved;
 }
 
@@ -12080,21 +12107,6 @@ static int active_load_balance_cpu_stop(void *data)
         return 0;
 }
 
-/*
- * This flag serializes load-balancing passes over large domains
- * (above the NODE topology level) - only one load-balancing instance
- * may run at a time, to reduce overhead on very large systems with
- * lots of CPUs and large NUMA distances.
- *
- * - Note that load-balancing passes triggered while another one
- *   is executing are skipped and not re-tried.
- *
- * - Also note that this does not serialize rebalance_domains()
- *   execution, as non-SD_SERIALIZE domains will still be
- *   load-balanced in parallel.
- */
-static atomic_t sched_balance_running = ATOMIC_INIT(0);
-
 /*
  * Scale the max sched_balance_rq interval with the number of CPUs in the system.
  * This trades load-balance latency on larger machines for less cross talk.
@@ -12150,7 +12162,7 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
         /* Earliest time when we have to do rebalance again */
         unsigned long next_balance = jiffies + 60*HZ;
         int update_next_balance = 0;
-        int need_serialize, need_decay = 0;
+        int need_decay = 0;
         u64 max_cost = 0;
 
         rcu_read_lock();
@@ -12174,13 +12186,6 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
                 }
 
                 interval = get_sd_balance_interval(sd, busy);
-
-                need_serialize = sd->flags & SD_SERIALIZE;
-                if (need_serialize) {
-                        if (atomic_cmpxchg_acquire(&sched_balance_running, 0, 1))
-                                goto out;
-                }
-
                 if (time_after_eq(jiffies, sd->last_balance + interval)) {
                         if (sched_balance_rq(cpu, rq, sd, idle, &continue_balancing)) {
                                 /*
@@ -12194,9 +12199,6 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
                                 sd->last_balance = jiffies;
                                 interval = get_sd_balance_interval(sd, busy);
                         }
-                if (need_serialize)
-                        atomic_set_release(&sched_balance_running, 0);
-out:
                 if (time_after(next_balance, sd->last_balance + interval)) {
                         next_balance = sd->last_balance + interval;
                         update_next_balance = 1;
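One detail of the new sched_balance_rq() hunk worth noting: atomic_try_cmpxchg_acquire() takes a pointer to the expected value and overwrites it with the observed value on failure, which is why the patch declares a local int zero rather than passing a constant. The C11 analogue below (a standalone userspace sketch, not kernel code) shows the same contract.

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
        atomic_int flag = 1;    /* token already held, like a busy sched_balance_running */
        int expected = 0;

        /* The exchange fails because flag != expected ... */
        if (!atomic_compare_exchange_strong(&flag, &expected, 1))
                /* ... and 'expected' now holds the observed value, 1. */
                printf("cmpxchg failed, observed %d\n", expected);

        return 0;
}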
