Skip to content

Commit 7dec062

Browse files
glemco authored and KAGA-KOKO committed
timers/migration: Exclude isolated cpus from hierarchy
The timer migration mechanism allows active CPUs to pull timers from idle ones to improve the overall idle time. This is however undesired when CPU intensive workloads run on isolated cores, as the algorithm would move the timers from housekeeping to isolated cores, negatively affecting the isolation. Exclude isolated cores from the timer migration algorithm, extend the concept of unavailable cores, currently used for offline ones, to isolated ones: * A core is unavailable if isolated or offline; * A core is available if non isolated and online; A core is considered unavailable as isolated if it belongs to: * the isolcpus (domain) list * an isolated cpuset Except if it is: * in the nohz_full list (already idle for the hierarchy) * the nohz timekeeper core (must be available to handle global timers) CPUs are added to the hierarchy during late boot, excluding isolated ones, the hierarchy is also adapted when the cpuset isolation changes. Due to how the timer migration algorithm works, any CPU part of the hierarchy can have their global timers pulled by remote CPUs and have to pull remote timers, only skipping pulling remote timers would break the logic. For this reason, prevent isolated CPUs from pulling remote global timers, but also the other way around: any global timer started on an isolated CPU will run there. This does not break the concept of isolation (global timers don't come from outside the CPU) and, if considered inappropriate, can usually be mitigated with other isolation techniques (e.g. IRQ pinning). This effect was noticed on a 128 cores machine running oslat on the isolated cores (1-31,33-63,65-95,97-127). The tool monopolises CPUs, and the CPU with lowest count in a timer migration hierarchy (here 1 and 65) appears as always active and continuously pulls global timers, from the housekeeping CPUs. This ends up moving driver work (e.g. 
delayed work) to isolated CPUs and causes latency spikes: before the change: # oslat -c 1-31,33-63,65-95,97-127 -D 62s ... Maximum: 1203 10 3 4 ... 5 (us) after the change: # oslat -c 1-31,33-63,65-95,97-127 -D 62s ... Maximum: 10 4 3 4 3 ... 5 (us) The same behaviour was observed on a machine with as few as 20 cores / 40 threads with isolcpus set to: 1-9,11-39 with rtla-osnoise-top. Signed-off-by: Gabriele Monaco <gmonaco@redhat.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Tested-by: John B. Wyatt IV <jwyatt@redhat.com> Reviewed-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Frederic Weisbecker <frederic@kernel.org> Link: https://patch.msgid.link/20251120145653.296659-8-gmonaco@redhat.com
1 parent b566510 commit 7dec062

3 files changed

Lines changed: 155 additions & 0 deletions

File tree

include/linux/timer.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -188,4 +188,13 @@ int timers_dead_cpu(unsigned int cpu);
188188
#define timers_dead_cpu NULL
189189
#endif
190190

191+
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
/* Remove @exclude_cpumask CPUs from the timer migration hierarchy. */
extern int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask);
#else
/*
 * Without SMP + NO_HZ_COMMON there is no timer migration hierarchy,
 * so there is nothing to exclude: report success.
 */
static inline int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
{
	return 0;
}
#endif
199+
191200
#endif

kernel/cgroup/cpuset.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1350,6 +1350,9 @@ static void update_isolation_cpumasks(bool isolcpus_updated)
13501350

13511351
ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
13521352
WARN_ON_ONCE(ret < 0);
1353+
1354+
ret = tmigr_isolated_exclude_cpumask(isolated_cpus);
1355+
WARN_ON_ONCE(ret < 0);
13531356
}
13541357

13551358
/**

kernel/time/timer_migration.c

Lines changed: 143 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
#include <linux/spinlock.h>
1111
#include <linux/timerqueue.h>
1212
#include <trace/events/ipi.h>
13+
#include <linux/sched/isolation.h>
1314

1415
#include "timer_migration.h"
1516
#include "tick-internal.h"
@@ -427,8 +428,13 @@ static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
427428
/*
 * CPUs available for timer migration.
 * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
 * Additionally tmigr_available_mutex serializes set/clear operations with each other.
 */
static cpumask_var_t tmigr_available_cpumask;
static DEFINE_MUTEX(tmigr_available_mutex);

/*
 * Gates the isolation checks: enabled during late initcall
 * (tmigr_init_isolation()), so early boot always sees CPUs as
 * non-isolated.
 */
static DEFINE_STATIC_KEY_FALSE(tmigr_exclude_isolated);

/* Sentinel for "no CPU/group number assigned" */
#define TMIGR_NONE	0xFF
/* Width in bits of one per-level state field */
#define BIT_CNT		8
@@ -438,6 +444,33 @@ static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
438444
return !(tmc->tmgroup && tmc->available);
439445
}
440446

447+
/*
448+
* Returns true if @cpu should be excluded from the hierarchy as isolated.
449+
* Domain isolated CPUs don't participate in timer migration, nohz_full CPUs
450+
* are still part of the hierarchy but become idle (from a tick and timer
451+
* migration perspective) when they stop their tick. This lets the timekeeping
452+
* CPU handle their global timers. Marking also isolated CPUs as idle would be
453+
* too costly, hence they are completely excluded from the hierarchy.
454+
* This check is necessary, for instance, to prevent offline isolated CPUs from
455+
* being incorrectly marked as available once getting back online.
456+
*
457+
* This function returns false during early boot and the isolation logic is
458+
* enabled only after isolated CPUs are marked as unavailable at late boot.
459+
* The tick CPU can be isolated at boot, however we cannot mark it as
460+
* unavailable to avoid having no global migrator for the nohz_full CPUs. This
461+
* should be ensured by the callers of this function: implicitly from hotplug
462+
* callbacks and explicitly in tmigr_init_isolation() and
463+
* tmigr_isolated_exclude_cpumask().
464+
*/
465+
static inline bool tmigr_is_isolated(int cpu)
466+
{
467+
if (!static_branch_unlikely(&tmigr_exclude_isolated))
468+
return false;
469+
return (!housekeeping_cpu(cpu, HK_TYPE_DOMAIN) ||
470+
cpuset_cpu_is_isolated(cpu)) &&
471+
housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE);
472+
}
473+
441474
/*
442475
* Returns true, when @childmask corresponds to the group migrator or when the
443476
* group is not active - so no migrator is set.
@@ -1439,8 +1472,12 @@ static int tmigr_clear_cpu_available(unsigned int cpu)
14391472
int migrator;
14401473
u64 firstexp;
14411474

1475+
guard(mutex)(&tmigr_available_mutex);
1476+
14421477
cpumask_clear_cpu(cpu, tmigr_available_cpumask);
14431478
scoped_guard(raw_spinlock_irq, &tmc->lock) {
1479+
if (!tmc->available)
1480+
return 0;
14441481
tmc->available = false;
14451482
WRITE_ONCE(tmc->wakeup, KTIME_MAX);
14461483

@@ -1468,8 +1505,15 @@ static int tmigr_set_cpu_available(unsigned int cpu)
14681505
if (WARN_ON_ONCE(!tmc->tmgroup))
14691506
return -EINVAL;
14701507

1508+
if (tmigr_is_isolated(cpu))
1509+
return 0;
1510+
1511+
guard(mutex)(&tmigr_available_mutex);
1512+
14711513
cpumask_set_cpu(cpu, tmigr_available_cpumask);
14721514
scoped_guard(raw_spinlock_irq, &tmc->lock) {
1515+
if (tmc->available)
1516+
return 0;
14731517
trace_tmigr_cpu_available(tmc);
14741518
tmc->idle = timer_base_is_idle();
14751519
if (!tmc->idle)
@@ -1479,6 +1523,105 @@ static int tmigr_set_cpu_available(unsigned int cpu)
14791523
return 0;
14801524
}
14811525

1526+
/*
 * Work callback: runs on the target CPU (scheduled via schedule_work_on())
 * and removes that CPU from the timer migration hierarchy.
 */
static void tmigr_cpu_isolate(struct work_struct *ignored)
{
	tmigr_clear_cpu_available(smp_processor_id());
}
1530+
1531+
/*
 * Work callback: runs on the target CPU (scheduled via schedule_work_on())
 * and brings that CPU back into the timer migration hierarchy.
 */
static void tmigr_cpu_unisolate(struct work_struct *ignored)
{
	tmigr_set_cpu_available(smp_processor_id());
}
1535+
1536+
/**
 * tmigr_isolated_exclude_cpumask - Exclude given CPUs from hierarchy
 * @exclude_cpumask: the cpumask to be excluded from timer migration hierarchy
 *
 * This function can be called from cpuset code to provide the new set of
 * isolated CPUs that should be excluded from the hierarchy.
 * Online CPUs not present in exclude_cpumask but already excluded are brought
 * back to the hierarchy.
 * Functions to isolate/unisolate need to be called locally and can sleep.
 *
 * Returns 0 on success, -ENOMEM if the temporary allocations fail.
 * Caller must hold cpus_read_lock (see lockdep_assert_cpus_held() below).
 */
int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
{
	/* Auto-freed on every return path via the __free() cleanup attribute. */
	struct work_struct __percpu *works __free(free_percpu) =
			alloc_percpu(struct work_struct);
	cpumask_var_t cpumask __free(free_cpumask_var) = CPUMASK_VAR_NULL;
	int cpu;

	lockdep_assert_cpus_held();

	if (!works)
		return -ENOMEM;
	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * First set previously isolated CPUs as available (unisolate).
	 * This cpumask contains only CPUs that switched to available now.
	 */
	cpumask_andnot(cpumask, cpu_online_mask, exclude_cpumask);
	cpumask_andnot(cpumask, cpumask, tmigr_available_cpumask);

	/* Queue all unisolate works first, then wait: per-CPU work runs in parallel. */
	for_each_cpu(cpu, cpumask) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, tmigr_cpu_unisolate);
		schedule_work_on(cpu, work);
	}
	for_each_cpu(cpu, cpumask)
		flush_work(per_cpu_ptr(works, cpu));

	/*
	 * Then clear previously available CPUs (isolate).
	 * This cpumask contains only CPUs that switched to not available now.
	 * There cannot be overlap with the newly available ones.
	 */
	cpumask_and(cpumask, exclude_cpumask, tmigr_available_cpumask);
	/* nohz_full CPUs are never part of the hierarchy: nothing to isolate. */
	cpumask_and(cpumask, cpumask, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE));
	/*
	 * Handle this here and not in the cpuset code because exclude_cpumask
	 * might include also the tick CPU if included in isolcpus.
	 */
	for_each_cpu(cpu, cpumask) {
		if (!tick_nohz_cpu_hotpluggable(cpu)) {
			/*
			 * The tick/timekeeping CPU must stay available;
			 * presumably at most one CPU is non-hotpluggable,
			 * hence the early break.
			 */
			cpumask_clear_cpu(cpu, cpumask);
			break;
		}
	}

	/* Same queue-then-flush pattern as the unisolate pass above. */
	for_each_cpu(cpu, cpumask) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, tmigr_cpu_isolate);
		schedule_work_on(cpu, work);
	}
	for_each_cpu(cpu, cpumask)
		flush_work(per_cpu_ptr(works, cpu));

	return 0;
}
1605+
1606+
/*
 * Late-boot setup: turn on the isolation checks (tmigr_exclude_isolated
 * static key) and remove boot-time (isolcpus) isolated CPUs from the
 * hierarchy. Runs as a late_initcall so all CPUs are already up.
 */
static int __init tmigr_init_isolation(void)
{
	cpumask_var_t cpumask __free(free_cpumask_var) = CPUMASK_VAR_NULL;

	/* From now on tmigr_is_isolated() can return true. */
	static_branch_enable(&tmigr_exclude_isolated);

	/* No isolcpus= domain isolation configured: nothing to exclude. */
	if (!housekeeping_enabled(HK_TYPE_DOMAIN))
		return 0;
	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;

	/* Exclude every CPU outside the housekeeping (domain) set. */
	cpumask_andnot(cpumask, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN));

	/* Protect against RCU torture hotplug testing */
	guard(cpus_read_lock)();
	return tmigr_isolated_exclude_cpumask(cpumask);
}
late_initcall(tmigr_init_isolation);
1624+
14821625
static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
14831626
int node)
14841627
{

0 commit comments

Comments
 (0)