Skip to content

Commit 505da66

Browse files
guowangy authored and Peter Zijlstra committed
sched/clock: Avoid false sharing for sched_clock_irqtime
Read-mostly sched_clock_irqtime may share the same cacheline with frequently updated nohz struct. Make it as static_key to avoid false sharing issue. The only user of disable_sched_clock_irqtime() is tsc_.*mark_unstable() which may be invoked under atomic context and require a workqueue to disable static_key. But both of them calls clear_sched_clock_stable() just before doing disable_sched_clock_irqtime(). We can reuse "sched_clock_work" to also disable sched_clock_irqtime(). One additional case need to handle is if the tsc is marked unstable before late_initcall() phase, sched_clock_work will not be invoked and sched_clock_irqtime will stay enabled although clock is unstable: tsc_init() enable_sched_clock_irqtime() # irqtime accounting is enabled here ... if (unsynchronized_tsc()) # true mark_tsc_unstable() clear_sched_clock_stable() __sched_clock_stable_early = 0; ... if (static_key_count(&sched_clock_running.key) == 2) # Only happens at sched_clock_init_late() __clear_sched_clock_stable(); # Never executed ... # late_initcall() phase sched_clock_init_late() if (__sched_clock_stable_early) # Already false __set_sched_clock_stable(); # sched_clock is never marked stable # TSC unstable, but sched_clock_work won't run to disable irqtime So we need to disable_sched_clock_irqtime() in sched_clock_init_late() if clock is unstable. 
Reported-by: Benjamin Lei <benjamin.lei@intel.com>
Suggested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Suggested-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Signed-off-by: Wangyang Guo <wangyang.guo@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: K Prateek Nayak <kprateek.nayak@amd.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Tianyou Li <tianyou.li@intel.com>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://patch.msgid.link/20260127072509.2627346-1-wangyang.guo@intel.com
1 parent dd6a37e commit 505da66

4 files changed

Lines changed: 10 additions & 8 deletions

File tree

arch/x86/kernel/tsc.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1143,7 +1143,6 @@ static void tsc_cs_mark_unstable(struct clocksource *cs)
11431143
tsc_unstable = 1;
11441144
if (using_native_sched_clock())
11451145
clear_sched_clock_stable();
1146-
disable_sched_clock_irqtime();
11471146
pr_info("Marking TSC unstable due to clocksource watchdog\n");
11481147
}
11491148

@@ -1213,7 +1212,6 @@ void mark_tsc_unstable(char *reason)
12131212
tsc_unstable = 1;
12141213
if (using_native_sched_clock())
12151214
clear_sched_clock_stable();
1216-
disable_sched_clock_irqtime();
12171215
pr_info("Marking TSC unstable due to %s\n", reason);
12181216

12191217
clocksource_mark_unstable(&clocksource_tsc_early);

kernel/sched/clock.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -173,6 +173,7 @@ notrace static void __sched_clock_work(struct work_struct *work)
173173
scd->tick_gtod, __gtod_offset,
174174
scd->tick_raw, __sched_clock_offset);
175175

176+
disable_sched_clock_irqtime();
176177
static_branch_disable(&__sched_clock_stable);
177178
}
178179

@@ -238,6 +239,8 @@ static int __init sched_clock_init_late(void)
238239

239240
if (__sched_clock_stable_early)
240241
__set_sched_clock_stable();
242+
else
243+
disable_sched_clock_irqtime(); /* disable if clock unstable. */
241244

242245
return 0;
243246
}

kernel/sched/cputime.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,8 @@
1212

1313
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1414

15+
DEFINE_STATIC_KEY_FALSE(sched_clock_irqtime);
16+
1517
/*
1618
* There are no locks covering percpu hardirq/softirq time.
1719
* They are only modified in vtime_account, on corresponding CPU
@@ -25,16 +27,15 @@
2527
*/
2628
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
2729

28-
int sched_clock_irqtime;
29-
3030
void enable_sched_clock_irqtime(void)
3131
{
32-
sched_clock_irqtime = 1;
32+
static_branch_enable(&sched_clock_irqtime);
3333
}
3434

3535
void disable_sched_clock_irqtime(void)
3636
{
37-
sched_clock_irqtime = 0;
37+
if (irqtime_enabled())
38+
static_branch_disable(&sched_clock_irqtime);
3839
}
3940

4041
static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,

kernel/sched/sched.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3333,11 +3333,11 @@ struct irqtime {
33333333
};
33343334

33353335
DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
3336-
extern int sched_clock_irqtime;
3336+
DECLARE_STATIC_KEY_FALSE(sched_clock_irqtime);
33373337

33383338
static inline int irqtime_enabled(void)
33393339
{
3340-
return sched_clock_irqtime;
3340+
return static_branch_likely(&sched_clock_irqtime);
33413341
}
33423342

33433343
/*

0 commit comments

Comments
 (0)