Skip to content

Commit 99cf983

Browse files
mrutland-arm (Mark Rutland) authored and Peter Zijlstra committed
sched/preempt: Add PREEMPT_DYNAMIC using static keys
Where an architecture selects HAVE_STATIC_CALL but not HAVE_STATIC_CALL_INLINE, each static call has an out-of-line trampoline which will either branch to a callee or return to the caller. On such architectures, a number of constraints can conspire to make those trampolines more complicated and potentially less useful than we'd like. For example: * Hardware and software control flow integrity schemes can require the addition of "landing pad" instructions (e.g. `BTI` for arm64), which will also be present at the "real" callee. * Limited branch ranges can require that trampolines generate or load an address into a register and perform an indirect branch (or at least have a slow path that does so). This loses some of the benefits of having a direct branch. * Interaction with SW CFI schemes can be complicated and fragile, e.g. requiring that we can recognise idiomatic codegen and remove indirections, at least until clang provides more helpful mechanisms for dealing with this. For PREEMPT_DYNAMIC, we don't need the full power of static calls, as we really only need to enable/disable specific preemption functions. We can achieve the same effect without a number of the pain points above by using static keys to fold early returns into the preemption functions themselves rather than in an out-of-line trampoline, effectively inlining the trampoline into the start of the function. For arm64, this results in good code generation. For example, the dynamic_cond_resched() wrapper looks as follows when enabled. When disabled, the first `B` is replaced with a `NOP`, resulting in an early return. | <dynamic_cond_resched>: | bti c | b <dynamic_cond_resched+0x10> // or `nop` | mov w0, #0x0 | ret | mrs x0, sp_el0 | ldr x0, [x0, #8] | cbnz x0, <dynamic_cond_resched+0x8> | paciasp | stp x29, x30, [sp, #-16]! | mov x29, sp | bl <preempt_schedule_common> | mov w0, #0x1 | ldp x29, x30, [sp], #16 | autiasp | ret ... 
compared to the regular form of the function: | <__cond_resched>: | bti c | mrs x0, sp_el0 | ldr x1, [x0, #8] | cbz x1, <__cond_resched+0x18> | mov w0, #0x0 | ret | paciasp | stp x29, x30, [sp, #-16]! | mov x29, sp | bl <preempt_schedule_common> | mov w0, #0x1 | ldp x29, x30, [sp], #16 | autiasp | ret Any architecture which implements static keys should be able to use this to implement PREEMPT_DYNAMIC with similar cost to non-inlined static calls. Since this is likely to have greater overhead than (inlined) static calls, PREEMPT_DYNAMIC is only defaulted to enabled when HAVE_PREEMPT_DYNAMIC_CALL is selected. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Acked-by: Ard Biesheuvel <ardb@kernel.org> Acked-by: Frederic Weisbecker <frederic@kernel.org> Link: https://lore.kernel.org/r/20220214165216.2231574-6-mark.rutland@arm.com
1 parent 33c6473 commit 99cf983

8 files changed

Lines changed: 122 additions & 11 deletions

File tree

arch/Kconfig

Lines changed: 33 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1278,11 +1278,41 @@ config HAVE_STATIC_CALL_INLINE
12781278

12791279
config HAVE_PREEMPT_DYNAMIC
12801280
bool
1281+
1282+
config HAVE_PREEMPT_DYNAMIC_CALL
1283+
bool
12811284
depends on HAVE_STATIC_CALL
1285+
select HAVE_PREEMPT_DYNAMIC
1286+
help
1287+
An architecture should select this if it can handle the preemption
1288+
model being selected at boot time using static calls.
1289+
1290+
Where an architecture selects HAVE_STATIC_CALL_INLINE, any call to a
1291+
preemption function will be patched directly.
1292+
1293+
Where an architecture does not select HAVE_STATIC_CALL_INLINE, any
1294+
call to a preemption function will go through a trampoline, and the
1295+
trampoline will be patched.
1296+
1297+
It is strongly advised to support inline static call to avoid any
1298+
overhead.
1299+
1300+
config HAVE_PREEMPT_DYNAMIC_KEY
1301+
bool
1302+
depends on HAVE_ARCH_JUMP_LABEL && CC_HAS_ASM_GOTO
1303+
select HAVE_PREEMPT_DYNAMIC
12821304
help
1283-
Select this if the architecture support boot time preempt setting
1284-
on top of static calls. It is strongly advised to support inline
1285-
static call to avoid any overhead.
1305+
An architecture should select this if it can handle the preemption
1306+
model being selected at boot time using static keys.
1307+
1308+
Each preemption function will be given an early return based on a
1309+
static key. This should have slightly lower overhead than non-inline
1310+
static calls, as this effectively inlines each trampoline into the
1311+
start of its callee. This may avoid redundant work, and may
1312+
integrate better with CFI schemes.
1313+
1314+
This will have greater overhead than using inline static calls as
1315+
the call to the preemption function cannot be entirely elided.
12861316

12871317
config ARCH_WANT_LD_ORPHAN_WARN
12881318
bool

arch/x86/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -245,7 +245,7 @@ config X86
245245
select HAVE_STACK_VALIDATION if X86_64
246246
select HAVE_STATIC_CALL
247247
select HAVE_STATIC_CALL_INLINE if HAVE_STACK_VALIDATION
248-
select HAVE_PREEMPT_DYNAMIC
248+
select HAVE_PREEMPT_DYNAMIC_CALL
249249
select HAVE_RSEQ
250250
select HAVE_SYSCALL_TRACEPOINTS
251251
select HAVE_UNSTABLE_SCHED_CLOCK

include/linux/entry-common.h

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -456,13 +456,19 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
456456
*/
457457
void raw_irqentry_exit_cond_resched(void);
458458
#ifdef CONFIG_PREEMPT_DYNAMIC
459+
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
459460
#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
460461
#define irqentry_exit_cond_resched_dynamic_disabled NULL
461462
DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
462463
#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)()
463-
#else
464-
#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
464+
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
465+
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
466+
void dynamic_irqentry_exit_cond_resched(void);
467+
#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
465468
#endif
469+
#else /* CONFIG_PREEMPT_DYNAMIC */
470+
#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
471+
#endif /* CONFIG_PREEMPT_DYNAMIC */
466472

467473
/**
468474
* irqentry_exit - Handle return from exception that used irqentry_enter()

include/linux/kernel.h

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,7 @@ struct user;
9999
extern int __cond_resched(void);
100100
# define might_resched() __cond_resched()
101101

102-
#elif defined(CONFIG_PREEMPT_DYNAMIC)
102+
#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
103103

104104
extern int __cond_resched(void);
105105

@@ -110,6 +110,11 @@ static __always_inline void might_resched(void)
110110
static_call_mod(might_resched)();
111111
}
112112

113+
#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
114+
115+
extern int dynamic_might_resched(void);
116+
# define might_resched() dynamic_might_resched()
117+
113118
#else
114119

115120
# define might_resched() do { } while (0)

include/linux/sched.h

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2020,7 +2020,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
20202020
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
20212021
extern int __cond_resched(void);
20222022

2023-
#ifdef CONFIG_PREEMPT_DYNAMIC
2023+
#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
20242024

20252025
DECLARE_STATIC_CALL(cond_resched, __cond_resched);
20262026

@@ -2029,6 +2029,14 @@ static __always_inline int _cond_resched(void)
20292029
return static_call_mod(cond_resched)();
20302030
}
20312031

2032+
#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
2033+
extern int dynamic_cond_resched(void);
2034+
2035+
static __always_inline int _cond_resched(void)
2036+
{
2037+
return dynamic_cond_resched();
2038+
}
2039+
20322040
#else
20332041

20342042
static inline int _cond_resched(void)

kernel/Kconfig.preempt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -96,8 +96,9 @@ config PREEMPTION
9696
config PREEMPT_DYNAMIC
9797
bool "Preemption behaviour defined on boot"
9898
depends on HAVE_PREEMPT_DYNAMIC && !PREEMPT_RT
99+
select JUMP_LABEL if HAVE_PREEMPT_DYNAMIC_KEY
99100
select PREEMPT_BUILD
100-
default y
101+
default y if HAVE_PREEMPT_DYNAMIC_CALL
101102
help
102103
This option allows to define the preemption model on the kernel
103104
command line parameter and thus override the default preemption

kernel/entry/common.c

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
#include <linux/context_tracking.h>
44
#include <linux/entry-common.h>
55
#include <linux/highmem.h>
6+
#include <linux/jump_label.h>
67
#include <linux/livepatch.h>
78
#include <linux/audit.h>
89
#include <linux/tick.h>
@@ -392,7 +393,17 @@ void raw_irqentry_exit_cond_resched(void)
392393
}
393394
}
394395
#ifdef CONFIG_PREEMPT_DYNAMIC
396+
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
395397
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
398+
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
399+
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
400+
void dynamic_irqentry_exit_cond_resched(void)
401+
{
402+
if (!static_key_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
403+
return;
404+
raw_irqentry_exit_cond_resched();
405+
}
406+
#endif
396407
#endif
397408

398409
noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)

kernel/sched/core.c

Lines changed: 52 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414

1515
#include <linux/nospec.h>
1616
#include <linux/blkdev.h>
17+
#include <linux/jump_label.h>
1718
#include <linux/kcov.h>
1819
#include <linux/scs.h>
1920

@@ -6484,21 +6485,31 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
64846485
*/
64856486
if (likely(!preemptible()))
64866487
return;
6487-
64886488
preempt_schedule_common();
64896489
}
64906490
NOKPROBE_SYMBOL(preempt_schedule);
64916491
EXPORT_SYMBOL(preempt_schedule);
64926492

64936493
#ifdef CONFIG_PREEMPT_DYNAMIC
6494+
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
64946495
#ifndef preempt_schedule_dynamic_enabled
64956496
#define preempt_schedule_dynamic_enabled preempt_schedule
64966497
#define preempt_schedule_dynamic_disabled NULL
64976498
#endif
64986499
DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
64996500
EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
6501+
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6502+
static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
6503+
void __sched notrace dynamic_preempt_schedule(void)
6504+
{
6505+
if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
6506+
return;
6507+
preempt_schedule();
6508+
}
6509+
NOKPROBE_SYMBOL(dynamic_preempt_schedule);
6510+
EXPORT_SYMBOL(dynamic_preempt_schedule);
6511+
#endif
65006512
#endif
6501-
65026513

65036514
/**
65046515
* preempt_schedule_notrace - preempt_schedule called by tracing
@@ -6553,12 +6564,24 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
65536564
EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
65546565

65556566
#ifdef CONFIG_PREEMPT_DYNAMIC
6567+
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
65566568
#ifndef preempt_schedule_notrace_dynamic_enabled
65576569
#define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
65586570
#define preempt_schedule_notrace_dynamic_disabled NULL
65596571
#endif
65606572
DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
65616573
EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
6574+
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6575+
static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
6576+
void __sched notrace dynamic_preempt_schedule_notrace(void)
6577+
{
6578+
if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
6579+
return;
6580+
preempt_schedule_notrace();
6581+
}
6582+
NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
6583+
EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
6584+
#endif
65626585
#endif
65636586

65646587
#endif /* CONFIG_PREEMPTION */
@@ -8068,6 +8091,7 @@ EXPORT_SYMBOL(__cond_resched);
80688091
#endif
80698092

80708093
#ifdef CONFIG_PREEMPT_DYNAMIC
8094+
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
80718095
#define cond_resched_dynamic_enabled __cond_resched
80728096
#define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
80738097
DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
@@ -8077,6 +8101,25 @@ EXPORT_STATIC_CALL_TRAMP(cond_resched);
80778101
#define might_resched_dynamic_disabled ((void *)&__static_call_return0)
80788102
DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
80798103
EXPORT_STATIC_CALL_TRAMP(might_resched);
8104+
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
8105+
static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
8106+
int __sched dynamic_cond_resched(void)
8107+
{
8108+
if (!static_branch_unlikely(&sk_dynamic_cond_resched))
8109+
return 0;
8110+
return __cond_resched();
8111+
}
8112+
EXPORT_SYMBOL(dynamic_cond_resched);
8113+
8114+
static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
8115+
int __sched dynamic_might_resched(void)
8116+
{
8117+
if (!static_branch_unlikely(&sk_dynamic_might_resched))
8118+
return 0;
8119+
return __cond_resched();
8120+
}
8121+
EXPORT_SYMBOL(dynamic_might_resched);
8122+
#endif
80808123
#endif
80818124

80828125
/*
@@ -8206,8 +8249,15 @@ int sched_dynamic_mode(const char *str)
82068249
return -EINVAL;
82078250
}
82088251

8252+
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
82098253
#define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
82108254
#define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
8255+
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
8256+
#define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key)
8257+
#define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key)
8258+
#else
8259+
#error "Unsupported PREEMPT_DYNAMIC mechanism"
8260+
#endif
82118261

82128262
void sched_dynamic_update(int mode)
82138263
{

0 commit comments

Comments
 (0)