Skip to content

Commit cc2de8c

Browse files
marcanjannau
authored and committed
arm64: Introduce scaffolding to add ACTLR_EL1 to thread state
Some CPUs expose IMPDEF features in ACTLR_EL1 that can be meaningfully controlled per-thread (like TSO control on Apple cores). Add the basic scaffolding to save/restore this register as part of context switching. This mechanism is disabled by default both by config symbol and via a runtime check, which ensures it is never triggered unless the system is known to need it for some feature (which also implies that the layout of ACTLR_EL1 is uniform between all CPU core types). Signed-off-by: Hector Martin <marcan@marcan.st> Reviewed-by: Neal Gompa <neal@gompa.dev>
1 parent 7b2bd31 commit cc2de8c

5 files changed

Lines changed: 44 additions & 0 deletions

File tree

arch/arm64/Kconfig

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -428,6 +428,9 @@ config KASAN_SHADOW_OFFSET
428428
config UNWIND_TABLES
429429
bool
430430

431+
config ARM64_ACTLR_STATE
432+
bool
433+
431434
source "arch/arm64/Kconfig.platforms"
432435

433436
menu "Kernel Features"

arch/arm64/include/asm/cpufeature.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -915,6 +915,11 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
915915
return 8;
916916
}
917917

918+
static __always_inline bool system_has_actlr_state(void)
919+
{
920+
return false;
921+
}
922+
918923
s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
919924
struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
920925

arch/arm64/include/asm/processor.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -185,6 +185,9 @@ struct thread_struct {
185185
u64 svcr;
186186
u64 tpidr2_el0;
187187
u64 por_el0;
188+
#ifdef CONFIG_ARM64_ACTLR_STATE
189+
u64 actlr;
190+
#endif
188191
};
189192

190193
static inline unsigned int thread_get_vl(struct thread_struct *thread,

arch/arm64/kernel/process.c

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -385,6 +385,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
385385
if (system_supports_poe())
386386
p->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
387387

388+
#ifdef CONFIG_ARM64_ACTLR_STATE
389+
if (system_has_actlr_state())
390+
p->thread.actlr = read_sysreg(actlr_el1);
391+
#endif
392+
388393
if (stack_start) {
389394
if (is_compat_thread(task_thread_info(p)))
390395
childregs->compat_sp = stack_start;
@@ -585,6 +590,25 @@ int arch_prctl_mem_model_set(struct task_struct *t, unsigned long val)
585590
}
586591
#endif
587592

593+
#ifdef CONFIG_ARM64_ACTLR_STATE
594+
/*
595+
* IMPDEF control register ACTLR_EL1 handling. Some CPUs use this to
596+
* expose features that can be controlled by userspace.
597+
*/
598+
static void actlr_thread_switch(struct task_struct *next)
599+
{
600+
if (!system_has_actlr_state())
601+
return;
602+
603+
current->thread.actlr = read_sysreg(actlr_el1);
604+
write_sysreg(next->thread.actlr, actlr_el1);
605+
}
606+
#else
607+
static inline void actlr_thread_switch(struct task_struct *next)
608+
{
609+
}
610+
#endif
611+
588612
/*
589613
* Thread switching.
590614
*/
@@ -603,6 +627,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
603627
cntkctl_thread_switch(prev, next);
604628
ptrauth_thread_switch_user(next);
605629
permission_overlay_switch(next);
630+
actlr_thread_switch(next);
606631

607632
/*
608633
* Complete any pending TLB or cache maintenance on this CPU in case

arch/arm64/kernel/setup.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -363,6 +363,14 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
363363
*/
364364
init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
365365
#endif
366+
#ifdef CONFIG_ARM64_ACTLR_STATE
367+
/* Store the boot CPU ACTLR_EL1 value as the default. This will only
368+
* be actually restored during context switching iff the platform is
369+
* known to use ACTLR_EL1 for exposable features and its layout is
370+
* known to be the same on all CPUs.
371+
*/
372+
init_task.thread.actlr = read_sysreg(actlr_el1);
373+
#endif
366374

367375
if (boot_args[1] || boot_args[2] || boot_args[3]) {
368376
pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"

0 commit comments

Comments (0)