Skip to content

Commit e7cf636

Browse files
committed
Merge branch 'for-next/boot' into for-next/core
Boot path cleanups to enable early initialisation of per-cpu operations needed by KCSAN.

* for-next/boot:
  arm64: scs: Drop unused 'tmp' argument to scs_{load,save} asm macros
  arm64: smp: initialize cpu offset earlier
  arm64: smp: unify task and sp setup
  arm64: smp: remove stack from secondary_data
  arm64: smp: remove pointless secondary_data maintenance
  arm64: assembler: add set_this_cpu_offset
2 parents 0b573a0 + 16c230b commit e7cf636

9 files changed

Lines changed: 51 additions & 58 deletions

File tree

arch/arm64/include/asm/assembler.h

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -232,15 +232,23 @@ lr .req x30 // link register
232232
* @dst: destination register
233233
*/
234234
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
235-
.macro this_cpu_offset, dst
235+
.macro get_this_cpu_offset, dst
236236
mrs \dst, tpidr_el2
237237
.endm
238238
#else
239-
.macro this_cpu_offset, dst
239+
.macro get_this_cpu_offset, dst
240240
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
241241
mrs \dst, tpidr_el1
242242
alternative_else
243243
mrs \dst, tpidr_el2
244+
alternative_endif
245+
.endm
246+
247+
.macro set_this_cpu_offset, src
248+
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
249+
msr tpidr_el1, \src
250+
alternative_else
251+
msr tpidr_el2, \src
244252
alternative_endif
245253
.endm
246254
#endif
@@ -253,7 +261,7 @@ alternative_endif
253261
.macro adr_this_cpu, dst, sym, tmp
254262
adrp \tmp, \sym
255263
add \dst, \tmp, #:lo12:\sym
256-
this_cpu_offset \tmp
264+
get_this_cpu_offset \tmp
257265
add \dst, \dst, \tmp
258266
.endm
259267

@@ -264,7 +272,7 @@ alternative_endif
264272
*/
265273
.macro ldr_this_cpu dst, sym, tmp
266274
adr_l \dst, \sym
267-
this_cpu_offset \tmp
275+
get_this_cpu_offset \tmp
268276
ldr \dst, [\dst, \tmp]
269277
.endm
270278

@@ -745,7 +753,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
745753
cbz \tmp, \lbl
746754
#endif
747755
adr_l \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
748-
this_cpu_offset \tmp2
756+
get_this_cpu_offset \tmp2
749757
ldr w\tmp, [\tmp, \tmp2]
750758
cbnz w\tmp, \lbl // yield on pending softirq in task context
751759
.Lnoyield_\@:

arch/arm64/include/asm/scs.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -9,18 +9,18 @@
99
#ifdef CONFIG_SHADOW_CALL_STACK
1010
scs_sp .req x18
1111

12-
.macro scs_load tsk, tmp
12+
.macro scs_load tsk
1313
ldr scs_sp, [\tsk, #TSK_TI_SCS_SP]
1414
.endm
1515

16-
.macro scs_save tsk, tmp
16+
.macro scs_save tsk
1717
str scs_sp, [\tsk, #TSK_TI_SCS_SP]
1818
.endm
1919
#else
20-
.macro scs_load tsk, tmp
20+
.macro scs_load tsk
2121
.endm
2222

23-
.macro scs_save tsk, tmp
23+
.macro scs_save tsk
2424
.endm
2525
#endif /* CONFIG_SHADOW_CALL_STACK */
2626

arch/arm64/include/asm/smp.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -73,12 +73,10 @@ asmlinkage void secondary_start_kernel(void);
7373

7474
/*
7575
* Initial data for bringing up a secondary CPU.
76-
* @stack - sp for the secondary CPU
7776
* @status - Result passed back from the secondary CPU to
7877
* indicate failure.
7978
*/
8079
struct secondary_data {
81-
void *stack;
8280
struct task_struct *task;
8381
long status;
8482
};

arch/arm64/kernel/asm-offsets.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@
2727
int main(void)
2828
{
2929
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
30+
DEFINE(TSK_CPU, offsetof(struct task_struct, cpu));
3031
BLANK();
3132
DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
3233
DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
@@ -99,7 +100,6 @@ int main(void)
99100
DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
100101
DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
101102
BLANK();
102-
DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
103103
DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
104104
BLANK();
105105
DEFINE(FTR_OVR_VAL_OFFSET, offsetof(struct arm64_ftr_override, val));

arch/arm64/kernel/entry.S

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -275,7 +275,7 @@ alternative_else_nop_endif
275275

276276
mte_set_kernel_gcr x22, x23
277277

278-
scs_load tsk, x20
278+
scs_load tsk
279279
.else
280280
add x21, sp, #PT_REGS_SIZE
281281
get_current_task tsk
@@ -375,7 +375,7 @@ alternative_if ARM64_WORKAROUND_845719
375375
alternative_else_nop_endif
376376
#endif
377377
3:
378-
scs_save tsk, x0
378+
scs_save tsk
379379

380380
#ifdef CONFIG_ARM64_PTR_AUTH
381381
alternative_if ARM64_HAS_ADDRESS_AUTH
@@ -979,8 +979,8 @@ SYM_FUNC_START(cpu_switch_to)
979979
mov sp, x9
980980
msr sp_el0, x1
981981
ptrauth_keys_install_kernel x1, x8, x9, x10
982-
scs_save x0, x8
983-
scs_load x1, x8
982+
scs_save x0
983+
scs_load x1
984984
ret
985985
SYM_FUNC_END(cpu_switch_to)
986986
NOKPROBE(cpu_switch_to)

arch/arm64/kernel/head.S

Lines changed: 21 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -395,15 +395,29 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
395395
SYM_FUNC_END(__create_page_tables)
396396

397397
/*
398+
* Initialize CPU registers with task-specific and cpu-specific context.
399+
*
398400
* Create a final frame record at task_pt_regs(current)->stackframe, so
399401
* that the unwinder can identify the final frame record of any task by
400402
* its location in the task stack. We reserve the entire pt_regs space
401403
* for consistency with user tasks and kthreads.
402404
*/
403-
.macro setup_final_frame
405+
.macro init_cpu_task tsk, tmp1, tmp2
406+
msr sp_el0, \tsk
407+
408+
ldr \tmp1, [\tsk, #TSK_STACK]
409+
add sp, \tmp1, #THREAD_SIZE
404410
sub sp, sp, #PT_REGS_SIZE
411+
405412
stp xzr, xzr, [sp, #S_STACKFRAME]
406413
add x29, sp, #S_STACKFRAME
414+
415+
scs_load \tsk
416+
417+
adr_l \tmp1, __per_cpu_offset
418+
ldr w\tmp2, [\tsk, #TSK_CPU]
419+
ldr \tmp1, [\tmp1, \tmp2, lsl #3]
420+
set_this_cpu_offset \tmp1
407421
.endm
408422

409423
/*
@@ -412,22 +426,16 @@ SYM_FUNC_END(__create_page_tables)
412426
* x0 = __PHYS_OFFSET
413427
*/
414428
SYM_FUNC_START_LOCAL(__primary_switched)
415-
adrp x4, init_thread_union
416-
add sp, x4, #THREAD_SIZE
417-
adr_l x5, init_task
418-
msr sp_el0, x5 // Save thread_info
429+
adr_l x4, init_task
430+
init_cpu_task x4, x5, x6
419431

420432
adr_l x8, vectors // load VBAR_EL1 with virtual
421433
msr vbar_el1, x8 // vector table address
422434
isb
423435

424-
stp xzr, x30, [sp, #-16]!
436+
stp x29, x30, [sp, #-16]!
425437
mov x29, sp
426438

427-
#ifdef CONFIG_SHADOW_CALL_STACK
428-
adr_l scs_sp, init_shadow_call_stack // Set shadow call stack
429-
#endif
430-
431439
str_l x21, __fdt_pointer, x5 // Save FDT pointer
432440

433441
ldr_l x4, kimage_vaddr // Save the offset between
@@ -459,8 +467,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
459467
0:
460468
#endif
461469
bl switch_to_vhe // Prefer VHE if possible
462-
add sp, sp, #16
463-
setup_final_frame
470+
ldp x29, x30, [sp], #16
464471
bl start_kernel
465472
ASM_BUG()
466473
SYM_FUNC_END(__primary_switched)
@@ -645,14 +652,10 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
645652
isb
646653

647654
adr_l x0, secondary_data
648-
ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
649-
cbz x1, __secondary_too_slow
650-
mov sp, x1
651655
ldr x2, [x0, #CPU_BOOT_TASK]
652656
cbz x2, __secondary_too_slow
653-
msr sp_el0, x2
654-
scs_load x2, x3
655-
setup_final_frame
657+
658+
init_cpu_task x2, x1, x3
656659

657660
#ifdef CONFIG_ARM64_PTR_AUTH
658661
ptrauth_keys_init_cpu x2, x3, x4, x5

arch/arm64/kernel/setup.c

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -87,12 +87,6 @@ void __init smp_setup_processor_id(void)
8787
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
8888
set_cpu_logical_map(0, mpidr);
8989

90-
/*
91-
* clear __my_cpu_offset on boot CPU to avoid hang caused by
92-
* using percpu variable early, for example, lockdep will
93-
* access percpu variable inside lock_release
94-
*/
95-
set_my_cpu_offset(0);
9690
pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
9791
(unsigned long)mpidr, read_cpuid_id());
9892
}

arch/arm64/kernel/smp.c

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -120,9 +120,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
120120
* page tables.
121121
*/
122122
secondary_data.task = idle;
123-
secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
124123
update_cpu_boot_status(CPU_MMU_OFF);
125-
__flush_dcache_area(&secondary_data, sizeof(secondary_data));
126124

127125
/* Now bring the CPU into our world */
128126
ret = boot_secondary(cpu, idle);
@@ -142,8 +140,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
142140

143141
pr_crit("CPU%u: failed to come online\n", cpu);
144142
secondary_data.task = NULL;
145-
secondary_data.stack = NULL;
146-
__flush_dcache_area(&secondary_data, sizeof(secondary_data));
147143
status = READ_ONCE(secondary_data.status);
148144
if (status == CPU_MMU_OFF)
149145
status = READ_ONCE(__early_cpu_boot_status);
@@ -202,10 +198,7 @@ asmlinkage notrace void secondary_start_kernel(void)
202198
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
203199
struct mm_struct *mm = &init_mm;
204200
const struct cpu_operations *ops;
205-
unsigned int cpu;
206-
207-
cpu = task_cpu(current);
208-
set_my_cpu_offset(per_cpu_offset(cpu));
201+
unsigned int cpu = smp_processor_id();
209202

210203
/*
211204
* All kernel threads share the same mm context; grab a
@@ -452,6 +445,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
452445

453446
void __init smp_prepare_boot_cpu(void)
454447
{
448+
/*
449+
* The runtime per-cpu areas have been allocated by
450+
* setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
451+
* freed shortly, so we must move over to the runtime per-cpu area.
452+
*/
455453
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
456454
cpuinfo_store_boot_cpu();
457455

arch/arm64/mm/proc.S

Lines changed: 2 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -83,11 +83,7 @@ SYM_FUNC_START(cpu_do_suspend)
8383
mrs x9, mdscr_el1
8484
mrs x10, oslsr_el1
8585
mrs x11, sctlr_el1
86-
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
87-
mrs x12, tpidr_el1
88-
alternative_else
89-
mrs x12, tpidr_el2
90-
alternative_endif
86+
get_this_cpu_offset x12
9187
mrs x13, sp_el0
9288
stp x2, x3, [x0]
9389
stp x4, x5, [x0, #16]
@@ -145,11 +141,7 @@ SYM_FUNC_START(cpu_do_resume)
145141
msr mdscr_el1, x10
146142

147143
msr sctlr_el1, x12
148-
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
149-
msr tpidr_el1, x13
150-
alternative_else
151-
msr tpidr_el2, x13
152-
alternative_endif
144+
set_this_cpu_offset x13
153145
msr sp_el0, x14
154146
/*
155147
* Restore oslsr_el1 by writing oslar_el1

0 commit comments

Comments (0)