Skip to content

Commit 8b5a0f9

Browse files
KAGA-KOKO authored and Peter Zijlstra
committed
x86/smpboot: Enable split CPU startup
The x86 CPU bringup state currently does AP wake-up, wait for AP to respond and then release it for full bringup. It is safe to be split into a wake-up and and a separate wait+release state. Provide the required functions and enable the split CPU bringup, which prepares for parallel bringup, where the bringup of the non-boot CPUs takes two iterations: One to prepare and wake all APs and the second to wait and release them. Depending on timing this can eliminate the wait time completely. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Michael Kelley <mikelley@microsoft.com> Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name> Tested-by: Helge Deller <deller@gmx.de> # parisc Tested-by: Guilherme G. Piccoli <gpiccoli@igalia.com> # Steam Deck Link: https://lore.kernel.org/r/20230512205257.133453992@linutronix.de
1 parent a631be9 commit 8b5a0f9

5 files changed

Lines changed: 10 additions & 15 deletions

File tree

arch/x86/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -274,8 +274,8 @@ config X86
274274
select HAVE_UNSTABLE_SCHED_CLOCK
275275
select HAVE_USER_RETURN_NOTIFIER
276276
select HAVE_GENERIC_VDSO
277-
select HOTPLUG_CORE_SYNC_FULL if SMP
278277
select HOTPLUG_SMT if SMP
278+
select HOTPLUG_SPLIT_STARTUP if SMP
279279
select IRQ_FORCED_THREADING
280280
select NEED_PER_CPU_EMBED_FIRST_CHUNK
281281
select NEED_PER_CPU_PAGE_FIRST_CHUNK

arch/x86/include/asm/smp.h

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ struct smp_ops {
4040

4141
void (*cleanup_dead_cpu)(unsigned cpu);
4242
void (*poll_sync_state)(void);
43-
int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
43+
int (*kick_ap_alive)(unsigned cpu, struct task_struct *tidle);
4444
int (*cpu_disable)(void);
4545
void (*cpu_die)(unsigned int cpu);
4646
void (*play_dead)(void);
@@ -80,11 +80,6 @@ static inline void smp_cpus_done(unsigned int max_cpus)
8080
smp_ops.smp_cpus_done(max_cpus);
8181
}
8282

83-
static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
84-
{
85-
return smp_ops.cpu_up(cpu, tidle);
86-
}
87-
8883
static inline int __cpu_disable(void)
8984
{
9085
return smp_ops.cpu_disable();
@@ -124,7 +119,7 @@ void native_smp_prepare_cpus(unsigned int max_cpus);
124119
void calculate_max_logical_packages(void);
125120
void native_smp_cpus_done(unsigned int max_cpus);
126121
int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
127-
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
122+
int native_kick_ap(unsigned int cpu, struct task_struct *tidle);
128123
int native_cpu_disable(void);
129124
void __noreturn hlt_play_dead(void);
130125
void native_play_dead(void);

arch/x86/kernel/smp.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -268,7 +268,7 @@ struct smp_ops smp_ops = {
268268
#endif
269269
.smp_send_reschedule = native_smp_send_reschedule,
270270

271-
.cpu_up = native_cpu_up,
271+
.kick_ap_alive = native_kick_ap,
272272
.cpu_disable = native_cpu_disable,
273273
.play_dead = native_play_dead,
274274

arch/x86/kernel/smpboot.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1052,7 +1052,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
10521052
return ret;
10531053
}
10541054

1055-
static int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
1055+
int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
10561056
{
10571057
int apicid = apic->cpu_present_to_apicid(cpu);
10581058
int err;
@@ -1088,15 +1088,15 @@ static int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
10881088
return err;
10891089
}
10901090

1091-
int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
1091+
int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
10921092
{
1093-
return native_kick_ap(cpu, tidle);
1093+
return smp_ops.kick_ap_alive(cpu, tidle);
10941094
}
10951095

10961096
void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu)
10971097
{
10981098
/* Cleanup possible dangling ends... */
1099-
if (smp_ops.cpu_up == native_cpu_up && x86_platform.legacy.warm_reset)
1099+
if (smp_ops.kick_ap_alive == native_kick_ap && x86_platform.legacy.warm_reset)
11001100
smpboot_restore_warm_reset_vector();
11011101
}
11021102

arch/x86/xen/smp_pv.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -314,7 +314,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
314314
return 0;
315315
}
316316

317-
static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle)
317+
static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle)
318318
{
319319
int rc;
320320

@@ -438,7 +438,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
438438
.smp_prepare_cpus = xen_pv_smp_prepare_cpus,
439439
.smp_cpus_done = xen_smp_cpus_done,
440440

441-
.cpu_up = xen_pv_cpu_up,
441+
.kick_ap_alive = xen_pv_kick_ap,
442442
.cpu_die = xen_pv_cpu_die,
443443
.cleanup_dead_cpu = xen_pv_cleanup_dead_cpu,
444444
.poll_sync_state = xen_pv_poll_sync_state,

0 commit comments

Comments
 (0)