Skip to content

Commit 6a942f5

Browse files
Valentin Schneider authored and Vasily Gorbik committed
s390: preempt: Fix preempt_count initialization
S390's init_idle_preempt_count(p, cpu) doesn't actually let us initialize the preempt_count of the requested CPU's idle task: it unconditionally writes to the current CPU's. This clearly conflicts with idle_threads_init(), which intends to initialize *all* the idle tasks, including their preempt_count (or their CPU's, if the arch uses a per-CPU preempt_count). Unfortunately, it seems the way s390 does things doesn't let us initialize every possible CPU's preempt_count early on, as the pages where this resides are only allocated when a CPU is brought up and are freed when it is brought down. Let the arch-specific code set a CPU's preempt_count when its lowcore is allocated, and turn init_idle_preempt_count() into an empty stub. Fixes: f1a0a37 ("sched/core: Initialize the idle task with preemption disabled") Reported-by: Guenter Roeck <linux@roeck-us.net> Signed-off-by: Valentin Schneider <valentin.schneider@arm.com> Tested-by: Guenter Roeck <linux@roeck-us.net> Reviewed-by: Heiko Carstens <hca@linux.ibm.com> Link: https://lore.kernel.org/r/20210707163338.1623014-1-valentin.schneider@arm.com Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
1 parent 4ee471f commit 6a942f5

3 files changed

Lines changed: 6 additions & 12 deletions

File tree

arch/s390/include/asm/preempt.h

Lines changed: 4 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -29,12 +29,6 @@ static inline void preempt_count_set(int pc)
 				  old, new) != old);
 }
 
-#define init_task_preempt_count(p)	do { } while (0)
-
-#define init_idle_preempt_count(p, cpu)	do { \
-	S390_lowcore.preempt_count = PREEMPT_DISABLED; \
-} while (0)
-
 static inline void set_preempt_need_resched(void)
 {
 	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
@@ -88,12 +82,6 @@ static inline void preempt_count_set(int pc)
 	S390_lowcore.preempt_count = pc;
 }
 
-#define init_task_preempt_count(p)	do { } while (0)
-
-#define init_idle_preempt_count(p, cpu)	do { \
-	S390_lowcore.preempt_count = PREEMPT_DISABLED; \
-} while (0)
-
 static inline void set_preempt_need_resched(void)
 {
 }
@@ -130,6 +118,10 @@ static inline bool should_resched(int preempt_offset)
 
 #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
+#define init_task_preempt_count(p)	do { } while (0)
+/* Deferred to CPU bringup time */
+#define init_idle_preempt_count(p, cpu)	do { } while (0)
+
 #ifdef CONFIG_PREEMPTION
 extern void preempt_schedule(void);
 #define __preempt_schedule() preempt_schedule()

arch/s390/kernel/setup.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -442,6 +442,7 @@ static void __init setup_lowcore_dat_off(void)
 	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
 	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
 	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+	lc->preempt_count = PREEMPT_DISABLED;
 
 	set_prefix((u32)(unsigned long) lc);
 	lowcore_ptr[0] = lc;

arch/s390/kernel/smp.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -210,6 +210,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
 	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
 	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+	lc->preempt_count = PREEMPT_DISABLED;
 	if (nmi_alloc_per_cpu(lc))
 		goto out;
 	lowcore_ptr[cpu] = lc;

0 commit comments

Comments (0)