Skip to content

Commit c7ed509

Browse files
Alexander Gordeev authored and hca (Heiko Carstens) committed
s390/nmi: disable interrupts on extended save area update
Updating of the pointer to the machine check extended save area on the IPL CPU needs the lowcore protection to be disabled. Disable interrupts while the protection is off to avoid unnoticed writes to the lowcore.

Suggested-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
1 parent cff2d3a commit c7ed509

4 files changed

Lines changed: 29 additions & 33 deletions

File tree

arch/s390/include/asm/nmi.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -98,9 +98,9 @@ struct mcesa {
9898

9999
struct pt_regs;
100100

101-
void nmi_alloc_boot_cpu(struct lowcore *lc);
102-
int nmi_alloc_per_cpu(struct lowcore *lc);
103-
void nmi_free_per_cpu(struct lowcore *lc);
101+
void nmi_alloc_mcesa_early(u64 *mcesad);
102+
int nmi_alloc_mcesa(u64 *mcesad);
103+
void nmi_free_mcesa(u64 *mcesad);
104104

105105
void s390_handle_mcck(void);
106106
void __s390_handle_mcck(void);

arch/s390/kernel/nmi.c

Lines changed: 16 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -58,68 +58,59 @@ static inline unsigned long nmi_get_mcesa_size(void)
5858

5959
/*
6060
* The initial machine check extended save area for the boot CPU.
61-
* It will be replaced by nmi_init() with an allocated structure.
62-
* The structure is required for machine check happening early in
63-
* the boot process.
61+
* It will be replaced on the boot CPU reinit with an allocated
62+
* structure. The structure is required for machine check happening
63+
* early in the boot process.
6464
*/
6565
static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);
6666

67-
void __init nmi_alloc_boot_cpu(struct lowcore *lc)
67+
void __init nmi_alloc_mcesa_early(u64 *mcesad)
6868
{
6969
if (!nmi_needs_mcesa())
7070
return;
71-
lc->mcesad = __pa(&boot_mcesa);
71+
*mcesad = __pa(&boot_mcesa);
7272
if (MACHINE_HAS_GS)
73-
lc->mcesad |= ilog2(MCESA_MAX_SIZE);
73+
*mcesad |= ilog2(MCESA_MAX_SIZE);
7474
}
7575

76-
static int __init nmi_init(void)
76+
static void __init nmi_alloc_cache(void)
7777
{
78-
unsigned long origin, cr0, size;
78+
unsigned long size;
7979

8080
if (!nmi_needs_mcesa())
81-
return 0;
81+
return;
8282
size = nmi_get_mcesa_size();
8383
if (size > MCESA_MIN_SIZE)
8484
mcesa_origin_lc = ilog2(size);
8585
/* create slab cache for the machine-check-extended-save-areas */
8686
mcesa_cache = kmem_cache_create("nmi_save_areas", size, size, 0, NULL);
8787
if (!mcesa_cache)
8888
panic("Couldn't create nmi save area cache");
89-
origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
90-
if (!origin)
91-
panic("Couldn't allocate nmi save area");
92-
/* The pointer is stored with mcesa_bits ORed in */
93-
kmemleak_not_leak((void *) origin);
94-
__ctl_store(cr0, 0, 0);
95-
__ctl_clear_bit(0, 28); /* disable lowcore protection */
96-
/* Replace boot_mcesa on the boot CPU */
97-
S390_lowcore.mcesad = __pa(origin) | mcesa_origin_lc;
98-
__ctl_load(cr0, 0, 0);
99-
return 0;
10089
}
101-
early_initcall(nmi_init);
10290

103-
int nmi_alloc_per_cpu(struct lowcore *lc)
91+
int __ref nmi_alloc_mcesa(u64 *mcesad)
10492
{
10593
unsigned long origin;
10694

95+
*mcesad = 0;
10796
if (!nmi_needs_mcesa())
10897
return 0;
98+
if (!mcesa_cache)
99+
nmi_alloc_cache();
109100
origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
110101
if (!origin)
111102
return -ENOMEM;
112103
/* The pointer is stored with mcesa_bits ORed in */
113104
kmemleak_not_leak((void *) origin);
114-
lc->mcesad = __pa(origin) | mcesa_origin_lc;
105+
*mcesad = __pa(origin) | mcesa_origin_lc;
115106
return 0;
116107
}
117108

118-
void nmi_free_per_cpu(struct lowcore *lc)
109+
void nmi_free_mcesa(u64 *mcesad)
119110
{
120111
if (!nmi_needs_mcesa())
121112
return;
122-
kmem_cache_free(mcesa_cache, __va(lc->mcesad & MCESA_ORIGIN_MASK));
113+
kmem_cache_free(mcesa_cache, __va(*mcesad & MCESA_ORIGIN_MASK));
123114
}
124115

125116
static notrace void s390_handle_damage(void)

arch/s390/kernel/setup.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -445,7 +445,7 @@ static void __init setup_lowcore_dat_off(void)
445445
lc->lpp = LPP_MAGIC;
446446
lc->machine_flags = S390_lowcore.machine_flags;
447447
lc->preempt_count = S390_lowcore.preempt_count;
448-
nmi_alloc_boot_cpu(lc);
448+
nmi_alloc_mcesa_early(&lc->mcesad);
449449
lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
450450
lc->exit_timer = S390_lowcore.exit_timer;
451451
lc->user_timer = S390_lowcore.user_timer;

arch/s390/kernel/smp.c

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -212,7 +212,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
212212
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
213213
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
214214
lc->preempt_count = PREEMPT_DISABLED;
215-
if (nmi_alloc_per_cpu(lc))
215+
if (nmi_alloc_mcesa(&lc->mcesad))
216216
goto out;
217217
lowcore_ptr[cpu] = lc;
218218
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
@@ -239,7 +239,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
239239
mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
240240
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
241241
lowcore_ptr[cpu] = NULL;
242-
nmi_free_per_cpu(lc);
242+
nmi_free_mcesa(&lc->mcesad);
243243
stack_free(async_stack);
244244
stack_free(mcck_stack);
245245
free_pages(nodat_stack, THREAD_SIZE_ORDER);
@@ -1271,14 +1271,15 @@ static int __init smp_reinit_ipl_cpu(void)
12711271
{
12721272
unsigned long async_stack, nodat_stack, mcck_stack;
12731273
struct lowcore *lc, *lc_ipl;
1274-
unsigned long flags;
1274+
unsigned long flags, cr0;
1275+
u64 mcesad;
12751276

12761277
lc_ipl = lowcore_ptr[0];
12771278
lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
12781279
nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
12791280
async_stack = stack_alloc();
12801281
mcck_stack = stack_alloc();
1281-
if (!lc || !nodat_stack || !async_stack || !mcck_stack)
1282+
if (!lc || !nodat_stack || !async_stack || !mcck_stack || nmi_alloc_mcesa(&mcesad))
12821283
panic("Couldn't allocate memory");
12831284

12841285
local_irq_save(flags);
@@ -1287,6 +1288,10 @@ static int __init smp_reinit_ipl_cpu(void)
12871288
S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
12881289
S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
12891290
S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
1291+
__ctl_store(cr0, 0, 0);
1292+
__ctl_clear_bit(0, 28); /* disable lowcore protection */
1293+
S390_lowcore.mcesad = mcesad;
1294+
__ctl_load(cr0, 0, 0);
12901295
lowcore_ptr[0] = lc;
12911296
local_mcck_enable();
12921297
local_irq_restore(flags);

0 commit comments

Comments (0)