Skip to content

Commit 9f722cb

Browse files
committed
Merge tag 'alpha-for-v7.0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/lindholm/alpha
Pull alpha update from Magnus Lindholm:

 - alpha: fix user-space corruption during memory compaction

* tag 'alpha-for-v7.0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/lindholm/alpha:
  alpha: fix user-space corruption during memory compaction
2 parents 7e3a1e0 + dd5712f commit 9f722cb

4 files changed

Lines changed: 148 additions & 3 deletions

File tree

arch/alpha/include/asm/pgtable.h

Lines changed: 32 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
#include <asm/processor.h> /* For TASK_SIZE */
1818
#include <asm/machvec.h>
1919
#include <asm/setup.h>
20+
#include <linux/page_table_check.h>
2021

2122
struct mm_struct;
2223
struct vm_area_struct;
@@ -183,6 +184,9 @@ extern inline void pud_set(pud_t * pudp, pmd_t * pmdp)
183184
{ pud_val(*pudp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
184185

185186

187+
extern void migrate_flush_tlb_page(struct vm_area_struct *vma,
188+
unsigned long addr);
189+
186190
extern inline unsigned long
187191
pmd_page_vaddr(pmd_t pmd)
188192
{
@@ -202,7 +206,7 @@ extern inline int pte_none(pte_t pte) { return !pte_val(pte); }
202206
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; }
203207
extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
204208
{
205-
pte_val(*ptep) = 0;
209+
WRITE_ONCE(pte_val(*ptep), 0);
206210
}
207211

208212
extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
@@ -264,6 +268,33 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
264268

265269
extern pgd_t swapper_pg_dir[1024];
266270

271+
#ifdef CONFIG_COMPACTION
272+
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
273+
274+
/*
 * Snapshot a PTE and clear it, returning the old value so the caller
 * can inspect or propagate it (provided under CONFIG_COMPACTION for
 * the migration path -- see the surrounding #ifdef).
 *
 * NOTE: this does NOT flush the TLB; ptep_clear_flush() below is the
 * variant that also invalidates the stale translation.
 */
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
275+
unsigned long address,
276+
pte_t *ptep)
277+
{
278+
/* READ_ONCE pairs with the WRITE_ONCE done by pte_clear(). */
pte_t pte = READ_ONCE(*ptep);
279+
280+
pte_clear(mm, address, ptep);
281+
return pte;
282+
}
283+
284+
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
285+
286+
/*
 * Clear a PTE and immediately shoot down its TLB entry.
 *
 * Only provided under CONFIG_COMPACTION (surrounding #ifdef):
 * migration/compaction needs the stale translation gone right away,
 * not at the next lazy context switch.
 */
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
287+
unsigned long addr, pte_t *ptep)
288+
{
289+
struct mm_struct *mm = vma->vm_mm;
290+
pte_t pte = ptep_get_and_clear(mm, addr, ptep);
291+
292+
/* Keep page-table-check bookkeeping in sync with the clear. */
page_table_check_pte_clear(mm, pte);
293+
/* Combined ASN handling + per-VA invalidate; see arch/alpha/mm/tlbflush.c. */
migrate_flush_tlb_page(vma, addr);
294+
return pte;
295+
}
296+
297+
#endif
267298
/*
268299
* The Alpha doesn't have any external MMU info: the kernel page
269300
* tables contain all the necessary information.

arch/alpha/include/asm/tlbflush.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,9 @@ flush_tlb_other(struct mm_struct *mm)
5858
unsigned long *mmc = &mm->context[smp_processor_id()];
5959
/* Check it's not zero first to avoid cacheline ping pong
6060
when possible. */
61-
if (*mmc) *mmc = 0;
61+
62+
if (READ_ONCE(*mmc))
63+
WRITE_ONCE(*mmc, 0);
6264
}
6365

6466
#ifndef CONFIG_SMP

arch/alpha/mm/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,4 +3,4 @@
33
# Makefile for the linux alpha-specific parts of the memory manager.
44
#
55

6-
obj-y := init.o fault.o
6+
obj-y := init.o fault.o tlbflush.o

arch/alpha/mm/tlbflush.c

Lines changed: 112 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,112 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/*
3+
* Alpha TLB shootdown helpers
4+
*
5+
* Copyright (C) 2025 Magnus Lindholm <linmag7@gmail.com>
6+
*
7+
* Alpha-specific TLB flush helpers that cannot be expressed purely
8+
* as inline functions.
9+
*
10+
* These helpers provide combined MM context handling (ASN rollover)
11+
* and immediate TLB invalidation for page migration and memory
12+
* compaction paths, where lazy shootdowns are insufficient.
13+
*/
14+
15+
#include <linux/mm.h>
16+
#include <linux/smp.h>
17+
#include <linux/sched.h>
18+
#include <asm/tlbflush.h>
19+
#include <asm/pal.h>
20+
#include <asm/mmu_context.h>
21+
22+
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
23+
24+
/*
25+
* Migration/compaction helper: combine mm context (ASN) handling with an
26+
* immediate per-page TLB invalidate and (for exec) an instruction barrier.
27+
*
28+
* This mirrors the SMP combined IPI handler semantics, but runs locally on UP.
29+
*/
30+
#ifndef CONFIG_SMP
31+
/*
 * UP variant of migrate_flush_tlb_page() (declared in asm/pgtable.h,
 * called from ptep_clear_flush() on the migration/compaction path).
 * tbi_type 3 invalidates the ITB as well as the DTB for executable
 * mappings; 2 is data-only.
 */
void migrate_flush_tlb_page(struct vm_area_struct *vma,
32+
unsigned long addr)
33+
{
34+
struct mm_struct *mm = vma->vm_mm;
35+
int tbi_type = (vma->vm_flags & VM_EXEC) ? 3 : 2;
36+
37+
/*
38+
* First do the mm-context side:
39+
* If we're currently running this mm, reload a fresh context ASN.
40+
* Otherwise, mark context invalid.
41+
*
42+
* On UP, this is mostly about matching the SMP semantics and ensuring
43+
* exec/i-cache tagging assumptions hold when compaction migrates pages.
44+
*/
45+
if (mm == current->active_mm)
46+
flush_tlb_current(mm);
47+
else
48+
flush_tlb_other(mm);
49+
50+
/*
51+
* Then do the immediate translation kill for this VA.
52+
* For exec mappings, order instruction fetch after invalidation.
53+
*/
54+
tbi(tbi_type, addr);
55+
}
56+
57+
#else
58+
/*
 * Argument bundle handed to ipi_flush_mm_and_page() via on_each_cpu().
 * Lives on the sender's stack for the duration of the synchronous IPI.
 */
struct tlb_mm_and_addr {
59+
struct mm_struct *mm;
60+
unsigned long addr;
61+
int tbi_type; /* 2 = DTB, 3 = ITB+DTB */
62+
};
63+
64+
/*
 * IPI callback run on every online CPU (including the sender) by
 * migrate_flush_tlb_page() below via on_each_cpu(): invalidate the
 * mm's per-CPU context, then the single VA.
 */
static void ipi_flush_mm_and_page(void *x)
65+
{
66+
struct tlb_mm_and_addr *d = x;
67+
68+
/* Part 1: mm context side (Alpha uses ASN/context as a key mechanism). */
69+
/*
 * If this CPU is running the mm (and the ASN is not locked), reload a
 * fresh context; otherwise just mark the per-CPU context stale.
 */
if (d->mm == current->active_mm && !asn_locked())
70+
__load_new_mm_context(d->mm);
71+
else
72+
flush_tlb_other(d->mm);
73+
74+
/* Part 2: immediate per-VA invalidation on this CPU. */
75+
tbi(d->tbi_type, d->addr);
76+
}
77+
78+
/*
 * SMP variant of migrate_flush_tlb_page() (declared in asm/pgtable.h,
 * called from ptep_clear_flush() on the migration/compaction path).
 *
 * 'd' can safely live on this stack because on_each_cpu() is invoked
 * with wait=1, so all CPUs have finished with it before we return.
 */
void migrate_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
79+
{
80+
struct mm_struct *mm = vma->vm_mm;
81+
struct tlb_mm_and_addr d = {
82+
.mm = mm,
83+
.addr = addr,
84+
/* 3 = ITB+DTB for executable mappings, 2 = DTB only. */
.tbi_type = (vma->vm_flags & VM_EXEC) ? 3 : 2,
85+
};
86+
87+
/*
88+
* One synchronous rendezvous: every CPU runs ipi_flush_mm_and_page().
89+
* This is the "combined" version of flush_tlb_mm + per-page invalidate.
90+
*/
91+
/* Keep smp_processor_id()/online walk below stable across the flush. */
preempt_disable();
92+
on_each_cpu(ipi_flush_mm_and_page, &d, 1);
93+
94+
/*
95+
* mimic flush_tlb_mm()'s mm_users<=1 optimization.
96+
*/
97+
/* Single user: drop every other CPU's cached context for this mm. */
if (atomic_read(&mm->mm_users) <= 1) {
98+
99+
int cpu, this_cpu;
100+
this_cpu = smp_processor_id();
101+
102+
for (cpu = 0; cpu < NR_CPUS; cpu++) {
103+
if (!cpu_online(cpu) || cpu == this_cpu)
104+
continue;
105+
/* READ_ONCE first avoids cacheline ping-pong on already-zero slots. */
if (READ_ONCE(mm->context[cpu]))
106+
WRITE_ONCE(mm->context[cpu], 0);
107+
}
108+
}
109+
preempt_enable();
110+
}
111+
112+
#endif

0 commit comments

Comments
 (0)