
Commit 22b8cc3

Merge tag 'x86_mm_for_6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 LAM (Linear Address Masking) support from Dave Hansen:
 "Add support for the new Linear Address Masking CPU feature. This is
  similar to ARM's Top Byte Ignore and allows userspace to store
  metadata in some bits of pointers without masking it out before use"

* tag 'x86_mm_for_6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/iommu/sva: Do not allow to set FORCE_TAGGED_SVA bit from outside
  x86/mm/iommu/sva: Fix error code for LAM enabling failure due to SVA
  selftests/x86/lam: Add test cases for LAM vs thread creation
  selftests/x86/lam: Add ARCH_FORCE_TAGGED_SVA test cases for linear-address masking
  selftests/x86/lam: Add inherit test cases for linear-address masking
  selftests/x86/lam: Add io_uring test cases for linear-address masking
  selftests/x86/lam: Add mmap and SYSCALL test cases for linear-address masking
  selftests/x86/lam: Add malloc and tag-bits test cases for linear-address masking
  x86/mm/iommu/sva: Make LAM and SVA mutually exclusive
  iommu/sva: Replace pasid_valid() helper with mm_valid_pasid()
  mm: Expose untagging mask in /proc/$PID/status
  x86/mm: Provide arch_prctl() interface for LAM
  x86/mm: Reduce untagged_addr() overhead for systems without LAM
  x86/uaccess: Provide untagged_addr() and remove tags before address check
  mm: Introduce untagged_addr_remote()
  x86/mm: Handle LAM on context switch
  x86: CPUID and CR3/CR4 flags for Linear Address Masking
  x86: Allow atomic MM_CONTEXT flags setting
  x86/mm: Rework address range check in get_user() and put_user()
2 parents 7b664cc + 9774026 commit 22b8cc3
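For orientation, the arch_prctl() interface added by this series ("x86/mm: Provide arch_prctl() interface for LAM") is driven from userspace roughly as in the sketch below. This is a minimal illustration, not a copy of the kernel selftests: ARCH_ENABLE_TAGGED_ADDR and its 0x4002 value are what the series adds to asm/prctl.h (verify against a 6.4+ kernel's headers), and the tag layout assumes the LAM_U57 mode, i.e. six maskable bits 62:57.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef ARCH_ENABLE_TAGGED_ADDR
#define ARCH_ENABLE_TAGGED_ADDR 0x4002	/* added to asm/prctl.h by this series */
#endif

int main(void)
{
	/* Ask for 6 tag bits; the kernel enables LAM_U57 (bits 62:57). */
	if (syscall(SYS_arch_prctl, ARCH_ENABLE_TAGGED_ADDR, 6UL)) {
		perror("arch_prctl(ARCH_ENABLE_TAGGED_ADDR)");
		return 1;
	}

	uint64_t *p = malloc(sizeof(*p));
	/* Store metadata in the tag bits; LAM masks it on dereference. */
	uint64_t *tagged = (uint64_t *)((uintptr_t)p | (0x2aULL << 57));
	*tagged = 42;			/* would fault without LAM enabled */
	printf("*p = %llu\n", (unsigned long long)*p);
	free(p);
	return 0;
}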

35 files changed

Lines changed: 1701 additions & 149 deletions


arch/arm64/include/asm/mmu_context.h

Lines changed: 6 additions & 0 deletions
@@ -288,6 +288,12 @@ void post_ttbr_update_workaround(void);
 unsigned long arm64_mm_context_get(struct mm_struct *mm);
 void arm64_mm_context_put(struct mm_struct *mm);
 
+#define mm_untag_mask mm_untag_mask
+static inline unsigned long mm_untag_mask(struct mm_struct *mm)
+{
+	return -1UL >> 8;
+}
+
 #include <asm-generic/mmu_context.h>
 
 #endif /* !__ASSEMBLY__ */
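The value above models arm64's Top Byte Ignore: -1UL >> 8 is 0x00ffffffffffffff, so bits 63:56 may carry a tag and only the low 56 bits are significant (the sparc variant below does the same with adi_nbits() for ADI). A hedged sketch of how a consumer of mm_untag_mask() strips a tag; the kernel's real untagged_addr() helpers layer sign handling on top of this:

/* Hypothetical helper: clear tag bits using the per-mm mask. */
static inline unsigned long untag_addr(unsigned long addr, unsigned long mask)
{
	return addr & mask;	/* arm64 TBI: mask = -1UL >> 8 drops bits 63:56 */
}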

arch/sparc/include/asm/mmu_context_64.h

Lines changed: 6 additions & 0 deletions
@@ -185,6 +185,12 @@ static inline void finish_arch_post_lock_switch(void)
 	}
 }
 
+#define mm_untag_mask mm_untag_mask
+static inline unsigned long mm_untag_mask(struct mm_struct *mm)
+{
+	return -1UL >> adi_nbits();
+}
+
 #include <asm-generic/mmu_context.h>
 
 #endif /* !(__ASSEMBLY__) */

arch/sparc/include/asm/uaccess_64.h

Lines changed: 2 additions & 0 deletions
@@ -8,8 +8,10 @@
 
 #include <linux/compiler.h>
 #include <linux/string.h>
+#include <linux/mm_types.h>
 #include <asm/asi.h>
 #include <asm/spitfire.h>
+#include <asm/pgtable.h>
 
 #include <asm/processor.h>
 #include <asm-generic/access_ok.h>

arch/x86/Kconfig

Lines changed: 11 additions & 0 deletions
@@ -2290,6 +2290,17 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
 
	  If unsure, leave at the default value.
 
+config ADDRESS_MASKING
+	bool "Linear Address Masking support"
+	depends on X86_64
+	help
+	  Linear Address Masking (LAM) modifies the checking that is applied
+	  to 64-bit linear addresses, allowing software to use the
+	  untranslated address bits for metadata.
+
+	  The capability can be used for efficient address sanitizer (ASAN)
+	  implementations and for optimizations in JITs.
+
 config HOTPLUG_CPU
	def_bool y
	depends on SMP
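The "untranslated address bits" in the help text are concrete ranges: LAM_U57 makes bits 62:57 maskable and LAM_U48 makes bits 62:48 maskable, with bit 63 remaining the canonicality/sign bit. The implied untag masks, written out as an illustrative sketch (the macro names are hypothetical, not lifted from the kernel source):

/* Untag masks implied by the two LAM modes (illustrative only). */
#define LAM_U57_UNTAG_MASK	(~GENMASK_ULL(62, 57))	/* 6 tag bits  */
#define LAM_U48_UNTAG_MASK	(~GENMASK_ULL(62, 48))	/* 15 tag bits */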

arch/x86/entry/vsyscall/vsyscall_64.c

Lines changed: 1 addition & 1 deletion
@@ -317,7 +317,7 @@ static struct vm_area_struct gate_vma __ro_after_init = {
 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 #ifdef CONFIG_COMPAT
-	if (!mm || !(mm->context.flags & MM_CONTEXT_HAS_VSYSCALL))
+	if (!mm || !test_bit(MM_CONTEXT_HAS_VSYSCALL, &mm->context.flags))
		return NULL;
 #endif
	if (vsyscall_mode == NONE)

arch/x86/include/asm/cpufeatures.h

Lines changed: 1 addition & 0 deletions
@@ -321,6 +321,7 @@
 #define X86_FEATURE_LKGS		(12*32+18) /* "" Load "kernel" (userspace) GS */
 #define X86_FEATURE_AMX_FP16		(12*32+21) /* "" AMX fp16 Support */
 #define X86_FEATURE_AVX_IFMA		(12*32+23) /* "" Support for VPMADD52[H,L]UQ */
+#define X86_FEATURE_LAM			(12*32+26) /* Linear Address Masking */
 
 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
 #define X86_FEATURE_CLZERO		(13*32+ 0) /* CLZERO instruction */
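With the feature bit defined, kernel code can gate LAM paths with the standard helper; a minimal sketch (the wrapper function is hypothetical, cpu_feature_enabled() is the normal idiom):

static bool lam_supported(void)
{
	/* Static-branch check; also honors the DISABLE_LAM masking below. */
	return cpu_feature_enabled(X86_FEATURE_LAM);
}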

arch/x86/include/asm/disabled-features.h

Lines changed: 7 additions & 1 deletion
@@ -75,6 +75,12 @@
 # define DISABLE_CALL_DEPTH_TRACKING	(1 << (X86_FEATURE_CALL_DEPTH & 31))
 #endif
 
+#ifdef CONFIG_ADDRESS_MASKING
+# define DISABLE_LAM		0
+#else
+# define DISABLE_LAM		(1 << (X86_FEATURE_LAM & 31))
+#endif
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 # define DISABLE_ENQCMD	0
 #else
@@ -115,7 +121,7 @@
 #define DISABLED_MASK10	0
 #define DISABLED_MASK11	(DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET| \
			 DISABLE_CALL_DEPTH_TRACKING)
-#define DISABLED_MASK12	0
+#define DISABLED_MASK12	(DISABLE_LAM)
 #define DISABLED_MASK13	0
 #define DISABLED_MASK14	0
 #define DISABLED_MASK15	0

arch/x86/include/asm/mmu.h

Lines changed: 15 additions & 3 deletions
@@ -9,9 +9,13 @@
 #include <linux/bits.h>
 
 /* Uprobes on this MM assume 32-bit code */
-#define MM_CONTEXT_UPROBE_IA32		BIT(0)
+#define MM_CONTEXT_UPROBE_IA32		0
 /* vsyscall page is accessible on this MM */
-#define MM_CONTEXT_HAS_VSYSCALL		BIT(1)
+#define MM_CONTEXT_HAS_VSYSCALL		1
+/* Do not allow changing LAM mode */
+#define MM_CONTEXT_LOCK_LAM		2
+/* Allow LAM and SVA coexisting */
+#define MM_CONTEXT_FORCE_TAGGED_SVA	3
 
 /*
  * x86 has arch-specific MMU state beyond what lives in mm_struct.
@@ -39,7 +43,15 @@ typedef struct {
 #endif
 
 #ifdef CONFIG_X86_64
-	unsigned short flags;
+	unsigned long flags;
+#endif
+
+#ifdef CONFIG_ADDRESS_MASKING
+	/* Active LAM mode: X86_CR3_LAM_U48 or X86_CR3_LAM_U57 or 0 (disabled) */
+	unsigned long lam_cr3_mask;
+
+	/* Significant bits of the virtual address. Excludes tag bits. */
+	u64 untag_mask;
 #endif
 
	struct mutex lock;
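Turning the BIT(n) masks into plain bit numbers (and widening flags to unsigned long) is what allows the atomic bitops, which take a bit index and an unsigned long pointer, per the "x86: Allow atomic MM_CONTEXT flags setting" commit in this merge. A hedged sketch of the idiom (the wrapper function is hypothetical):

static void lock_lam(struct mm_struct *mm)
{
	/* Atomic RMW, safe against concurrent threads of the process. */
	set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags);
}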

arch/x86/include/asm/mmu_context.h

Lines changed: 48 additions & 1 deletion
@@ -85,6 +85,51 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
 }
 #endif
 
+#ifdef CONFIG_ADDRESS_MASKING
+static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
+{
+	return mm->context.lam_cr3_mask;
+}
+
+static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+	mm->context.lam_cr3_mask = oldmm->context.lam_cr3_mask;
+	mm->context.untag_mask = oldmm->context.untag_mask;
+}
+
+#define mm_untag_mask mm_untag_mask
+static inline unsigned long mm_untag_mask(struct mm_struct *mm)
+{
+	return mm->context.untag_mask;
+}
+
+static inline void mm_reset_untag_mask(struct mm_struct *mm)
+{
+	mm->context.untag_mask = -1UL;
+}
+
+#define arch_pgtable_dma_compat arch_pgtable_dma_compat
+static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
+{
+	return !mm_lam_cr3_mask(mm) ||
+		test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags);
+}
+#else
+
+static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
+{
+	return 0;
+}
+
+static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+}
+
+static inline void mm_reset_untag_mask(struct mm_struct *mm)
+{
+}
+#endif
+
 #define enter_lazy_tlb enter_lazy_tlb
 extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 
@@ -109,6 +154,7 @@ static inline int init_new_context(struct task_struct *tsk,
		mm->context.execute_only_pkey = -1;
	}
 #endif
+	mm_reset_untag_mask(mm);
	init_new_context_ldt(mm);
	return 0;
 }
@@ -162,6 +208,7 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
	arch_dup_pkeys(oldmm, mm);
	paravirt_enter_mmap(mm);
+	dup_lam(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
 }
 
@@ -175,7 +222,7 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
 static inline bool is_64bit_mm(struct mm_struct *mm)
 {
	return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
-	       !(mm->context.flags & MM_CONTEXT_UPROBE_IA32);
+	       !test_bit(MM_CONTEXT_UPROBE_IA32, &mm->context.flags);
 }
 #else
 static inline bool is_64bit_mm(struct mm_struct *mm)
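Taken together, these helpers let code strip tags from addresses of an arbitrary mm; a hedged sketch in the spirit of the new untagged_addr_remote() (simplified, omitting the kernel's sign-extension handling):

/* Sketch: untag an address belonging to another process's mm. */
static unsigned long untag_remote(struct mm_struct *mm, unsigned long addr)
{
	return addr & mm_untag_mask(mm);	/* mask is -1UL when LAM is off */
}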

arch/x86/include/asm/processor-flags.h

Lines changed: 2 additions & 0 deletions
@@ -28,6 +28,8 @@
  * On systems with SME, one bit (in a variable position!) is stolen to indicate
  * that the top-level paging structure is encrypted.
  *
+ * On systems with LAM, bits 61 and 62 are used to indicate LAM mode.
+ *
  * All of the remaining bits indicate the physical address of the top-level
  * paging structure.
  *
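Concretely, LAM mode lives in CR3: bit 61 enables LAM_U57 and bit 62 enables LAM_U48 (the X86_CR3_LAM_U57/X86_CR3_LAM_U48 flags this series defines), and the context-switch path ORs the per-mm mask into the new CR3 value. A hedged sketch of the composition (the function is illustrative, not the kernel's build_cr3()):

/* Illustrative: fold the mm's LAM mode into a CR3 value. */
static unsigned long build_cr3_lam(unsigned long pgd_pa, struct mm_struct *mm)
{
	/* lam_cr3_mask is X86_CR3_LAM_U48, X86_CR3_LAM_U57 or 0 */
	return pgd_pa | mm_lam_cr3_mask(mm);
}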
