Skip to content

Commit 74c228d

Browse files
kiryl authored and hansendc committed
x86/uaccess: Provide untagged_addr() and remove tags before address check
untagged_addr() is a helper used by the core-mm to strip tag bits and get the address to the canonical shape based on rules of the current thread. It only handles userspace addresses. The untagging mask is stored in a per-CPU variable and set on context switch to the task. The tags must not be included in the check of whether it is okay to access the userspace address. Strip tags in access_ok(). Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Alexander Potapenko <glider@google.com> Link: https://lore.kernel.org/all/20230312112612.31869-7-kirill.shutemov%40linux.intel.com
1 parent 428e106 commit 74c228d

6 files changed

Lines changed: 69 additions & 2 deletions

File tree

arch/x86/include/asm/mmu.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,9 @@ typedef struct {
4545
#ifdef CONFIG_ADDRESS_MASKING
4646
/* Active LAM mode: X86_CR3_LAM_U48 or X86_CR3_LAM_U57 or 0 (disabled) */
4747
unsigned long lam_cr3_mask;
48+
49+
/* Significant bits of the virtual address. Excludes tag bits. */
50+
u64 untag_mask;
4851
#endif
4952

5053
struct mutex lock;

arch/x86/include/asm/mmu_context.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -101,6 +101,12 @@ static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
101101
static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
102102
{
103103
mm->context.lam_cr3_mask = oldmm->context.lam_cr3_mask;
104+
mm->context.untag_mask = oldmm->context.untag_mask;
105+
}
106+
107+
static inline void mm_reset_untag_mask(struct mm_struct *mm)
108+
{
109+
mm->context.untag_mask = -1UL;
104110
}
105111

106112
#else
@@ -113,6 +119,10 @@ static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
113119
static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
114120
{
115121
}
122+
123+
static inline void mm_reset_untag_mask(struct mm_struct *mm)
124+
{
125+
}
116126
#endif
117127

118128
#define enter_lazy_tlb enter_lazy_tlb
@@ -139,6 +149,7 @@ static inline int init_new_context(struct task_struct *tsk,
139149
mm->context.execute_only_pkey = -1;
140150
}
141151
#endif
152+
mm_reset_untag_mask(mm);
142153
init_new_context_ldt(mm);
143154
return 0;
144155
}

arch/x86/include/asm/tlbflush.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,15 @@ static inline void cr4_clear_bits(unsigned long mask)
5454
local_irq_restore(flags);
5555
}
5656

57+
#ifdef CONFIG_ADDRESS_MASKING
58+
DECLARE_PER_CPU(u64, tlbstate_untag_mask);
59+
60+
static inline u64 current_untag_mask(void)
61+
{
62+
return this_cpu_read(tlbstate_untag_mask);
63+
}
64+
#endif
65+
5766
#ifndef MODULE
5867
/*
5968
* 6 because 6 should be plenty and struct tlb_state will fit in two cache
@@ -380,6 +389,7 @@ static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
380389
{
381390
this_cpu_write(cpu_tlbstate.lam,
382391
mm->context.lam_cr3_mask >> X86_CR3_LAM_U57_BIT);
392+
this_cpu_write(tlbstate_untag_mask, mm->context.untag_mask);
383393
}
384394

385395
#else

arch/x86/include/asm/uaccess.h

Lines changed: 37 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,11 +7,13 @@
77
#include <linux/compiler.h>
88
#include <linux/instrumented.h>
99
#include <linux/kasan-checks.h>
10+
#include <linux/mm_types.h>
1011
#include <linux/string.h>
1112
#include <asm/asm.h>
1213
#include <asm/page.h>
1314
#include <asm/smap.h>
1415
#include <asm/extable.h>
16+
#include <asm/tlbflush.h>
1517

1618
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1719
static inline bool pagefault_disabled(void);
@@ -21,6 +23,39 @@ static inline bool pagefault_disabled(void);
2123
# define WARN_ON_IN_IRQ()
2224
#endif
2325

26+
#ifdef CONFIG_ADDRESS_MASKING
27+
/*
28+
* Mask out tag bits from the address.
29+
*
30+
* Magic with the 'sign' allows to untag userspace pointer without any branches
31+
* while leaving kernel addresses intact.
32+
*/
33+
static inline unsigned long __untagged_addr(unsigned long addr,
34+
unsigned long mask)
35+
{
36+
long sign = addr >> 63;
37+
38+
addr &= mask | sign;
39+
return addr;
40+
}
41+
42+
#define untagged_addr(addr) ({ \
43+
u64 __addr = (__force u64)(addr); \
44+
__addr = __untagged_addr(__addr, current_untag_mask()); \
45+
(__force __typeof__(addr))__addr; \
46+
})
47+
48+
#define untagged_addr_remote(mm, addr) ({ \
49+
u64 __addr = (__force u64)(addr); \
50+
mmap_assert_locked(mm); \
51+
__addr = __untagged_addr(__addr, (mm)->context.untag_mask); \
52+
(__force __typeof__(addr))__addr; \
53+
})
54+
55+
#else
56+
#define untagged_addr(addr) (addr)
57+
#endif
58+
2459
/**
2560
* access_ok - Checks if a user space pointer is valid
2661
* @addr: User space pointer to start of block to check
@@ -38,10 +73,10 @@ static inline bool pagefault_disabled(void);
3873
* Return: true (nonzero) if the memory block may be valid, false (zero)
3974
* if it is definitely invalid.
4075
*/
41-
#define access_ok(addr, size) \
76+
#define access_ok(addr, size) \
4277
({ \
4378
WARN_ON_IN_IRQ(); \
44-
likely(__access_ok(addr, size)); \
79+
likely(__access_ok(untagged_addr(addr), size)); \
4580
})
4681

4782
#include <asm-generic/access_ok.h>

arch/x86/kernel/process.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@
4848
#include <asm/frame.h>
4949
#include <asm/unwind.h>
5050
#include <asm/tdx.h>
51+
#include <asm/mmu_context.h>
5152

5253
#include "process.h"
5354

@@ -368,6 +369,8 @@ void arch_setup_new_exec(void)
368369
task_clear_spec_ssb_noexec(current);
369370
speculation_ctrl_update(read_thread_flags());
370371
}
372+
373+
mm_reset_untag_mask(current->mm);
371374
}
372375

373376
#ifdef CONFIG_X86_IOPL_IOPERM

arch/x86/mm/init.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1048,6 +1048,11 @@ __visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = {
10481048
.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
10491049
};
10501050

1051+
#ifdef CONFIG_ADDRESS_MASKING
1052+
DEFINE_PER_CPU(u64, tlbstate_untag_mask);
1053+
EXPORT_PER_CPU_SYMBOL(tlbstate_untag_mask);
1054+
#endif
1055+
10511056
void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
10521057
{
10531058
/* entry 0 MUST be WB (hardwired to speed up translations) */

0 commit comments

Comments (0)