Skip to content

Commit 1dce506

Browse files
committed
Merge tag 'core-uaccess-2025-11-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scoped user access updates from Thomas Gleixner: "Scoped user mode access and related changes: - Implement the missing u64 user access function on ARM when CONFIG_CPU_SPECTRE=n. This makes it possible to access a 64bit value in generic code with [unsafe_]get_user(). All other architectures and ARM variants provide the relevant accessors already. - Ensure that ASM GOTO jump label usage in the user mode access helpers always goes through a local C scope label indirection inside the helpers. This is required because compilers do not support an ASM GOTO target leaving an auto cleanup scope. GCC silently fails to emit the cleanup invocation and CLANG fails the build. [ Editor's note: gcc-16 will have fixed the code generation issue in commit f68fe3ddda4 ("eh: Invoke cleanups/destructors in asm goto jumps [PR122835]"). But we obviously have to deal with clang and older versions of gcc, so.. - Linus ] This provides generic wrapper macros and the conversion of affected architecture code to use them. - Scoped user mode access with auto cleanup Access to user mode memory can be required in hot code paths, but if it has to be done with user controlled pointers, the access is shielded with a speculation barrier, so that the CPU cannot speculate around the address range check. Those speculation barriers impact performance quite significantly. This cost can be avoided by "masking" the provided pointer so it is guaranteed to be in the valid user memory access range and otherwise to point to a guaranteed unpopulated address space. This has to be done without branches so it creates an address dependency for the access, which the CPU cannot speculate ahead. 
This results in repeating and error-prone programming patterns: if (can_do_masked_user_access()) from = masked_user_read_access_begin((from)); else if (!user_read_access_begin(from, sizeof(*from))) return -EFAULT; unsafe_get_user(val, from, Efault); user_read_access_end(); return 0; Efault: user_read_access_end(); return -EFAULT; which can be replaced with scopes and automatic cleanup: scoped_user_read_access(from, Efault) unsafe_get_user(val, from, Efault); return 0; Efault: return -EFAULT; - Convert code which implements the above pattern over to scoped_user_*_access(). This also corrects a couple of imbalanced masked_*_begin() instances which are harmless on most architectures, but prevent PowerPC from implementing the masking optimization. - Add a missing speculation barrier in copy_from_user_iter()" * tag 'core-uaccess-2025-11-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: lib/strn*,uaccess: Use masked_user_{read/write}_access_begin when required scm: Convert put_cmsg() to scoped user access iov_iter: Add missing speculation barrier to copy_from_user_iter() iov_iter: Convert copy_from_user_iter() to masked user access select: Convert to scoped user access x86/futex: Convert to scoped user access futex: Convert to get/put_user_inline() uaccess: Provide put/get_user_inline() uaccess: Provide scoped user access regions arm64: uaccess: Use unsafe wrappers for ASM GOTO s390/uaccess: Use unsafe wrappers for ASM GOTO riscv/uaccess: Use unsafe wrappers for ASM GOTO powerpc/uaccess: Use unsafe wrappers for ASM GOTO x86/uaccess: Use unsafe wrappers for ASM GOTO uaccess: Provide ASM GOTO safe wrappers for unsafe_*_user() ARM: uaccess: Implement missing __get_user_asm_dword()
2 parents 4a26e70 + 4322c8f commit 1dce506

15 files changed

Lines changed: 421 additions & 150 deletions

File tree

arch/arm/include/asm/uaccess.h

Lines changed: 25 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -283,10 +283,17 @@ extern int __put_user_8(void *, unsigned long long);
283283
__gu_err; \
284284
})
285285

286+
/*
287+
* This is a type: either unsigned long, if the argument fits into
288+
* that type, or otherwise unsigned long long.
289+
*/
290+
#define __long_type(x) \
291+
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
292+
286293
#define __get_user_err(x, ptr, err, __t) \
287294
do { \
288295
unsigned long __gu_addr = (unsigned long)(ptr); \
289-
unsigned long __gu_val; \
296+
__long_type(x) __gu_val; \
290297
unsigned int __ua_flags; \
291298
__chk_user_ptr(ptr); \
292299
might_fault(); \
@@ -295,6 +302,7 @@ do { \
295302
case 1: __get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
296303
case 2: __get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
297304
case 4: __get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
305+
case 8: __get_user_asm_dword(__gu_val, __gu_addr, err, __t); break; \
298306
default: (__gu_val) = __get_user_bad(); \
299307
} \
300308
uaccess_restore(__ua_flags); \
@@ -353,6 +361,22 @@ do { \
353361
#define __get_user_asm_word(x, addr, err, __t) \
354362
__get_user_asm(x, addr, err, "ldr" __t)
355363

364+
#ifdef __ARMEB__
365+
#define __WORD0_OFFS 4
366+
#define __WORD1_OFFS 0
367+
#else
368+
#define __WORD0_OFFS 0
369+
#define __WORD1_OFFS 4
370+
#endif
371+
372+
#define __get_user_asm_dword(x, addr, err, __t) \
373+
({ \
374+
unsigned long __w0, __w1; \
375+
__get_user_asm(__w0, addr + __WORD0_OFFS, err, "ldr" __t); \
376+
__get_user_asm(__w1, addr + __WORD1_OFFS, err, "ldr" __t); \
377+
(x) = ((u64)__w1 << 32) | (u64) __w0; \
378+
})
379+
356380
#define __put_user_switch(x, ptr, __err, __fn) \
357381
do { \
358382
const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \

arch/arm64/include/asm/uaccess.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -422,9 +422,9 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
422422
}
423423
#define user_access_begin(a,b) user_access_begin(a,b)
424424
#define user_access_end() uaccess_ttbr0_disable()
425-
#define unsafe_put_user(x, ptr, label) \
425+
#define arch_unsafe_put_user(x, ptr, label) \
426426
__raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), label, U)
427-
#define unsafe_get_user(x, ptr, label) \
427+
#define arch_unsafe_get_user(x, ptr, label) \
428428
__raw_get_mem("ldtr", x, uaccess_mask_ptr(ptr), label, U)
429429

430430
/*

arch/powerpc/include/asm/uaccess.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -451,15 +451,15 @@ user_write_access_begin(const void __user *ptr, size_t len)
451451
#define user_write_access_begin user_write_access_begin
452452
#define user_write_access_end prevent_current_write_to_user
453453

454-
#define unsafe_get_user(x, p, e) do { \
454+
#define arch_unsafe_get_user(x, p, e) do { \
455455
__long_type(*(p)) __gu_val; \
456456
__typeof__(*(p)) __user *__gu_addr = (p); \
457457
\
458458
__get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e); \
459459
(x) = (__typeof__(*(p)))__gu_val; \
460460
} while (0)
461461

462-
#define unsafe_put_user(x, p, e) \
462+
#define arch_unsafe_put_user(x, p, e) \
463463
__put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
464464

465465
#define unsafe_copy_from_user(d, s, l, e) \
@@ -504,11 +504,11 @@ do { \
504504
unsafe_put_user(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \
505505
} while (0)
506506

507-
#define __get_kernel_nofault(dst, src, type, err_label) \
507+
#define arch_get_kernel_nofault(dst, src, type, err_label) \
508508
__get_user_size_goto(*((type *)(dst)), \
509509
(__force type __user *)(src), sizeof(type), err_label)
510510

511-
#define __put_kernel_nofault(dst, src, type, err_label) \
511+
#define arch_put_kernel_nofault(dst, src, type, err_label) \
512512
__put_user_size_goto(*((type *)(src)), \
513513
(__force type __user *)(dst), sizeof(type), err_label)
514514

arch/riscv/include/asm/uaccess.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -437,10 +437,10 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
437437
__clear_user(untagged_addr(to), n) : n;
438438
}
439439

440-
#define __get_kernel_nofault(dst, src, type, err_label) \
440+
#define arch_get_kernel_nofault(dst, src, type, err_label) \
441441
__get_user_nocheck(*((type *)(dst)), (__force __user type *)(src), err_label)
442442

443-
#define __put_kernel_nofault(dst, src, type, err_label) \
443+
#define arch_put_kernel_nofault(dst, src, type, err_label) \
444444
__put_user_nocheck(*((type *)(src)), (__force __user type *)(dst), err_label)
445445

446446
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
@@ -460,10 +460,10 @@ static inline void user_access_restore(unsigned long enabled) { }
460460
* We want the unsafe accessors to always be inlined and use
461461
* the error labels - thus the macro games.
462462
*/
463-
#define unsafe_put_user(x, ptr, label) \
463+
#define arch_unsafe_put_user(x, ptr, label) \
464464
__put_user_nocheck(x, (ptr), label)
465465

466-
#define unsafe_get_user(x, ptr, label) do { \
466+
#define arch_unsafe_get_user(x, ptr, label) do { \
467467
__inttype(*(ptr)) __gu_val; \
468468
__get_user_nocheck(__gu_val, (ptr), label); \
469469
(x) = (__force __typeof__(*(ptr)))__gu_val; \

arch/s390/include/asm/uaccess.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -468,8 +468,8 @@ do { \
468468

469469
#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
470470

471-
#define __get_kernel_nofault __mvc_kernel_nofault
472-
#define __put_kernel_nofault __mvc_kernel_nofault
471+
#define arch_get_kernel_nofault __mvc_kernel_nofault
472+
#define arch_put_kernel_nofault __mvc_kernel_nofault
473473

474474
void __cmpxchg_user_key_called_with_bad_pointer(void);
475475

arch/x86/include/asm/futex.h

Lines changed: 33 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -46,38 +46,31 @@ do { \
4646
} while(0)
4747

4848
static __always_inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
49-
u32 __user *uaddr)
49+
u32 __user *uaddr)
5050
{
51-
if (can_do_masked_user_access())
52-
uaddr = masked_user_access_begin(uaddr);
53-
else if (!user_access_begin(uaddr, sizeof(u32)))
54-
return -EFAULT;
55-
56-
switch (op) {
57-
case FUTEX_OP_SET:
58-
unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
59-
break;
60-
case FUTEX_OP_ADD:
61-
unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval,
62-
uaddr, oparg, Efault);
63-
break;
64-
case FUTEX_OP_OR:
65-
unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
66-
break;
67-
case FUTEX_OP_ANDN:
68-
unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
69-
break;
70-
case FUTEX_OP_XOR:
71-
unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
72-
break;
73-
default:
74-
user_access_end();
75-
return -ENOSYS;
51+
scoped_user_rw_access(uaddr, Efault) {
52+
switch (op) {
53+
case FUTEX_OP_SET:
54+
unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
55+
break;
56+
case FUTEX_OP_ADD:
57+
unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval, uaddr, oparg, Efault);
58+
break;
59+
case FUTEX_OP_OR:
60+
unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
61+
break;
62+
case FUTEX_OP_ANDN:
63+
unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
64+
break;
65+
case FUTEX_OP_XOR:
66+
unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
67+
break;
68+
default:
69+
return -ENOSYS;
70+
}
7671
}
77-
user_access_end();
7872
return 0;
7973
Efault:
80-
user_access_end();
8174
return -EFAULT;
8275
}
8376

@@ -86,21 +79,19 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8679
{
8780
int ret = 0;
8881

89-
if (can_do_masked_user_access())
90-
uaddr = masked_user_access_begin(uaddr);
91-
else if (!user_access_begin(uaddr, sizeof(u32)))
92-
return -EFAULT;
93-
asm volatile("\n"
94-
"1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
95-
"2:\n"
96-
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0) \
97-
: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
98-
: "r" (newval), "1" (oldval)
99-
: "memory"
100-
);
101-
user_access_end();
102-
*uval = oldval;
82+
scoped_user_rw_access(uaddr, Efault) {
83+
asm_inline volatile("\n"
84+
"1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
85+
"2:\n"
86+
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0)
87+
: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
88+
: "r" (newval), "1" (oldval)
89+
: "memory");
90+
*uval = oldval;
91+
}
10392
return ret;
93+
Efault:
94+
return -EFAULT;
10495
}
10596

10697
#endif

arch/x86/include/asm/uaccess.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -528,18 +528,18 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
528528
#define user_access_save() smap_save()
529529
#define user_access_restore(x) smap_restore(x)
530530

531-
#define unsafe_put_user(x, ptr, label) \
531+
#define arch_unsafe_put_user(x, ptr, label) \
532532
__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
533533

534534
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
535-
#define unsafe_get_user(x, ptr, err_label) \
535+
#define arch_unsafe_get_user(x, ptr, err_label) \
536536
do { \
537537
__inttype(*(ptr)) __gu_val; \
538538
__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label); \
539539
(x) = (__force __typeof__(*(ptr)))__gu_val; \
540540
} while (0)
541541
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
542-
#define unsafe_get_user(x, ptr, err_label) \
542+
#define arch_unsafe_get_user(x, ptr, err_label) \
543543
do { \
544544
int __gu_err; \
545545
__inttype(*(ptr)) __gu_val; \
@@ -618,11 +618,11 @@ do { \
618618
} while (0)
619619

620620
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
621-
#define __get_kernel_nofault(dst, src, type, err_label) \
621+
#define arch_get_kernel_nofault(dst, src, type, err_label) \
622622
__get_user_size(*((type *)(dst)), (__force type __user *)(src), \
623623
sizeof(type), err_label)
624624
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
625-
#define __get_kernel_nofault(dst, src, type, err_label) \
625+
#define arch_get_kernel_nofault(dst, src, type, err_label) \
626626
do { \
627627
int __kr_err; \
628628
\
@@ -633,7 +633,7 @@ do { \
633633
} while (0)
634634
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
635635

636-
#define __put_kernel_nofault(dst, src, type, err_label) \
636+
#define arch_put_kernel_nofault(dst, src, type, err_label) \
637637
__put_user_size(*((type *)(src)), (__force type __user *)(dst), \
638638
sizeof(type), err_label)
639639

fs/select.c

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -776,17 +776,13 @@ static inline int get_sigset_argpack(struct sigset_argpack *to,
776776
{
777777
// the path is hot enough for overhead of copy_from_user() to matter
778778
if (from) {
779-
if (can_do_masked_user_access())
780-
from = masked_user_access_begin(from);
781-
else if (!user_read_access_begin(from, sizeof(*from)))
782-
return -EFAULT;
783-
unsafe_get_user(to->p, &from->p, Efault);
784-
unsafe_get_user(to->size, &from->size, Efault);
785-
user_read_access_end();
779+
scoped_user_read_access(from, Efault) {
780+
unsafe_get_user(to->p, &from->p, Efault);
781+
unsafe_get_user(to->size, &from->size, Efault);
782+
}
786783
}
787784
return 0;
788785
Efault:
789-
user_read_access_end();
790786
return -EFAULT;
791787
}
792788

0 commit comments

Comments
 (0)