Skip to content

Commit b9bd9f6

Browse files
committed
x86: uaccess: move 32-bit and 64-bit parts into proper <asm/uaccess_N.h> header
The x86 <asm/uaccess.h> file has grown features that are specific to x86-64 like LAM support and the related access_ok() changes. They really should be in the <asm/uaccess_64.h> file and not pollute the generic x86 header. Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 6ccdc91 commit b9bd9f6

3 files changed

Lines changed: 82 additions & 85 deletions

File tree

arch/x86/include/asm/uaccess.h

Lines changed: 3 additions & 84 deletions
Original file line numberDiff line numberDiff line change
@@ -16,83 +16,10 @@
1616
#include <asm/extable.h>
1717
#include <asm/tlbflush.h>
1818

19-
#ifdef CONFIG_ADDRESS_MASKING
20-
/*
21-
* Mask out tag bits from the address.
22-
*
23-
* Magic with the 'sign' allows to untag userspace pointer without any branches
24-
* while leaving kernel addresses intact.
25-
*/
26-
static inline unsigned long __untagged_addr(unsigned long addr)
27-
{
28-
long sign;
29-
30-
/*
31-
* Refer tlbstate_untag_mask directly to avoid RIP-relative relocation
32-
* in alternative instructions. The relocation becomes wrong when it gets
33-
* copied to the target place.
34-
*/
35-
asm (ALTERNATIVE("",
36-
"sar $63, %[sign]\n\t" /* user_ptr ? 0 : -1UL */
37-
"or %%gs:tlbstate_untag_mask, %[sign]\n\t"
38-
"and %[sign], %[addr]\n\t", X86_FEATURE_LAM)
39-
: [addr] "+r" (addr), [sign] "=r" (sign)
40-
: "m" (tlbstate_untag_mask), "[sign]" (addr));
41-
42-
return addr;
43-
}
44-
45-
#define untagged_addr(addr) ({ \
46-
unsigned long __addr = (__force unsigned long)(addr); \
47-
(__force __typeof__(addr))__untagged_addr(__addr); \
48-
})
49-
50-
static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
51-
unsigned long addr)
52-
{
53-
long sign = addr >> 63;
54-
55-
mmap_assert_locked(mm);
56-
addr &= (mm)->context.untag_mask | sign;
57-
58-
return addr;
59-
}
60-
61-
#define untagged_addr_remote(mm, addr) ({ \
62-
unsigned long __addr = (__force unsigned long)(addr); \
63-
(__force __typeof__(addr))__untagged_addr_remote(mm, __addr); \
64-
})
65-
19+
#ifdef CONFIG_X86_32
20+
# include <asm/uaccess_32.h>
6621
#else
67-
#define untagged_addr(addr) (addr)
68-
#endif
69-
70-
#ifdef CONFIG_X86_64
71-
/*
72-
* On x86-64, we may have tag bits in the user pointer. Rather than
73-
* mask them off, just change the rules for __access_ok().
74-
*
75-
* Make the rule be that 'ptr+size' must not overflow, and must not
76-
* have the high bit set. Compilers generally understand about
77-
* unsigned overflow and the CF bit and generate reasonable code for
78-
* this. Although it looks like the combination confuses at least
79-
* clang (and instead of just doing an "add" followed by a test of
80-
* SF and CF, you'll see that unnecessary comparison).
81-
*
82-
* For the common case of small sizes that can be checked at compile
83-
* time, don't even bother with the addition, and just check that the
84-
* base pointer is ok.
85-
*/
86-
static inline bool __access_ok(const void __user *ptr, unsigned long size)
87-
{
88-
if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
89-
return (long)ptr >= 0;
90-
} else {
91-
unsigned long sum = size + (unsigned long)ptr;
92-
return (long) sum >= 0 && sum >= (unsigned long)ptr;
93-
}
94-
}
95-
#define __access_ok __access_ok
22+
# include <asm/uaccess_64.h>
9623
#endif
9724

9825
#include <asm-generic/access_ok.h>
@@ -583,14 +510,6 @@ extern struct movsl_mask {
583510

584511
#define ARCH_HAS_NOCACHE_UACCESS 1
585512

586-
#ifdef CONFIG_X86_32
587-
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
588-
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
589-
# include <asm/uaccess_32.h>
590-
#else
591-
# include <asm/uaccess_64.h>
592-
#endif
593-
594513
/*
595514
* The "unsafe" user accesses aren't really "unsafe", but the naming
596515
* is a big fat warning: you have to not only do the access_ok()

arch/x86/include/asm/uaccess_32.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,4 +33,7 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
3333
return __copy_from_user_ll_nocache_nozero(to, from, n);
3434
}
3535

36+
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
37+
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
38+
3639
#endif /* _ASM_X86_UACCESS_32_H */

arch/x86/include/asm/uaccess_64.h

Lines changed: 76 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,81 @@
1212
#include <asm/cpufeatures.h>
1313
#include <asm/page.h>
1414

15+
#ifdef CONFIG_ADDRESS_MASKING
16+
/*
17+
* Mask out tag bits from the address.
18+
*
19+
* Magic with the 'sign' allows to untag userspace pointer without any branches
20+
* while leaving kernel addresses intact.
21+
*/
22+
static inline unsigned long __untagged_addr(unsigned long addr)
23+
{
24+
long sign;
25+
26+
/*
27+
* Refer tlbstate_untag_mask directly to avoid RIP-relative relocation
28+
* in alternative instructions. The relocation becomes wrong when it gets
29+
* copied to the target place.
30+
*/
31+
asm (ALTERNATIVE("",
32+
"sar $63, %[sign]\n\t" /* user_ptr ? 0 : -1UL */
33+
"or %%gs:tlbstate_untag_mask, %[sign]\n\t"
34+
"and %[sign], %[addr]\n\t", X86_FEATURE_LAM)
35+
: [addr] "+r" (addr), [sign] "=r" (sign)
36+
: "m" (tlbstate_untag_mask), "[sign]" (addr));
37+
38+
return addr;
39+
}
40+
41+
#define untagged_addr(addr) ({ \
42+
unsigned long __addr = (__force unsigned long)(addr); \
43+
(__force __typeof__(addr))__untagged_addr(__addr); \
44+
})
45+
46+
static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
47+
unsigned long addr)
48+
{
49+
long sign = addr >> 63;
50+
51+
mmap_assert_locked(mm);
52+
addr &= (mm)->context.untag_mask | sign;
53+
54+
return addr;
55+
}
56+
57+
#define untagged_addr_remote(mm, addr) ({ \
58+
unsigned long __addr = (__force unsigned long)(addr); \
59+
(__force __typeof__(addr))__untagged_addr_remote(mm, __addr); \
60+
})
61+
62+
#endif
63+
64+
/*
65+
* On x86-64, we may have tag bits in the user pointer. Rather than
66+
* mask them off, just change the rules for __access_ok().
67+
*
68+
* Make the rule be that 'ptr+size' must not overflow, and must not
69+
* have the high bit set. Compilers generally understand about
70+
* unsigned overflow and the CF bit and generate reasonable code for
71+
* this. Although it looks like the combination confuses at least
72+
* clang (and instead of just doing an "add" followed by a test of
73+
* SF and CF, you'll see that unnecessary comparison).
74+
*
75+
* For the common case of small sizes that can be checked at compile
76+
* time, don't even bother with the addition, and just check that the
77+
* base pointer is ok.
78+
*/
79+
static inline bool __access_ok(const void __user *ptr, unsigned long size)
80+
{
81+
if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
82+
return (long)ptr >= 0;
83+
} else {
84+
unsigned long sum = size + (unsigned long)ptr;
85+
return (long) sum >= 0 && sum >= (unsigned long)ptr;
86+
}
87+
}
88+
#define __access_ok __access_ok
89+
1590
/*
1691
* Copy To/From Userspace
1792
*/
@@ -106,7 +181,7 @@ static __always_inline __must_check unsigned long __clear_user(void __user *addr
106181

107182
static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
108183
{
109-
if (access_ok(to, n))
184+
if (__access_ok(to, n))
110185
return __clear_user(to, n);
111186
return n;
112187
}

0 commit comments

Comments (0)