@@ -12,6 +12,13 @@
 #include <asm/cpufeatures.h>
 #include <asm/page.h>
 #include <asm/percpu.h>
+#include <asm/runtime-const.h>
+
+/*
+ * Virtual variable: there's no actual backing store for this,
+ * it can purely be used as 'runtime_const_ptr(USER_PTR_MAX)'
+ */
+extern unsigned long USER_PTR_MAX;

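For readers unfamiliar with <asm/runtime-const.h>: runtime_const_ptr() patches the constant's value directly into each use site at boot, so USER_PTR_MAX is never loaded from memory and needs no data-section backing store; the extern declaration exists only so the symbol name can be referenced. As a rough user-space stand-in (all names below are made up for illustration, not the kernel mechanism), the semantics are those of a write-once global:

/* Rough stand-in for the runtime-const idea: a value fixed once at
 * startup and treated as immutable afterwards. The kernel goes one
 * step further and rewrites the immediate operand in the instruction
 * stream, avoiding even the memory load. Illustrative names only. */
#include <assert.h>

static unsigned long fake_user_ptr_max;

static void fake_runtime_const_init(unsigned long val)
{
	assert(fake_user_ptr_max == 0);	/* initialized exactly once, early */
	fake_user_ptr_max = val;
}

#define fake_runtime_const_ptr()	(fake_user_ptr_max)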
 #ifdef CONFIG_ADDRESS_MASKING
 /*
@@ -46,43 +53,41 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,

 #endif

-/*
- * The virtual address space is logically divided into a kernel
- * half and a user half.  When cast to a signed type, user pointers
- * are positive and kernel pointers are negative.
- */
-#define valid_user_address(x) ((__force long)(x) >= 0)
+#define valid_user_address(x) \
+	((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX))

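The old test accepted any address with the sign bit clear, which includes the non-canonical hole below the kernel half and relied on such accesses faulting; the new test compares against an actual boot-time maximum. A small user-space sketch of the difference, with FAKE_USER_PTR_MAX standing in for the patched constant:

/* Contrast the old sign-bit test with the new range test.
 * FAKE_USER_PTR_MAX is an illustrative stand-in value. */
#include <stdio.h>

#define FAKE_USER_PTR_MAX	0x00007ffffffff000ul	/* illustrative */

static int old_valid(unsigned long p) { return (long)p >= 0; }
static int new_valid(unsigned long p) { return p <= FAKE_USER_PTR_MAX; }

int main(void)
{
	/* Non-canonical address: sign bit clear, so the old check
	 * passed it; the new range check rejects it outright. */
	unsigned long noncanonical = 0x0000900000000000ul;

	printf("old=%d new=%d\n", old_valid(noncanonical),
	       new_valid(noncanonical));
	return 0;
}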
 /*
  * Masking the user address is an alternative to a conditional
  * user_access_begin that can avoid the fencing. This only works
  * for dense accesses starting at the address.
  */
-#define mask_user_address(x) ((typeof(x))((long)(x)|((long)(x)>>63)))
+static inline void __user *mask_user_address(const void __user *ptr)
+{
+	unsigned long mask;
+	asm("cmp %1,%0\n\t"
+	    "sbb %0,%0"
+		:"=r" (mask)
+		:"r" (ptr),
+		 "0" (runtime_const_ptr(USER_PTR_MAX)));
+	return (__force void __user *)(mask | (__force unsigned long)ptr);
+}
 #define masked_user_access_begin(x) ({ __uaccess_begin(); mask_user_address(x); })

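In AT&T syntax, 'cmp %1,%0' computes USER_PTR_MAX - ptr (the "0" constraint starts the mask register off holding the runtime constant) and sets the carry flag exactly when ptr is above USER_PTR_MAX; 'sbb %0,%0' then turns that carry into an all-zeroes or all-ones mask. OR-ing the mask in leaves valid pointers untouched and turns invalid ones into all-ones, a guaranteed-to-fault address, with no branch for speculation to bypass. A minimal user-space sketch of the same logic, assuming an illustrative FAKE_USER_PTR_MAX:

#include <stdint.h>
#include <stdio.h>

#define FAKE_USER_PTR_MAX	0x00007ffffffff000ul	/* illustrative */

static uintptr_t mask_user_address_sketch(uintptr_t ptr)
{
	/* Branch-free via cmp/sbb in the real asm; written as a
	 * conditional here purely for readability. */
	uintptr_t mask = (ptr > FAKE_USER_PTR_MAX) ? ~(uintptr_t)0 : 0;

	return ptr | mask;	/* invalid pointers become all-ones */
}

int main(void)
{
	printf("%#lx\n", (unsigned long)mask_user_address_sketch(0x1000));
	printf("%#lx\n", (unsigned long)mask_user_address_sketch(0xffff888000000000ul));
	return 0;
}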
 /*
  * User pointers can have tag bits on x86-64. This scheme tolerates
  * arbitrary values in those bits rather than masking them off.
  *
  * Enforce two rules:
- * 1. 'ptr' must be in the user half of the address space
+ * 1. 'ptr' must be in the user part of the address space
  * 2. 'ptr+size' must not overflow into kernel addresses
  *
- * Note that addresses around the sign change are not valid addresses,
- * and will GP-fault even with LAM enabled if the sign bit is set (see
- * "CR3.LAM_SUP" that can narrow the canonicality check if we ever
- * enable it, but not remove it entirely).
- *
- * So the "overflow into kernel addresses" does not imply some sudden
- * exact boundary at the sign bit, and we can allow a lot of slop on the
- * size check.
+ * Note that we always have at least one guard page between the
+ * max user address and the non-canonical gap, allowing us to
+ * ignore small sizes entirely.
  *
  * In fact, we could probably remove the size check entirely, since
  * any kernel accesses will be in increasing address order starting
- * at 'ptr', and even if the end might be in kernel space, we'll
- * hit the GP faults for non-canonical accesses before we ever get
- * there.
+ * at 'ptr'.
  *
  * That's a separate optimization, for now just handle the small
  * constant case.
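The "small constant case" the comment refers to suggests an access_ok()-style helper along these lines, reusing valid_user_address() from above. This is a sketch matching the comment's reasoning and assumes kernel context (PAGE_SIZE and friends); the helper that actually follows in the file may differ in detail:

/* Sketch of the check described above: sizes known at compile time to
 * fit in a page cannot reach past the guard page after USER_PTR_MAX,
 * so validating the start address alone is sufficient; larger or
 * variable sizes also validate the (overflow-checked) end of range. */
static inline bool access_ok_sketch(const void __user *ptr, unsigned long size)
{
	if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
		return valid_user_address(ptr);
	} else {
		unsigned long sum = size + (__force unsigned long)ptr;

		/* 'sum >= ptr' rejects wraparound past the top of memory */
		return valid_user_address(sum) && sum >= (__force unsigned long)ptr;
	}
}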