|
17 | 17 | #include <linux/compat.h> |
18 | 18 | #include <linux/sizes.h> |
19 | 19 | #include <linux/user.h> |
| 20 | +#include <linux/syscalls.h> |
20 | 21 | #include <asm/msr.h> |
21 | 22 | #include <asm/fpu/xstate.h> |
22 | 23 | #include <asm/fpu/types.h> |
@@ -71,19 +72,31 @@ static int create_rstor_token(unsigned long ssp, unsigned long *token_addr) |
71 | 72 | return 0; |
72 | 73 | } |
73 | 74 |
|
/*
 * Map a new shadow stack region of @size bytes.
 *
 * @addr:         requested placement hint; 0 lets the kernel pick. A non-zero
 *                addr is enforced exactly via MAP_FIXED_NOREPLACE, so an
 *                already-mapped range fails rather than being replaced.
 * @size:         length of the mapping in bytes (caller page-aligns).
 * @token_offset: byte offset from the mapped base at which a restore token
 *                is placed when @set_res_tok is true.
 * @set_res_tok:  whether to write a shadow stack restore token into the new
 *                mapping (used by the map_shadow_stack syscall).
 *
 * Returns the mapped address on success, or a negative errno encoded as an
 * unsigned long (check with IS_ERR_VALUE()).
 */
static unsigned long alloc_shstk(unsigned long addr, unsigned long size,
				 unsigned long token_offset, bool set_res_tok)
{
	/* MAP_ABOVE4G keeps shadow stacks out of the low 4GB (see CVE-style
	 * concerns with 32-bit wrap of SSP in compat mode). */
	int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_ABOVE4G;
	struct mm_struct *mm = current->mm;
	unsigned long mapped_addr, unused;

	if (addr)
		flags |= MAP_FIXED_NOREPLACE;

	/* do_mmap() requires the mmap write lock to be held by the caller. */
	mmap_write_lock(mm);
	mapped_addr = do_mmap(NULL, addr, size, PROT_READ, flags,
			      VM_SHADOW_STACK | VM_WRITE, 0, &unused, NULL);
	mmap_write_unlock(mm);

	/* No token requested, or the mapping itself failed: return as-is. */
	if (!set_res_tok || IS_ERR_VALUE(mapped_addr))
		goto out;

	if (create_rstor_token(mapped_addr + token_offset, NULL)) {
		/* Token write failed: don't leak the mapping. */
		vm_munmap(mapped_addr, size);
		return -EINVAL;
	}

out:
	return mapped_addr;
}
88 | 101 |
|
89 | 102 | static unsigned long adjust_shstk_size(unsigned long size) |
@@ -134,7 +147,7 @@ static int shstk_setup(void) |
134 | 147 | return -EOPNOTSUPP; |
135 | 148 |
|
136 | 149 | size = adjust_shstk_size(0); |
137 | | - addr = alloc_shstk(size); |
| 150 | + addr = alloc_shstk(0, size, 0, false); |
138 | 151 | if (IS_ERR_VALUE(addr)) |
139 | 152 | return PTR_ERR((void *)addr); |
140 | 153 |
|
@@ -178,7 +191,7 @@ unsigned long shstk_alloc_thread_stack(struct task_struct *tsk, unsigned long cl |
178 | 191 | return 0; |
179 | 192 |
|
180 | 193 | size = adjust_shstk_size(stack_size); |
181 | | - addr = alloc_shstk(size); |
| 194 | + addr = alloc_shstk(0, size, 0, false); |
182 | 195 | if (IS_ERR_VALUE(addr)) |
183 | 196 | return addr; |
184 | 197 |
|
@@ -398,6 +411,36 @@ static int shstk_disable(void) |
398 | 411 | return 0; |
399 | 412 | } |
400 | 413 |
|
/*
 * map_shadow_stack() - create a shadow stack mapping for userspace.
 *
 * @addr:  placement hint (0 = kernel chooses); must be >= 4GB if non-zero,
 *         since shadow stacks are kept above 4GB (MAP_ABOVE4G).
 * @size:  requested size in bytes; page-aligned up before mapping.
 * @flags: only SHADOW_STACK_SET_TOKEN is accepted, which asks the kernel to
 *         write a restore token at the top of the new shadow stack.
 *
 * Returns the mapped address, or a negative errno.
 */
SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags)
{
	bool set_tok = flags & SHADOW_STACK_SET_TOKEN;
	unsigned long aligned_size;

	if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
		return -EOPNOTSUPP;

	/* Reject unknown flag bits so they stay available for future use. */
	if (flags & ~SHADOW_STACK_SET_TOKEN)
		return -EINVAL;

	/* If there isn't space for a token */
	if (set_tok && size < 8)
		return -ENOSPC;

	/* Non-zero hints below 4GB can never succeed with MAP_ABOVE4G. */
	if (addr && addr < SZ_4G)
		return -ERANGE;

	/*
	 * An overflow would result in attempting to write the restore token
	 * to the wrong location. Not catastrophic, but just return the right
	 * error code and block it.
	 */
	aligned_size = PAGE_ALIGN(size);
	if (aligned_size < size)
		return -EOVERFLOW;

	/*
	 * Token offset is the unaligned @size so the token lands at the top
	 * of the user-requested region, not of the page-aligned mapping.
	 */
	return alloc_shstk(addr, aligned_size, size, set_tok);
}
| 443 | + |
401 | 444 | long shstk_prctl(struct task_struct *task, int option, unsigned long features) |
402 | 445 | { |
403 | 446 | if (option == ARCH_SHSTK_LOCK) { |
|
0 commit comments