Skip to content

Commit acfdf34

Browse files
committed
Merge branch 'for-next/module-alloc' into kvmarm/next

* for-next/module-alloc:
  : Drag in module VA rework to handle conflicts w/ sw feature refactor
  arm64: module: rework module VA range selection
  arm64: module: mandate MODULE_PLTS
  arm64: module: move module randomization to module.c
  arm64: kaslr: split kaslr/module initialization
  arm64: kasan: remove !KASAN_VMALLOC remnants
  arm64: module: remove old !KASAN_VMALLOC logic

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
2 parents b710fe0 + 3e35d30 commit acfdf34

11 files changed

Lines changed: 159 additions & 175 deletions

File tree

Documentation/arm64/memory.rst

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -33,8 +33,8 @@ AArch64 Linux memory layout with 4KB pages + 4 levels (48-bit)::
3333
0000000000000000 0000ffffffffffff 256TB user
3434
ffff000000000000 ffff7fffffffffff 128TB kernel logical memory map
3535
[ffff600000000000 ffff7fffffffffff] 32TB [kasan shadow region]
36-
ffff800000000000 ffff800007ffffff 128MB modules
37-
ffff800008000000 fffffbffefffffff 124TB vmalloc
36+
ffff800000000000 ffff80007fffffff 2GB modules
37+
ffff800080000000 fffffbffefffffff 124TB vmalloc
3838
fffffbfff0000000 fffffbfffdffffff 224MB fixed mappings (top down)
3939
fffffbfffe000000 fffffbfffe7fffff 8MB [guard region]
4040
fffffbfffe800000 fffffbffff7fffff 16MB PCI I/O space
@@ -50,8 +50,8 @@ AArch64 Linux memory layout with 64KB pages + 3 levels (52-bit with HW support):
5050
0000000000000000 000fffffffffffff 4PB user
5151
fff0000000000000 ffff7fffffffffff ~4PB kernel logical memory map
5252
[fffd800000000000 ffff7fffffffffff] 512TB [kasan shadow region]
53-
ffff800000000000 ffff800007ffffff 128MB modules
54-
ffff800008000000 fffffbffefffffff 124TB vmalloc
53+
ffff800000000000 ffff80007fffffff 2GB modules
54+
ffff800080000000 fffffbffefffffff 124TB vmalloc
5555
fffffbfff0000000 fffffbfffdffffff 224MB fixed mappings (top down)
5656
fffffbfffe000000 fffffbfffe7fffff 8MB [guard region]
5757
fffffbfffe800000 fffffbffff7fffff 16MB PCI I/O space

arch/arm64/Kconfig

Lines changed: 3 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -207,6 +207,7 @@ config ARM64
207207
select HAVE_IOREMAP_PROT
208208
select HAVE_IRQ_TIME_ACCOUNTING
209209
select HAVE_KVM
210+
select HAVE_MOD_ARCH_SPECIFIC
210211
select HAVE_NMI
211212
select HAVE_PERF_EVENTS
212213
select HAVE_PERF_REGS
@@ -577,7 +578,6 @@ config ARM64_ERRATUM_845719
577578
config ARM64_ERRATUM_843419
578579
bool "Cortex-A53: 843419: A load or store might access an incorrect address"
579580
default y
580-
select ARM64_MODULE_PLTS if MODULES
581581
help
582582
This option links the kernel with '--fix-cortex-a53-843419' and
583583
enables PLT support to replace certain ADRP instructions, which can
@@ -2107,26 +2107,6 @@ config ARM64_SME
21072107
register state capable of holding two dimensional matrix tiles to
21082108
enable various matrix operations.
21092109

2110-
config ARM64_MODULE_PLTS
2111-
bool "Use PLTs to allow module memory to spill over into vmalloc area"
2112-
depends on MODULES
2113-
select HAVE_MOD_ARCH_SPECIFIC
2114-
help
2115-
Allocate PLTs when loading modules so that jumps and calls whose
2116-
targets are too far away for their relative offsets to be encoded
2117-
in the instructions themselves can be bounced via veneers in the
2118-
module's PLT. This allows modules to be allocated in the generic
2119-
vmalloc area after the dedicated module memory area has been
2120-
exhausted.
2121-
2122-
When running with address space randomization (KASLR), the module
2123-
region itself may be too far away for ordinary relative jumps and
2124-
calls, and so in that case, module PLTs are required and cannot be
2125-
disabled.
2126-
2127-
Specific errata workaround(s) might also force module PLTs to be
2128-
enabled (ARM64_ERRATUM_843419).
2129-
21302110
config ARM64_PSEUDO_NMI
21312111
bool "Support for NMI-like interrupts"
21322112
select ARM_GIC_V3
@@ -2167,7 +2147,6 @@ config RELOCATABLE
21672147

21682148
config RANDOMIZE_BASE
21692149
bool "Randomize the address of the kernel image"
2170-
select ARM64_MODULE_PLTS if MODULES
21712150
select RELOCATABLE
21722151
help
21732152
Randomizes the virtual address at which the kernel image is
@@ -2198,9 +2177,8 @@ config RANDOMIZE_MODULE_REGION_FULL
21982177
When this option is not set, the module region will be randomized over
21992178
a limited range that contains the [_stext, _etext] interval of the
22002179
core kernel, so branch relocations are almost always in range unless
2201-
ARM64_MODULE_PLTS is enabled and the region is exhausted. In this
2202-
particular case of region exhaustion, modules might be able to fall
2203-
back to a larger 2GB area.
2180+
the region is exhausted. In this particular case of region
2181+
exhaustion, modules might be able to fall back to a larger 2GB area.
22042182

22052183
config CC_HAVE_STACKPROTECTOR_SYSREG
22062184
def_bool $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0)

arch/arm64/include/asm/memory.h

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@
4646
#define KIMAGE_VADDR (MODULES_END)
4747
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
4848
#define MODULES_VADDR (_PAGE_END(VA_BITS_MIN))
49-
#define MODULES_VSIZE (SZ_128M)
49+
#define MODULES_VSIZE (SZ_2G)
5050
#define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
5151
#define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE)
5252
#define PCI_IO_END (VMEMMAP_START - SZ_8M)
@@ -204,15 +204,17 @@ static inline unsigned long kaslr_offset(void)
204204
return kimage_vaddr - KIMAGE_VADDR;
205205
}
206206

207+
#ifdef CONFIG_RANDOMIZE_BASE
208+
void kaslr_init(void);
207209
static inline bool kaslr_enabled(void)
208210
{
209-
/*
210-
* The KASLR offset modulo MIN_KIMG_ALIGN is taken from the physical
211-
* placement of the image rather than from the seed, so a displacement
212-
* of less than MIN_KIMG_ALIGN means that no seed was provided.
213-
*/
214-
return kaslr_offset() >= MIN_KIMG_ALIGN;
211+
extern bool __kaslr_is_enabled;
212+
return __kaslr_is_enabled;
215213
}
214+
#else
215+
static inline void kaslr_init(void) { }
216+
static inline bool kaslr_enabled(void) { return false; }
217+
#endif
216218

217219
/*
218220
* Allow all memory at the discovery stage. We will clip it later.

arch/arm64/include/asm/module.h

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77

88
#include <asm-generic/module.h>
99

10-
#ifdef CONFIG_ARM64_MODULE_PLTS
1110
struct mod_plt_sec {
1211
int plt_shndx;
1312
int plt_num_entries;
@@ -21,7 +20,6 @@ struct mod_arch_specific {
2120
/* for CONFIG_DYNAMIC_FTRACE */
2221
struct plt_entry *ftrace_trampolines;
2322
};
24-
#endif
2523

2624
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
2725
void *loc, const Elf64_Rela *rela,
@@ -30,12 +28,6 @@ u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
3028
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
3129
void *loc, u64 val);
3230

33-
#ifdef CONFIG_RANDOMIZE_BASE
34-
extern u64 module_alloc_base;
35-
#else
36-
#define module_alloc_base ((u64)_etext - MODULES_VSIZE)
37-
#endif
38-
3931
struct plt_entry {
4032
/*
4133
* A program that conforms to the AArch64 Procedure Call Standard

arch/arm64/include/asm/module.lds.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,7 @@
11
SECTIONS {
2-
#ifdef CONFIG_ARM64_MODULE_PLTS
32
.plt 0 : { BYTE(0) }
43
.init.plt 0 : { BYTE(0) }
54
.text.ftrace_trampoline 0 : { BYTE(0) }
6-
#endif
75

86
#ifdef CONFIG_KASAN_SW_TAGS
97
/*

arch/arm64/kernel/Makefile

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -42,8 +42,7 @@ obj-$(CONFIG_COMPAT) += sigreturn32.o
4242
obj-$(CONFIG_COMPAT_ALIGNMENT_FIXUPS) += compat_alignment.o
4343
obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
4444
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
45-
obj-$(CONFIG_MODULES) += module.o
46-
obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
45+
obj-$(CONFIG_MODULES) += module.o module-plts.o
4746
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
4847
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
4948
obj-$(CONFIG_CPU_PM) += sleep.o suspend.o

arch/arm64/kernel/ftrace.c

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -197,7 +197,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
197197

198198
static struct plt_entry *get_ftrace_plt(struct module *mod)
199199
{
200-
#ifdef CONFIG_ARM64_MODULE_PLTS
200+
#ifdef CONFIG_MODULES
201201
struct plt_entry *plt = mod->arch.ftrace_trampolines;
202202

203203
return &plt[FTRACE_PLT_IDX];
@@ -249,7 +249,7 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
249249
* must use a PLT to reach it. We can only place PLTs for modules, and
250250
* only when module PLT support is built-in.
251251
*/
252-
if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
252+
if (!IS_ENABLED(CONFIG_MODULES))
253253
return false;
254254

255255
/*
@@ -431,10 +431,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
431431
*
432432
* Note: 'mod' is only set at module load time.
433433
*/
434-
if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) &&
435-
IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
434+
if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) && mod)
436435
return aarch64_insn_patch_text_nosync((void *)pc, new);
437-
}
438436

439437
if (!ftrace_find_callable_addr(rec, mod, &addr))
440438
return -EINVAL;

arch/arm64/kernel/kaslr.c

Lines changed: 14 additions & 69 deletions
Original file line numberDiff line numberDiff line change
@@ -4,90 +4,35 @@
44
*/
55

66
#include <linux/cache.h>
7-
#include <linux/crc32.h>
87
#include <linux/init.h>
9-
#include <linux/libfdt.h>
10-
#include <linux/mm_types.h>
11-
#include <linux/sched.h>
12-
#include <linux/types.h>
13-
#include <linux/pgtable.h>
14-
#include <linux/random.h>
8+
#include <linux/printk.h>
159

16-
#include <asm/fixmap.h>
17-
#include <asm/kernel-pgtable.h>
10+
#include <asm/cpufeature.h>
1811
#include <asm/memory.h>
19-
#include <asm/mmu.h>
20-
#include <asm/sections.h>
21-
#include <asm/setup.h>
2212

23-
u64 __ro_after_init module_alloc_base;
2413
u16 __initdata memstart_offset_seed;
2514

26-
static int __init kaslr_init(void)
27-
{
28-
u64 module_range;
29-
u32 seed;
30-
31-
/*
32-
* Set a reasonable default for module_alloc_base in case
33-
* we end up running with module randomization disabled.
34-
*/
35-
module_alloc_base = (u64)_etext - MODULES_VSIZE;
15+
bool __ro_after_init __kaslr_is_enabled = false;
3616

17+
void __init kaslr_init(void)
18+
{
3719
if (cpuid_feature_extract_unsigned_field(arm64_sw_feature_override.val &
3820
arm64_sw_feature_override.mask,
3921
ARM64_SW_FEATURE_OVERRIDE_NOKASLR)) {
4022
pr_info("KASLR disabled on command line\n");
41-
return 0;
42-
}
43-
44-
if (!kaslr_enabled()) {
45-
pr_warn("KASLR disabled due to lack of seed\n");
46-
return 0;
23+
return;
4724
}
4825

49-
pr_info("KASLR enabled\n");
50-
5126
/*
52-
* KASAN without KASAN_VMALLOC does not expect the module region to
53-
* intersect the vmalloc region, since shadow memory is allocated for
54-
* each module at load time, whereas the vmalloc region will already be
55-
* shadowed by KASAN zero pages.
27+
* The KASLR offset modulo MIN_KIMG_ALIGN is taken from the physical
28+
* placement of the image rather than from the seed, so a displacement
29+
* of less than MIN_KIMG_ALIGN means that no seed was provided.
5630
*/
57-
BUILD_BUG_ON((IS_ENABLED(CONFIG_KASAN_GENERIC) ||
58-
IS_ENABLED(CONFIG_KASAN_SW_TAGS)) &&
59-
!IS_ENABLED(CONFIG_KASAN_VMALLOC));
60-
61-
seed = get_random_u32();
62-
63-
if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
64-
/*
65-
* Randomize the module region over a 2 GB window covering the
66-
* kernel. This reduces the risk of modules leaking information
67-
* about the address of the kernel itself, but results in
68-
* branches between modules and the core kernel that are
69-
* resolved via PLTs. (Branches between modules will be
70-
* resolved normally.)
71-
*/
72-
module_range = SZ_2G - (u64)(_end - _stext);
73-
module_alloc_base = max((u64)_end - SZ_2G, (u64)MODULES_VADDR);
74-
} else {
75-
/*
76-
* Randomize the module region by setting module_alloc_base to
77-
* a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
78-
* _stext) . This guarantees that the resulting region still
79-
* covers [_stext, _etext], and that all relative branches can
80-
* be resolved without veneers unless this region is exhausted
81-
* and we fall back to a larger 2GB window in module_alloc()
82-
* when ARM64_MODULE_PLTS is enabled.
83-
*/
84-
module_range = MODULES_VSIZE - (u64)(_etext - _stext);
31+
if (kaslr_offset() < MIN_KIMG_ALIGN) {
32+
pr_warn("KASLR disabled due to lack of seed\n");
33+
return;
8534
}
8635

87-
/* use the lower 21 bits to randomize the base of the module region */
88-
module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
89-
module_alloc_base &= PAGE_MASK;
90-
91-
return 0;
36+
pr_info("KASLR enabled\n");
37+
__kaslr_is_enabled = true;
9238
}
93-
subsys_initcall(kaslr_init)

Comments (0)

0 commit comments