Skip to content

Commit 77dfca7

Browse files
committed
Merge branch 'for-next/mm' into for-next/core
* for-next/mm:
  - arm64: map [_text, _stext) virtual address range non-executable+read-only
  - arm64: Enable vmalloc-huge with ptdump
  - arm64: mm: split linear mapping if BBML2 unsupported on secondary CPUs
  - arm64: mm: support large block mapping when rodata=full
  - arm64: Enable permission change on arm64 kernel block mappings
  - arm64/Kconfig: Remove CONFIG_RODATA_FULL_DEFAULT_ENABLED
  - arm64: mm: Rework the 'rodata=' options
  - arm64: mm: Represent physical memory with phys_addr_t and resource_size_t
  - arm64: mm: Make map_fdt() return mapped pointer
  - arm64: mm: Cast start/end markers to char *, not u64
2 parents 30f9386 + 5973a62 commit 77dfca7

20 files changed

Lines changed: 692 additions & 146 deletions

File tree

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -6405,8 +6405,9 @@
64056405
rodata= [KNL,EARLY]
64066406
on Mark read-only kernel memory as read-only (default).
64076407
off Leave read-only kernel memory writable for debugging.
6408-
full Mark read-only kernel memory and aliases as read-only
6409-
[arm64]
6408+
noalias Mark read-only kernel memory as read-only but retain
6409+
writable aliases in the direct map for regions outside
6410+
of the kernel image. [arm64]
64106411

64116412
rockchip.usb_uart
64126413
[EARLY]

arch/arm64/Kconfig

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1700,20 +1700,6 @@ config MITIGATE_SPECTRE_BRANCH_HISTORY
17001700
When taking an exception from user-space, a sequence of branches
17011701
or a firmware call overwrites the branch history.
17021702

1703-
config RODATA_FULL_DEFAULT_ENABLED
1704-
bool "Apply r/o permissions of VM areas also to their linear aliases"
1705-
default y
1706-
help
1707-
Apply read-only attributes of VM areas to the linear alias of
1708-
the backing pages as well. This prevents code or read-only data
1709-
from being modified (inadvertently or intentionally) via another
1710-
mapping of the same memory page. This additional enhancement can
1711-
be turned off at runtime by passing rodata=[off|on] (and turned on
1712-
with rodata=full if this option is set to 'n')
1713-
1714-
This requires the linear region to be mapped down to pages,
1715-
which may adversely affect performance in some cases.
1716-
17171703
config ARM64_SW_TTBR0_PAN
17181704
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
17191705
depends on !KCSAN

arch/arm64/include/asm/cpufeature.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -871,6 +871,8 @@ static inline bool system_supports_pmuv3(void)
871871
return cpus_have_final_cap(ARM64_HAS_PMUV3);
872872
}
873873

874+
bool cpu_supports_bbml2_noabort(void);
875+
874876
static inline bool system_supports_bbml2_noabort(void)
875877
{
876878
return alternative_has_cap_unlikely(ARM64_HAS_BBML2_NOABORT);

arch/arm64/include/asm/mmu.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,9 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
7878
pgprot_t prot, bool page_mappings_only);
7979
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
8080
extern void mark_linear_text_alias_ro(void);
81+
extern int split_kernel_leaf_mapping(unsigned long start, unsigned long end);
82+
extern void init_idmap_kpti_bbml2_flag(void);
83+
extern void linear_map_maybe_split_to_ptes(void);
8184

8285
/*
8386
* This check is triggered during the early boot before the cpufeature

arch/arm64/include/asm/pgtable.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -371,6 +371,11 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
371371
return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
372372
}
373373

374+
static inline pmd_t pmd_mknoncont(pmd_t pmd)
375+
{
376+
return __pmd(pmd_val(pmd) & ~PMD_SECT_CONT);
377+
}
378+
374379
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
375380
static inline int pte_uffd_wp(pte_t pte)
376381
{

arch/arm64/include/asm/ptdump.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@
77

88
#include <linux/ptdump.h>
99

10+
DECLARE_STATIC_KEY_FALSE(arm64_ptdump_lock_key);
11+
1012
#ifdef CONFIG_PTDUMP
1113

1214
#include <linux/mm_types.h>

arch/arm64/include/asm/setup.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ static inline bool arch_parse_debug_rodata(char *arg)
2121
if (!arg)
2222
return false;
2323

24-
if (!strcmp(arg, "full")) {
24+
if (!strcmp(arg, "on")) {
2525
rodata_enabled = rodata_full = true;
2626
return true;
2727
}
@@ -31,7 +31,7 @@ static inline bool arch_parse_debug_rodata(char *arg)
3131
return true;
3232
}
3333

34-
if (!strcmp(arg, "on")) {
34+
if (!strcmp(arg, "noalias")) {
3535
rodata_enabled = true;
3636
rodata_full = false;
3737
return true;

arch/arm64/include/asm/vmalloc.h

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -9,18 +9,13 @@
99
#define arch_vmap_pud_supported arch_vmap_pud_supported
1010
static inline bool arch_vmap_pud_supported(pgprot_t prot)
1111
{
12-
/*
13-
* SW table walks can't handle removal of intermediate entries.
14-
*/
15-
return pud_sect_supported() &&
16-
!IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
12+
return pud_sect_supported();
1713
}
1814

1915
#define arch_vmap_pmd_supported arch_vmap_pmd_supported
2016
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
2117
{
22-
/* See arch_vmap_pud_supported() */
23-
return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
18+
return true;
2419
}
2520

2621
#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size

arch/arm64/kernel/cpufeature.c

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -86,6 +86,7 @@
8686
#include <asm/kvm_host.h>
8787
#include <asm/mmu.h>
8888
#include <asm/mmu_context.h>
89+
#include <asm/mmu.h>
8990
#include <asm/mte.h>
9091
#include <asm/hypervisor.h>
9192
#include <asm/processor.h>
@@ -2029,6 +2030,7 @@ static void __init kpti_install_ng_mappings(void)
20292030
if (arm64_use_ng_mappings)
20302031
return;
20312032

2033+
init_idmap_kpti_bbml2_flag();
20322034
stop_machine(__kpti_install_ng_mappings, NULL, cpu_online_mask);
20332035
}
20342036

@@ -2219,7 +2221,7 @@ static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
22192221
return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE);
22202222
}
22212223

2222-
static bool has_bbml2_noabort(const struct arm64_cpu_capabilities *caps, int scope)
2224+
bool cpu_supports_bbml2_noabort(void)
22232225
{
22242226
/*
22252227
* We want to allow usage of BBML2 in as wide a range of kernel contexts
@@ -2255,6 +2257,11 @@ static bool has_bbml2_noabort(const struct arm64_cpu_capabilities *caps, int sco
22552257
return true;
22562258
}
22572259

2260+
static bool has_bbml2_noabort(const struct arm64_cpu_capabilities *caps, int scope)
2261+
{
2262+
return cpu_supports_bbml2_noabort();
2263+
}
2264+
22582265
#ifdef CONFIG_ARM64_PAN
22592266
static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
22602267
{
@@ -3930,6 +3937,7 @@ void __init setup_system_features(void)
39303937
{
39313938
setup_system_capabilities();
39323939

3940+
linear_map_maybe_split_to_ptes();
39333941
kpti_install_ng_mappings();
39343942

39353943
sve_setup();

arch/arm64/kernel/pi/map_kernel.c

Lines changed: 27 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,9 @@
1818

1919
extern const u8 __eh_frame_start[], __eh_frame_end[];
2020

21-
extern void idmap_cpu_replace_ttbr1(void *pgdir);
21+
extern void idmap_cpu_replace_ttbr1(phys_addr_t pgdir);
2222

23-
static void __init map_segment(pgd_t *pg_dir, u64 *pgd, u64 va_offset,
23+
static void __init map_segment(pgd_t *pg_dir, phys_addr_t *pgd, u64 va_offset,
2424
void *start, void *end, pgprot_t prot,
2525
bool may_use_cont, int root_level)
2626
{
@@ -40,7 +40,7 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
4040
{
4141
bool enable_scs = IS_ENABLED(CONFIG_UNWIND_PATCH_PAC_INTO_SCS);
4242
bool twopass = IS_ENABLED(CONFIG_RELOCATABLE);
43-
u64 pgdp = (u64)init_pg_dir + PAGE_SIZE;
43+
phys_addr_t pgdp = (phys_addr_t)init_pg_dir + PAGE_SIZE;
4444
pgprot_t text_prot = PAGE_KERNEL_ROX;
4545
pgprot_t data_prot = PAGE_KERNEL;
4646
pgprot_t prot;
@@ -78,6 +78,12 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
7878
twopass |= enable_scs;
7979
prot = twopass ? data_prot : text_prot;
8080

81+
/*
82+
* [_stext, _text) isn't executed after boot and contains some
83+
* non-executable, unpredictable data, so map it non-executable.
84+
*/
85+
map_segment(init_pg_dir, &pgdp, va_offset, _text, _stext, data_prot,
86+
false, root_level);
8187
map_segment(init_pg_dir, &pgdp, va_offset, _stext, _etext, prot,
8288
!twopass, root_level);
8389
map_segment(init_pg_dir, &pgdp, va_offset, __start_rodata,
@@ -90,7 +96,7 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
9096
true, root_level);
9197
dsb(ishst);
9298

93-
idmap_cpu_replace_ttbr1(init_pg_dir);
99+
idmap_cpu_replace_ttbr1((phys_addr_t)init_pg_dir);
94100

95101
if (twopass) {
96102
if (IS_ENABLED(CONFIG_RELOCATABLE))
@@ -129,10 +135,10 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
129135
/* Copy the root page table to its final location */
130136
memcpy((void *)swapper_pg_dir + va_offset, init_pg_dir, PAGE_SIZE);
131137
dsb(ishst);
132-
idmap_cpu_replace_ttbr1(swapper_pg_dir);
138+
idmap_cpu_replace_ttbr1((phys_addr_t)swapper_pg_dir);
133139
}
134140

135-
static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr)
141+
static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(phys_addr_t ttbr)
136142
{
137143
u64 sctlr = read_sysreg(sctlr_el1);
138144
u64 tcr = read_sysreg(tcr_el1) | TCR_DS;
@@ -172,30 +178,30 @@ static void __init remap_idmap_for_lpa2(void)
172178
*/
173179
create_init_idmap(init_pg_dir, mask);
174180
dsb(ishst);
175-
set_ttbr0_for_lpa2((u64)init_pg_dir);
181+
set_ttbr0_for_lpa2((phys_addr_t)init_pg_dir);
176182

177183
/*
178184
* Recreate the initial ID map with the same granularity as before.
179185
* Don't bother with the FDT, we no longer need it after this.
180186
*/
181187
memset(init_idmap_pg_dir, 0,
182-
(u64)init_idmap_pg_end - (u64)init_idmap_pg_dir);
188+
(char *)init_idmap_pg_end - (char *)init_idmap_pg_dir);
183189

184190
create_init_idmap(init_idmap_pg_dir, mask);
185191
dsb(ishst);
186192

187193
/* switch back to the updated initial ID map */
188-
set_ttbr0_for_lpa2((u64)init_idmap_pg_dir);
194+
set_ttbr0_for_lpa2((phys_addr_t)init_idmap_pg_dir);
189195

190196
/* wipe the temporary ID map from memory */
191-
memset(init_pg_dir, 0, (u64)init_pg_end - (u64)init_pg_dir);
197+
memset(init_pg_dir, 0, (char *)init_pg_end - (char *)init_pg_dir);
192198
}
193199

194-
static void __init map_fdt(u64 fdt)
200+
static void *__init map_fdt(phys_addr_t fdt)
195201
{
196202
static u8 ptes[INIT_IDMAP_FDT_SIZE] __initdata __aligned(PAGE_SIZE);
197-
u64 efdt = fdt + MAX_FDT_SIZE;
198-
u64 ptep = (u64)ptes;
203+
phys_addr_t efdt = fdt + MAX_FDT_SIZE;
204+
phys_addr_t ptep = (phys_addr_t)ptes; /* We're idmapped when called */
199205

200206
/*
201207
* Map up to MAX_FDT_SIZE bytes, but avoid overlap with
@@ -205,6 +211,8 @@ static void __init map_fdt(u64 fdt)
205211
fdt, PAGE_KERNEL, IDMAP_ROOT_LEVEL,
206212
(pte_t *)init_idmap_pg_dir, false, 0);
207213
dsb(ishst);
214+
215+
return (void *)fdt;
208216
}
209217

210218
/*
@@ -230,23 +238,22 @@ static bool __init ng_mappings_allowed(void)
230238
return true;
231239
}
232240

233-
asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
241+
asmlinkage void __init early_map_kernel(u64 boot_status, phys_addr_t fdt)
234242
{
235243
static char const chosen_str[] __initconst = "/chosen";
236244
u64 va_base, pa_base = (u64)&_text;
237245
u64 kaslr_offset = pa_base % MIN_KIMG_ALIGN;
238246
int root_level = 4 - CONFIG_PGTABLE_LEVELS;
239247
int va_bits = VA_BITS;
240248
int chosen;
241-
242-
map_fdt((u64)fdt);
249+
void *fdt_mapped = map_fdt(fdt);
243250

244251
/* Clear BSS and the initial page tables */
245-
memset(__bss_start, 0, (u64)init_pg_end - (u64)__bss_start);
252+
memset(__bss_start, 0, (char *)init_pg_end - (char *)__bss_start);
246253

247254
/* Parse the command line for CPU feature overrides */
248-
chosen = fdt_path_offset(fdt, chosen_str);
249-
init_feature_override(boot_status, fdt, chosen);
255+
chosen = fdt_path_offset(fdt_mapped, chosen_str);
256+
init_feature_override(boot_status, fdt_mapped, chosen);
250257

251258
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && !cpu_has_lva()) {
252259
va_bits = VA_BITS_MIN;
@@ -266,7 +273,7 @@ asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
266273
* fill in the high bits from the seed.
267274
*/
268275
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
269-
u64 kaslr_seed = kaslr_early_init(fdt, chosen);
276+
u64 kaslr_seed = kaslr_early_init(fdt_mapped, chosen);
270277

271278
if (kaslr_seed && kaslr_requires_kpti())
272279
arm64_use_ng_mappings = ng_mappings_allowed();

0 commit comments

Comments (0)