Skip to content

Commit 3f35db4

Browse files
committed
Merge branch 'for-next/cpufeature' into for-next/core
* for-next/cpufeature:
  arm64: Align boot cpucap handling with system cpucap handling
  arm64: Cleanup system cpucap handling
  arm64: Kconfig: drop KAISER reference from KPTI option description
  arm64: mm: Only map KPTI trampoline if it is going to be used
  arm64: Get rid of ARM64_HAS_NO_HW_PREFETCH
2 parents 2cc14f5 + eb15d70 commit 3f35db4

8 files changed

Lines changed: 67 additions & 86 deletions

File tree

arch/arm64/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1549,7 +1549,7 @@ config ARCH_FORCE_MAX_ORDER
15491549
Don't change if unsure.
15501550

15511551
config UNMAP_KERNEL_AT_EL0
1552-
bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
1552+
bool "Unmap kernel when running in userspace (KPTI)" if EXPERT
15531553
default y
15541554
help
15551555
Speculation attacks against some high-performance processors can

arch/arm64/include/asm/cpufeature.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -617,6 +617,7 @@ static inline bool id_aa64pfr1_mte(u64 pfr1)
617617
return val >= ID_AA64PFR1_EL1_MTE_MTE2;
618618
}
619619

620+
void __init setup_boot_cpu_features(void);
620621
void __init setup_system_features(void);
621622
void __init setup_user_features(void);
622623

arch/arm64/kernel/cpufeature.c

Lines changed: 57 additions & 62 deletions
Original file line number | Diff line number | Diff line change
@@ -1081,25 +1081,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
10811081

10821082
if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
10831083
init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
1084-
1085-
/*
1086-
* Initialize the indirect array of CPU capabilities pointers before we
1087-
* handle the boot CPU below.
1088-
*/
1089-
init_cpucap_indirect_list();
1090-
1091-
/*
1092-
* Detect broken pseudo-NMI. Must be called _before_ the call to
1093-
* setup_boot_cpu_capabilities() since it interacts with
1094-
* can_use_gic_priorities().
1095-
*/
1096-
detect_system_supports_pseudo_nmi();
1097-
1098-
/*
1099-
* Detect and enable early CPU capabilities based on the boot CPU,
1100-
* after we have initialised the CPU feature infrastructure.
1101-
*/
1102-
setup_boot_cpu_capabilities();
11031084
}
11041085

11051086
static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
@@ -1584,16 +1565,6 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry,
15841565
return has_sre;
15851566
}
15861567

1587-
static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
1588-
{
1589-
u32 midr = read_cpuid_id();
1590-
1591-
/* Cavium ThunderX pass 1.x and 2.x */
1592-
return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
1593-
MIDR_CPU_VAR_REV(0, 0),
1594-
MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
1595-
}
1596-
15971568
static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
15981569
int scope)
15991570
{
@@ -2321,12 +2292,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
23212292
ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, ATOMIC, IMP)
23222293
},
23232294
#endif /* CONFIG_ARM64_LSE_ATOMICS */
2324-
{
2325-
.desc = "Software prefetching using PRFM",
2326-
.capability = ARM64_HAS_NO_HW_PREFETCH,
2327-
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
2328-
.matches = has_no_hw_prefetch,
2329-
},
23302295
{
23312296
.desc = "Virtualization Host Extensions",
23322297
.capability = ARM64_HAS_VIRT_HOST_EXTN,
@@ -3271,14 +3236,6 @@ void check_local_cpu_capabilities(void)
32713236
verify_local_cpu_capabilities();
32723237
}
32733238

3274-
static void __init setup_boot_cpu_capabilities(void)
3275-
{
3276-
/* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
3277-
update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
3278-
/* Enable the SCOPE_BOOT_CPU capabilities alone right away */
3279-
enable_cpu_capabilities(SCOPE_BOOT_CPU);
3280-
}
3281-
32823239
bool this_cpu_has_cap(unsigned int n)
32833240
{
32843241
if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
@@ -3334,44 +3291,82 @@ unsigned long cpu_get_elf_hwcap2(void)
33343291
return elf_hwcap[1];
33353292
}
33363293

3337-
void __init setup_system_features(void)
3294+
static void __init setup_boot_cpu_capabilities(void)
33383295
{
3339-
int i;
33403296
/*
3341-
* The system-wide safe feature feature register values have been
3342-
* finalized. Finalize and log the available system capabilities.
3297+
* The boot CPU's feature register values have been recorded. Detect
3298+
* boot cpucaps and local cpucaps for the boot CPU, then enable and
3299+
* patch alternatives for the available boot cpucaps.
33433300
*/
3344-
update_cpu_capabilities(SCOPE_SYSTEM);
3345-
if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
3346-
!cpus_have_cap(ARM64_HAS_PAN))
3347-
pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
3301+
update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
3302+
enable_cpu_capabilities(SCOPE_BOOT_CPU);
3303+
apply_boot_alternatives();
3304+
}
33483305

3306+
void __init setup_boot_cpu_features(void)
3307+
{
33493308
/*
3350-
* Enable all the available capabilities which have not been enabled
3351-
* already.
3309+
* Initialize the indirect array of CPU capabilities pointers before we
3310+
* handle the boot CPU.
33523311
*/
3353-
enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
3312+
init_cpucap_indirect_list();
33543313

3355-
kpti_install_ng_mappings();
3314+
/*
3315+
* Detect broken pseudo-NMI. Must be called _before_ the call to
3316+
* setup_boot_cpu_capabilities() since it interacts with
3317+
* can_use_gic_priorities().
3318+
*/
3319+
detect_system_supports_pseudo_nmi();
33563320

3357-
sve_setup();
3358-
sme_setup();
3321+
setup_boot_cpu_capabilities();
3322+
}
33593323

3324+
static void __init setup_system_capabilities(void)
3325+
{
33603326
/*
3361-
* Check for sane CTR_EL0.CWG value.
3327+
* The system-wide safe feature register values have been finalized.
3328+
* Detect, enable, and patch alternatives for the available system
3329+
* cpucaps.
33623330
*/
3363-
if (!cache_type_cwg())
3364-
pr_warn("No Cache Writeback Granule information, assuming %d\n",
3365-
ARCH_DMA_MINALIGN);
3331+
update_cpu_capabilities(SCOPE_SYSTEM);
3332+
enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
3333+
apply_alternatives_all();
33663334

3367-
for (i = 0; i < ARM64_NCAPS; i++) {
3335+
/*
3336+
* Log any cpucaps with a cpumask as these aren't logged by
3337+
* update_cpu_capabilities().
3338+
*/
3339+
for (int i = 0; i < ARM64_NCAPS; i++) {
33683340
const struct arm64_cpu_capabilities *caps = cpucap_ptrs[i];
33693341

33703342
if (caps && caps->cpus && caps->desc &&
33713343
cpumask_any(caps->cpus) < nr_cpu_ids)
33723344
pr_info("detected: %s on CPU%*pbl\n",
33733345
caps->desc, cpumask_pr_args(caps->cpus));
33743346
}
3347+
3348+
/*
3349+
* TTBR0 PAN doesn't have its own cpucap, so log it manually.
3350+
*/
3351+
if (system_uses_ttbr0_pan())
3352+
pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
3353+
}
3354+
3355+
void __init setup_system_features(void)
3356+
{
3357+
setup_system_capabilities();
3358+
3359+
kpti_install_ng_mappings();
3360+
3361+
sve_setup();
3362+
sme_setup();
3363+
3364+
/*
3365+
* Check for sane CTR_EL0.CWG value.
3366+
*/
3367+
if (!cache_type_cwg())
3368+
pr_warn("No Cache Writeback Granule information, assuming %d\n",
3369+
ARCH_DMA_MINALIGN);
33753370
}
33763371

33773372
void __init setup_user_features(void)

arch/arm64/kernel/fpsimd.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1171,7 +1171,7 @@ void __init sve_setup(void)
11711171
unsigned long b;
11721172
int max_bit;
11731173

1174-
if (!cpus_have_cap(ARM64_SVE))
1174+
if (!system_supports_sve())
11751175
return;
11761176

11771177
/*
@@ -1301,7 +1301,7 @@ void __init sme_setup(void)
13011301
struct vl_info *info = &vl_info[ARM64_VEC_SME];
13021302
int min_bit, max_bit;
13031303

1304-
if (!cpus_have_cap(ARM64_SME))
1304+
if (!system_supports_sme())
13051305
return;
13061306

13071307
/*

arch/arm64/kernel/smp.c

Lines changed: 3 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -439,9 +439,8 @@ static void __init hyp_mode_check(void)
439439
void __init smp_cpus_done(unsigned int max_cpus)
440440
{
441441
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
442-
setup_system_features();
443442
hyp_mode_check();
444-
apply_alternatives_all();
443+
setup_system_features();
445444
setup_user_features();
446445
mark_linear_text_alias_ro();
447446
}
@@ -454,14 +453,9 @@ void __init smp_prepare_boot_cpu(void)
454453
* freed shortly, so we must move over to the runtime per-cpu area.
455454
*/
456455
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
457-
cpuinfo_store_boot_cpu();
458456

459-
/*
460-
* We now know enough about the boot CPU to apply the
461-
* alternatives that cannot wait until interrupt handling
462-
* and/or scheduling is enabled.
463-
*/
464-
apply_boot_alternatives();
457+
cpuinfo_store_boot_cpu();
458+
setup_boot_cpu_features();
465459

466460
/* Conditionally switch to GIC PMR for interrupt masking */
467461
if (system_uses_irq_prio_masking())

arch/arm64/lib/copy_page.S

Lines changed: 0 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -18,13 +18,6 @@
1818
* x1 - src
1919
*/
2020
SYM_FUNC_START(__pi_copy_page)
21-
alternative_if ARM64_HAS_NO_HW_PREFETCH
22-
// Prefetch three cache lines ahead.
23-
prfm pldl1strm, [x1, #128]
24-
prfm pldl1strm, [x1, #256]
25-
prfm pldl1strm, [x1, #384]
26-
alternative_else_nop_endif
27-
2821
ldp x2, x3, [x1]
2922
ldp x4, x5, [x1, #16]
3023
ldp x6, x7, [x1, #32]
@@ -39,10 +32,6 @@ alternative_else_nop_endif
3932
1:
4033
tst x0, #(PAGE_SIZE - 1)
4134

42-
alternative_if ARM64_HAS_NO_HW_PREFETCH
43-
prfm pldl1strm, [x1, #384]
44-
alternative_else_nop_endif
45-
4635
stnp x2, x3, [x0, #-256]
4736
ldp x2, x3, [x1]
4837
stnp x4, x5, [x0, #16 - 256]

arch/arm64/mm/mmu.c

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -674,6 +674,9 @@ static int __init map_entry_trampoline(void)
674674
{
675675
int i;
676676

677+
if (!arm64_kernel_unmapped_at_el0())
678+
return 0;
679+
677680
pgprot_t prot = kernel_exec_prot();
678681
phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
679682

arch/arm64/tools/cpucaps

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -40,7 +40,6 @@ HAS_LDAPR
4040
HAS_LSE_ATOMICS
4141
HAS_MOPS
4242
HAS_NESTED_VIRT
43-
HAS_NO_HW_PREFETCH
4443
HAS_PAN
4544
HAS_S1PIE
4645
HAS_RAS_EXTN

0 commit comments

Comments (0)