
Commit 71c54b3

clementleger authored and palmer-dabbelt committed
riscv: report misaligned accesses emulation to hwprobe
hwprobe provides a way to report whether misaligned accesses are emulated. In
order to correctly populate that feature, we can check if a misaligned access
actually traps when one is performed. This can be checked using an exception
table entry, which will actually be used when a misaligned access is done from
kernel mode.

Signed-off-by: Clément Léger <cleger@rivosinc.com>
Link: https://lore.kernel.org/r/20231004151405.521596-8-cleger@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
1 parent 90b11b4 commit 71c54b3
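
For context, a minimal userspace sketch (not part of this commit) of how the value populated here can be read back through the riscv_hwprobe syscall. It assumes a uapi <asm/hwprobe.h> that provides RISCV_HWPROBE_KEY_CPUPERF_0 and the RISCV_HWPROBE_MISALIGNED_* constants, and __NR_riscv_hwprobe from the syscall headers:

/*
 * Userspace sketch (not part of this commit): query hwprobe for the
 * misaligned-access performance key and check whether misaligned
 * accesses are reported as emulated. Assumes __NR_riscv_hwprobe and
 * the RISCV_HWPROBE_* constants from the uapi <asm/hwprobe.h>.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_CPUPERF_0 };

	/* cpusetsize == 0 and cpus == NULL query all online CPUs. */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) != 0)
		return 1;

	if ((pair.value & RISCV_HWPROBE_MISALIGNED_MASK) ==
	    RISCV_HWPROBE_MISALIGNED_EMULATED)
		printf("misaligned accesses are emulated\n");

	return 0;
}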

4 files changed: 79 additions & 1 deletion

File tree:
  arch/riscv/include/asm/cpufeature.h
  arch/riscv/kernel/cpufeature.c
  arch/riscv/kernel/smpboot.c
  arch/riscv/kernel/traps_misaligned.c

arch/riscv/include/asm/cpufeature.h

Lines changed: 18 additions & 0 deletions
@@ -32,4 +32,22 @@ extern struct riscv_isainfo hart_isa[NR_CPUS];
 
 void check_unaligned_access(int cpu);
 
+#ifdef CONFIG_RISCV_MISALIGNED
+bool unaligned_ctl_available(void);
+bool check_unaligned_access_emulated(int cpu);
+void unaligned_emulation_finish(void);
+#else
+static inline bool unaligned_ctl_available(void)
+{
+	return false;
+}
+
+static inline bool check_unaligned_access_emulated(int cpu)
+{
+	return false;
+}
+
+static inline void unaligned_emulation_finish(void) {}
+#endif
+
 #endif

arch/riscv/kernel/cpufeature.c

Lines changed: 4 additions & 0 deletions
@@ -568,6 +568,9 @@ void check_unaligned_access(int cpu)
 	void *src;
 	long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
 
+	if (check_unaligned_access_emulated(cpu))
+		return;
+
 	page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
 	if (!page) {
 		pr_warn("Can't alloc pages to measure memcpy performance");
@@ -648,6 +651,7 @@ void check_unaligned_access(int cpu)
 static int __init check_unaligned_access_boot_cpu(void)
 {
 	check_unaligned_access(0);
+	unaligned_emulation_finish();
 	return 0;
 }

arch/riscv/kernel/smpboot.c

Lines changed: 1 addition & 1 deletion
@@ -245,8 +245,8 @@ asmlinkage __visible void smp_callin(void)
 	riscv_ipi_enable();
 
 	numa_add_cpu(curr_cpuid);
-	set_cpu_online(curr_cpuid, 1);
 	check_unaligned_access(curr_cpuid);
+	set_cpu_online(curr_cpuid, 1);
 
 	if (has_vector()) {
 		if (riscv_v_setup_vsize())

arch/riscv/kernel/traps_misaligned.c

Lines changed: 56 additions & 0 deletions
@@ -14,6 +14,8 @@
 #include <asm/ptrace.h>
 #include <asm/csr.h>
 #include <asm/entry-common.h>
+#include <asm/hwprobe.h>
+#include <asm/cpufeature.h>
 
 #define INSN_MATCH_LB		0x3
 #define INSN_MASK_LB		0x707f
@@ -396,6 +398,8 @@ union reg_data {
 	u64 data_u64;
 };
 
+static bool unaligned_ctl __read_mostly;
+
 /* sysctl hooks */
 int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
 
@@ -409,6 +413,8 @@ int handle_misaligned_load(struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
 
+	*this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_EMULATED;
+
 	if (!unaligned_enabled)
 		return -1;
 
@@ -585,3 +591,53 @@ int handle_misaligned_store(struct pt_regs *regs)
 
 	return 0;
 }
+
+bool check_unaligned_access_emulated(int cpu)
+{
+	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
+	unsigned long tmp_var, tmp_val;
+	bool misaligned_emu_detected;
+
+	*mas_ptr = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+
+	__asm__ __volatile__ (
+		"       "REG_L" %[tmp], 1(%[ptr])\n"
+		: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
+
+	misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_EMULATED);
+	/*
+	 * If unaligned_ctl is already set, this means that we detected that all
+	 * CPUS uses emulated misaligned access at boot time. If that changed
+	 * when hotplugging the new cpu, this is something we don't handle.
+	 */
+	if (unlikely(unaligned_ctl && !misaligned_emu_detected)) {
+		pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
+		while (true)
+			cpu_relax();
+	}
+
+	return misaligned_emu_detected;
+}
+
+void __init unaligned_emulation_finish(void)
+{
+	int cpu;
+
+	/*
+	 * We can only support PR_UNALIGN controls if all CPUs have misaligned
+	 * accesses emulated since tasks requesting such control can run on any
+	 * CPU.
+	 */
+	for_each_present_cpu(cpu) {
+		if (per_cpu(misaligned_access_speed, cpu) !=
+		    RISCV_HWPROBE_MISALIGNED_EMULATED) {
+			return;
+		}
+	}
+	unaligned_ctl = true;
+}
+
+bool unaligned_ctl_available(void)
+{
+	return unaligned_ctl;
+}
