|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
| 2 | +/* |
| 3 | + * The hwprobe interface, for allowing userspace to probe to see which features |
| 4 | + * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for |
| 5 | + * more details. |
| 6 | + */ |
| 7 | +#include <linux/syscalls.h> |
| 8 | +#include <asm/cacheflush.h> |
| 9 | +#include <asm/cpufeature.h> |
| 10 | +#include <asm/hwprobe.h> |
| 11 | +#include <asm/sbi.h> |
| 12 | +#include <asm/switch_to.h> |
| 13 | +#include <asm/uaccess.h> |
| 14 | +#include <asm/unistd.h> |
| 15 | +#include <asm/vector.h> |
| 16 | +#include <vdso/vsyscall.h> |
| 17 | + |
| 18 | + |
| 19 | +static void hwprobe_arch_id(struct riscv_hwprobe *pair, |
| 20 | + const struct cpumask *cpus) |
| 21 | +{ |
| 22 | + u64 id = -1ULL; |
| 23 | + bool first = true; |
| 24 | + int cpu; |
| 25 | + |
| 26 | + for_each_cpu(cpu, cpus) { |
| 27 | + u64 cpu_id; |
| 28 | + |
| 29 | + switch (pair->key) { |
| 30 | + case RISCV_HWPROBE_KEY_MVENDORID: |
| 31 | + cpu_id = riscv_cached_mvendorid(cpu); |
| 32 | + break; |
| 33 | + case RISCV_HWPROBE_KEY_MIMPID: |
| 34 | + cpu_id = riscv_cached_mimpid(cpu); |
| 35 | + break; |
| 36 | + case RISCV_HWPROBE_KEY_MARCHID: |
| 37 | + cpu_id = riscv_cached_marchid(cpu); |
| 38 | + break; |
| 39 | + } |
| 40 | + |
| 41 | + if (first) { |
| 42 | + id = cpu_id; |
| 43 | + first = false; |
| 44 | + } |
| 45 | + |
| 46 | + /* |
| 47 | + * If there's a mismatch for the given set, return -1 in the |
| 48 | + * value. |
| 49 | + */ |
| 50 | + if (id != cpu_id) { |
| 51 | + id = -1ULL; |
| 52 | + break; |
| 53 | + } |
| 54 | + } |
| 55 | + |
| 56 | + pair->value = id; |
| 57 | +} |
| 58 | + |
/*
 * Compute the RISCV_HWPROBE_KEY_IMA_EXT_0 answer for @cpus: a bitmap of
 * the extensions usable on *every* CPU in the set.
 */
static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;	/* bits absent on at least one CPU in the set */

	pair->value = 0;
	/*
	 * FD, C and V are reported via kernel-support checks rather than the
	 * per-hart ISA bitmaps, since userspace can only use them when the
	 * kernel has enabled the corresponding state handling.
	 */
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector())
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

/* Set the probe bit if this hart has the extension, else mark it missing. */
#define EXT_KEY(ext)	\
	do {	\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
			pair->value |= RISCV_HWPROBE_EXT_##ext;	\
		else	\
			missing |= RISCV_HWPROBE_EXT_##ext;	\
	} while (false)

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZBA);
		EXT_KEY(ZBB);
		EXT_KEY(ZBS);
		EXT_KEY(ZICBOZ);
#undef EXT_KEY
	}

	/* Now turn off reporting features if any CPU is missing it. */
	pair->value &= ~missing;
}
| 105 | + |
| 106 | +static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext) |
| 107 | +{ |
| 108 | + struct riscv_hwprobe pair; |
| 109 | + |
| 110 | + hwprobe_isa_ext0(&pair, cpus); |
| 111 | + return (pair.value & ext); |
| 112 | +} |
| 113 | + |
| 114 | +static u64 hwprobe_misaligned(const struct cpumask *cpus) |
| 115 | +{ |
| 116 | + int cpu; |
| 117 | + u64 perf = -1ULL; |
| 118 | + |
| 119 | + for_each_cpu(cpu, cpus) { |
| 120 | + int this_perf = per_cpu(misaligned_access_speed, cpu); |
| 121 | + |
| 122 | + if (perf == -1ULL) |
| 123 | + perf = this_perf; |
| 124 | + |
| 125 | + if (perf != this_perf) { |
| 126 | + perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN; |
| 127 | + break; |
| 128 | + } |
| 129 | + } |
| 130 | + |
| 131 | + if (perf == -1ULL) |
| 132 | + return RISCV_HWPROBE_MISALIGNED_UNKNOWN; |
| 133 | + |
| 134 | + return perf; |
| 135 | +} |
| 136 | + |
| 137 | +static void hwprobe_one_pair(struct riscv_hwprobe *pair, |
| 138 | + const struct cpumask *cpus) |
| 139 | +{ |
| 140 | + switch (pair->key) { |
| 141 | + case RISCV_HWPROBE_KEY_MVENDORID: |
| 142 | + case RISCV_HWPROBE_KEY_MARCHID: |
| 143 | + case RISCV_HWPROBE_KEY_MIMPID: |
| 144 | + hwprobe_arch_id(pair, cpus); |
| 145 | + break; |
| 146 | + /* |
| 147 | + * The kernel already assumes that the base single-letter ISA |
| 148 | + * extensions are supported on all harts, and only supports the |
| 149 | + * IMA base, so just cheat a bit here and tell that to |
| 150 | + * userspace. |
| 151 | + */ |
| 152 | + case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: |
| 153 | + pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA; |
| 154 | + break; |
| 155 | + |
| 156 | + case RISCV_HWPROBE_KEY_IMA_EXT_0: |
| 157 | + hwprobe_isa_ext0(pair, cpus); |
| 158 | + break; |
| 159 | + |
| 160 | + case RISCV_HWPROBE_KEY_CPUPERF_0: |
| 161 | + pair->value = hwprobe_misaligned(cpus); |
| 162 | + break; |
| 163 | + |
| 164 | + case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE: |
| 165 | + pair->value = 0; |
| 166 | + if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ)) |
| 167 | + pair->value = riscv_cboz_block_size; |
| 168 | + break; |
| 169 | + |
| 170 | + /* |
| 171 | + * For forward compatibility, unknown keys don't fail the whole |
| 172 | + * call, but get their element key set to -1 and value set to 0 |
| 173 | + * indicating they're unrecognized. |
| 174 | + */ |
| 175 | + default: |
| 176 | + pair->key = -1; |
| 177 | + pair->value = 0; |
| 178 | + break; |
| 179 | + } |
| 180 | +} |
| 181 | + |
/*
 * Core of the riscv_hwprobe() syscall: resolve each requested key/value
 * pair against the given CPU set.
 *
 * @pairs:      userspace array of riscv_hwprobe elements; keys are read
 *              and values (plus -1 for unknown keys) are written back.
 * @pair_count: number of elements in @pairs.
 * @cpusetsize: size in bytes of the userspace CPU mask, 0 for "all".
 * @cpus_user:  userspace CPU mask; NULL together with cpusetsize == 0
 *              means all online CPUs.
 * @flags:      must be 0, reserved for future use.
 *
 * Returns 0 on success, -EINVAL for non-zero flags or a CPU set with no
 * online CPUs, -EFAULT for any failed user memory access.
 */
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		/*
		 * Oversized user masks are silently clamped; bits beyond the
		 * kernel's cpumask size are ignored rather than rejected.
		 */
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU, without that
		 * there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		/* Write back both key (may be -1 for unknown) and value. */
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}
| 238 | + |
| 239 | +#ifdef CONFIG_MMU |
| 240 | + |
/*
 * Pre-compute the all-online-CPUs hwprobe answers into the vDSO data
 * page, so the vDSO can service the common queries without entering the
 * kernel.
 */
static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_data *vd = __arch_get_k_vdso_data();
	struct arch_vdso_data *avd = &vd->arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		/* Every key in [0, RISCV_HWPROBE_MAX_KEY] should be known. */
		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

/* Runs after the arch initcalls that populate the cached CPU IDs. */
arch_initcall_sync(init_hwprobe_vdso_data);
| 280 | + |
| 281 | +#endif /* CONFIG_MMU */ |
| 282 | + |
/*
 * riscv_hwprobe() syscall entry point; all work is done in
 * do_riscv_hwprobe(). See Documentation/arch/riscv/hwprobe.rst for the
 * userspace-facing contract.
 */
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}