|
35 | 35 | #include <asm/virt.h> |
36 | 36 | #include <asm/kvm_arm.h> |
37 | 37 | #include <asm/kvm_asm.h> |
| 38 | +#include <asm/kvm_emulate.h> |
38 | 39 | #include <asm/kvm_mmu.h> |
39 | 40 | #include <asm/kvm_nested.h> |
40 | 41 | #include <asm/kvm_pkvm.h> |
41 | | -#include <asm/kvm_emulate.h> |
| 42 | +#include <asm/kvm_ptrauth.h> |
42 | 43 | #include <asm/sections.h> |
43 | 44 |
|
44 | 45 | #include <kvm/arm_hypercalls.h> |
@@ -218,6 +219,40 @@ void kvm_arch_destroy_vm(struct kvm *kvm) |
218 | 219 | kvm_arm_teardown_hypercalls(kvm); |
219 | 220 | } |
220 | 221 |
|
| 222 | +static bool kvm_has_full_ptr_auth(void) |
| 223 | +{ |
| 224 | + bool apa, gpa, api, gpi, apa3, gpa3; |
| 225 | + u64 isar1, isar2, val; |
| 226 | + |
| 227 | + /* |
| 228 | + * Check that: |
| 229 | + * |
| 230 | + * - both Address and Generic auth are implemented for a given |
| 231 | + * algorithm (QARMA5, IMPDEF or QARMA3) |
| 232 | + * - only a single algorithm is implemented. |
| 233 | + */ |
| 234 | + if (!system_has_full_ptr_auth()) |
| 235 | + return false; |
| 236 | + |
| 237 | + isar1 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1); |
| 238 | + isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1); |
| 239 | + |
| 240 | + apa = !!FIELD_GET(ID_AA64ISAR1_EL1_APA_MASK, isar1); |
| 241 | + val = FIELD_GET(ID_AA64ISAR1_EL1_GPA_MASK, isar1); |
| 242 | + gpa = (val == ID_AA64ISAR1_EL1_GPA_IMP); |
| 243 | + |
| 244 | + api = !!FIELD_GET(ID_AA64ISAR1_EL1_API_MASK, isar1); |
| 245 | + val = FIELD_GET(ID_AA64ISAR1_EL1_GPI_MASK, isar1); |
| 246 | + gpi = (val == ID_AA64ISAR1_EL1_GPI_IMP); |
| 247 | + |
| 248 | + apa3 = !!FIELD_GET(ID_AA64ISAR2_EL1_APA3_MASK, isar2); |
| 249 | + val = FIELD_GET(ID_AA64ISAR2_EL1_GPA3_MASK, isar2); |
| 250 | + gpa3 = (val == ID_AA64ISAR2_EL1_GPA3_IMP); |
| 251 | + |
| 252 | + return (apa == gpa && api == gpi && apa3 == gpa3 && |
| 253 | + (apa + api + apa3) == 1); |
| 254 | +} |
| 255 | + |
221 | 256 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) |
222 | 257 | { |
223 | 258 | int r; |
@@ -311,7 +346,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) |
311 | 346 | break; |
312 | 347 | case KVM_CAP_ARM_PTRAUTH_ADDRESS: |
313 | 348 | case KVM_CAP_ARM_PTRAUTH_GENERIC: |
314 | | - r = system_has_full_ptr_auth(); |
| 349 | + r = kvm_has_full_ptr_auth(); |
315 | 350 | break; |
316 | 351 | case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE: |
317 | 352 | if (kvm) |
@@ -422,6 +457,44 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) |
422 | 457 |
|
423 | 458 | } |
424 | 459 |
|
| 460 | +static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu) |
| 461 | +{ |
| 462 | + if (vcpu_has_ptrauth(vcpu)) { |
| 463 | + /* |
| 464 | + * Either we're running an L2 guest, and the API/APK |
| 465 | + * bits come from L1's HCR_EL2, or API/APK are both set. |
| 466 | + */ |
| 467 | + if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) { |
| 468 | + u64 val; |
| 469 | + |
| 470 | + val = __vcpu_sys_reg(vcpu, HCR_EL2); |
| 471 | + val &= (HCR_API | HCR_APK); |
| 472 | + vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK); |
| 473 | + vcpu->arch.hcr_el2 |= val; |
| 474 | + } else { |
| 475 | + vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK); |
| 476 | + } |
| 477 | + |
| 478 | + /* |
| 479 | + * Save the host keys if there is any chance for the guest |
| 480 | + * to use pauth, as the entry code will reload the guest |
| 481 | + * keys in that case. |
| 482 | + * Protected mode is the exception to that rule, as the |
| 483 | + * entry into the EL2 code eagerly switches back and forth |
| 484 | + * between host and hyp keys (and kvm_hyp_ctxt is out of |
| 485 | + * reach anyway). |
| 486 | + */ |
| 487 | + if (is_protected_kvm_enabled()) |
| 488 | + return; |
| 489 | + |
| 490 | + if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) { |
| 491 | + struct kvm_cpu_context *ctxt; |
| 492 | + ctxt = this_cpu_ptr_hyp_sym(kvm_hyp_ctxt); |
| 493 | + ptrauth_save_keys(ctxt); |
| 494 | + } |
| 495 | + } |
| 496 | +} |
| 497 | + |
425 | 498 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
426 | 499 | { |
427 | 500 | struct kvm_s2_mmu *mmu; |
@@ -460,8 +533,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
460 | 533 | else |
461 | 534 | vcpu_set_wfx_traps(vcpu); |
462 | 535 |
|
463 | | - if (vcpu_has_ptrauth(vcpu)) |
464 | | - vcpu_ptrauth_disable(vcpu); |
| 536 | + vcpu_set_pauth_traps(vcpu); |
| 537 | + |
465 | 538 | kvm_arch_vcpu_load_debug_state_flags(vcpu); |
466 | 539 |
|
467 | 540 | if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus)) |
@@ -1264,7 +1337,7 @@ static unsigned long system_supported_vcpu_features(void) |
1264 | 1337 | if (!system_supports_sve()) |
1265 | 1338 | clear_bit(KVM_ARM_VCPU_SVE, &features); |
1266 | 1339 |
|
1267 | | - if (!system_has_full_ptr_auth()) { |
| 1340 | + if (!kvm_has_full_ptr_auth()) { |
1268 | 1341 | clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features); |
1269 | 1342 | clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features); |
1270 | 1343 | } |
|
0 commit comments