// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 - Linaro Ltd
 * Author: Jintack Lim <jintack.lim@linaro.org>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

struct mmu_config {
	u64 ttbr0;
	u64 ttbr1;
	u64 tcr;
	u64 mair;
	u64 sctlr;
	u64 vttbr;
	u64 vtcr;
	u64 hcr;
};

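/*
 * Snapshot the registers that the fast path is about to clobber: the
 * EL1&0 translation regime (TTBRx_EL1, TCR_EL1, MAIR_EL1, SCTLR_EL1),
 * the stage-2 configuration (VTTBR_EL2, VTCR_EL2) and HCR_EL2 itself.
 */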
static void __mmu_config_save(struct mmu_config *config)
{
	config->ttbr0 = read_sysreg_el1(SYS_TTBR0);
	config->ttbr1 = read_sysreg_el1(SYS_TTBR1);
	config->tcr = read_sysreg_el1(SYS_TCR);
	config->mair = read_sysreg_el1(SYS_MAIR);
	config->sctlr = read_sysreg_el1(SYS_SCTLR);
	config->vttbr = read_sysreg(vttbr_el2);
	config->vtcr = read_sysreg(vtcr_el2);
	config->hcr = read_sysreg(hcr_el2);
}

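/*
 * Undo __mmu_config_save(). HCR_EL2 is written first so that TGE is
 * set again before the EL1 registers are rewritten on cores affected
 * by the speculative AT errata handled below.
 */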
static void __mmu_config_restore(struct mmu_config *config)
{
	write_sysreg(config->hcr, hcr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require TGE to be 1 before
	 * we update the guest state.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));

	write_sysreg_el1(config->ttbr0, SYS_TTBR0);
	write_sysreg_el1(config->ttbr1, SYS_TTBR1);
	write_sysreg_el1(config->tcr, SYS_TCR);
	write_sysreg_el1(config->mair, SYS_MAIR);
	write_sysreg_el1(config->sctlr, SYS_SCTLR);
	write_sysreg(config->vttbr, vttbr_el2);
	write_sysreg(config->vtcr, vtcr_el2);
}

/*
 * Return the PAR_EL1 value as the result of a valid translation.
 *
 * If the translation is unsuccessful, the value may contain nothing
 * but PAR_EL1.F, and cannot be taken at face value. It isn't an
 * indication of the translation having failed, only that the fast
 * path did not succeed, *unless* it indicates an S1 permission fault.
 */
static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
{
	struct mmu_config config;
	struct kvm_s2_mmu *mmu;
	bool fail;
	u64 par;

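	/* Assume failure (PAR_EL1.F set) until the walk proves otherwise. */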
	par = SYS_PAR_EL1_F;

	/*
	 * We've trapped, so everything is live on the CPU. As we will
	 * be switching contexts behind everybody's back, disable
	 * interrupts while holding the mmu lock.
	 */
	guard(write_lock_irqsave)(&vcpu->kvm->mmu_lock);

	/*
	 * Snapshot HCR_EL2 unconditionally: it is consumed at
	 * skip_mmu_switch and must be restored on both paths, while
	 * __mmu_config_save() only runs on the context-switching path.
	 */
	config.hcr = read_sysreg(hcr_el2);

	/*
	 * If HCR_EL2.{E2H,TGE} == {1,1}, the MMU context is already
	 * the right one (as we trapped from vEL2). If not, save the
	 * full MMU context.
	 */
	if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu))
		goto skip_mmu_switch;

	/*
	 * Obtaining the S2 MMU for an L2 is horribly racy, and we may not
	 * find it (recycled by another vcpu, for example). When this
	 * happens, admit defeat immediately and use the SW (slow) path.
	 */
	mmu = lookup_s2_mmu(vcpu);
	if (!mmu)
		return par;

	__mmu_config_save(&config);

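	/* Install the guest's EL1 translation regime, then its stage-2. */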
	write_sysreg_el1(vcpu_read_sys_reg(vcpu, TTBR0_EL1), SYS_TTBR0);
	write_sysreg_el1(vcpu_read_sys_reg(vcpu, TTBR1_EL1), SYS_TTBR1);
	write_sysreg_el1(vcpu_read_sys_reg(vcpu, TCR_EL1), SYS_TCR);
	write_sysreg_el1(vcpu_read_sys_reg(vcpu, MAIR_EL1), SYS_MAIR);
	write_sysreg_el1(vcpu_read_sys_reg(vcpu, SCTLR_EL1), SYS_SCTLR);
	__load_stage2(mmu, mmu->arch);

skip_mmu_switch:
	/* Clear TGE, enable S2 translation, we're rolling */
	write_sysreg((config.hcr & ~HCR_TGE) | HCR_VM, hcr_el2);
	isb();

	switch (op) {
	case OP_AT_S1E1R:
		fail = __kvm_at(OP_AT_S1E1R, vaddr);
		break;
	case OP_AT_S1E1W:
		fail = __kvm_at(OP_AT_S1E1W, vaddr);
		break;
	case OP_AT_S1E0R:
		fail = __kvm_at(OP_AT_S1E0R, vaddr);
		break;
	case OP_AT_S1E0W:
		fail = __kvm_at(OP_AT_S1E0W, vaddr);
		break;
	default:
		WARN_ON_ONCE(1);
		fail = true;
		break;
	}

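	/* On success, PAR_EL1 holds the result of the hardware walk. */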
	if (!fail)
		par = read_sysreg_par();

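	/*
	 * Restore whatever we changed before dropping the lock: the
	 * full MMU context if we switched it, otherwise just HCR_EL2.
	 */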
	if (!(vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)))
		__mmu_config_restore(&config);
	else
		write_sysreg(config.hcr, hcr_el2);

	return par;
}

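/*
 * Emulate an AT S1E0{R,W} or S1E1{R,W} instruction on behalf of the
 * guest, making the outcome visible through its view of PAR_EL1.
 */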
void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
{
	u64 par = __kvm_at_s1e01_fast(vcpu, op, vaddr);

	vcpu_write_sys_reg(vcpu, par, PAR_EL1);
}
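
/*
 * Illustrative sketch, not part of this file: a sysreg trap handler
 * is expected to decode the AT operation and the faulting address
 * from the trapped instruction and forward them here. The handler
 * name and plumbing below are assumptions for illustration only.
 *
 *	static bool handle_at_s1e01(struct kvm_vcpu *vcpu,
 *				    struct sys_reg_params *p,
 *				    const struct sys_reg_desc *r)
 *	{
 *		u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
 *
 *		__kvm_at_s1e01(vcpu, op, p->regval);
 *		return true;
 *	}
 */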