Skip to content

Commit 477e89c

Browse files
author
Marc Zyngier
committed
KVM: arm64: nv: Add basic emulation of AT S1E{0,1}{R,W}
Emulating AT instructions is one of the tasks devolved to the host hypervisor when NV is on. Here, we take the basic approach of emulating AT S1E{0,1}{R,W} using the AT instructions themselves. While this mostly works, it doesn't *always* work: - S1 page tables can be swapped out - shadow S2 can be incomplete and not contain mappings for the S1 page tables We are not trying to handle these cases here, and defer that to a later patch. Suitable comments indicate where we are in dire need of better handling. Co-developed-by: Jintack Lim <jintack.lim@linaro.org> Signed-off-by: Jintack Lim <jintack.lim@linaro.org> Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent 9065985 commit 477e89c

3 files changed

Lines changed: 142 additions & 1 deletion

File tree

arch/arm64/include/asm/kvm_asm.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -236,6 +236,7 @@ extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
236236
extern int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding);
237237

238238
extern void __kvm_timer_set_cntvoff(u64 cntvoff);
239+
extern void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
239240

240241
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
241242

arch/arm64/kvm/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
1414
inject_fault.o va_layout.o handle_exit.o \
1515
guest.o debug.o reset.o sys_regs.o stacktrace.o \
1616
vgic-sys-reg-v3.o fpsimd.o pkvm.o \
17-
arch_timer.o trng.o vmid.o emulate-nested.o nested.o \
17+
arch_timer.o trng.o vmid.o emulate-nested.o nested.o at.o \
1818
vgic/vgic.o vgic/vgic-init.o \
1919
vgic/vgic-irqfd.o vgic/vgic-v2.o \
2020
vgic/vgic-v3.o vgic/vgic-v4.o \

arch/arm64/kvm/at.c

Lines changed: 140 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,140 @@
1+
// SPDX-License-Identifier: GPL-2.0-only
2+
/*
3+
* Copyright (C) 2017 - Linaro Ltd
4+
* Author: Jintack Lim <jintack.lim@linaro.org>
5+
*/
6+
7+
#include <asm/kvm_hyp.h>
8+
#include <asm/kvm_mmu.h>
9+
10+
/*
 * Snapshot of the translation-related system registers clobbered by the
 * AT fast path, so they can be restored by __mmu_config_restore().
 */
struct mmu_config {
	u64	ttbr0;	/* TTBR0_EL1 (EL1&0 stage-1 table base, low half) */
	u64	ttbr1;	/* TTBR1_EL1 (EL1&0 stage-1 table base, high half) */
	u64	tcr;	/* TCR_EL1 (stage-1 translation control) */
	u64	mair;	/* MAIR_EL1 (memory attribute indirection) */
	u64	sctlr;	/* SCTLR_EL1 (system control, incl. M bit) */
	u64	vttbr;	/* VTTBR_EL2 (stage-2 table base + VMID) */
	u64	vtcr;	/* VTCR_EL2 (stage-2 translation control) */
	u64	hcr;	/* HCR_EL2 (hypervisor configuration) */
};
20+
21+
static void __mmu_config_save(struct mmu_config *config)
22+
{
23+
config->ttbr0 = read_sysreg_el1(SYS_TTBR0);
24+
config->ttbr1 = read_sysreg_el1(SYS_TTBR1);
25+
config->tcr = read_sysreg_el1(SYS_TCR);
26+
config->mair = read_sysreg_el1(SYS_MAIR);
27+
config->sctlr = read_sysreg_el1(SYS_SCTLR);
28+
config->vttbr = read_sysreg(vttbr_el2);
29+
config->vtcr = read_sysreg(vtcr_el2);
30+
config->hcr = read_sysreg(hcr_el2);
31+
}
32+
33+
static void __mmu_config_restore(struct mmu_config *config)
34+
{
35+
write_sysreg(config->hcr, hcr_el2);
36+
37+
/*
38+
* ARM errata 1165522 and 1530923 require TGE to be 1 before
39+
* we update the guest state.
40+
*/
41+
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
42+
43+
write_sysreg_el1(config->ttbr0, SYS_TTBR0);
44+
write_sysreg_el1(config->ttbr1, SYS_TTBR1);
45+
write_sysreg_el1(config->tcr, SYS_TCR);
46+
write_sysreg_el1(config->mair, SYS_MAIR);
47+
write_sysreg_el1(config->sctlr, SYS_SCTLR);
48+
write_sysreg(config->vttbr, vttbr_el2);
49+
write_sysreg(config->vtcr, vtcr_el2);
50+
}
51+
52+
/*
53+
* Return the PAR_EL1 value as the result of a valid translation.
54+
*
55+
* If the translation is unsuccessful, the value may only contain
56+
* PAR_EL1.F, and cannot be taken at face value. It isn't an
57+
* indication of the translation having failed, only that the fast
58+
* path did not succeed, *unless* it indicates a S1 permission fault.
59+
*/
60+
static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
61+
{
62+
struct mmu_config config;
63+
struct kvm_s2_mmu *mmu;
64+
bool fail;
65+
u64 par;
66+
67+
par = SYS_PAR_EL1_F;
68+
69+
/*
70+
* We've trapped, so everything is live on the CPU. As we will
71+
* be switching contexts behind everybody's back, disable
72+
* interrupts while holding the mmu lock.
73+
*/
74+
guard(write_lock_irqsave)(&vcpu->kvm->mmu_lock);
75+
76+
/*
77+
* If HCR_EL2.{E2H,TGE} == {1,1}, the MMU context is already
78+
* the right one (as we trapped from vEL2). If not, save the
79+
* full MMU context.
80+
*/
81+
if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu))
82+
goto skip_mmu_switch;
83+
84+
/*
85+
* Obtaining the S2 MMU for a L2 is horribly racy, and we may not
86+
* find it (recycled by another vcpu, for example). When this
87+
* happens, admit defeat immediately and use the SW (slow) path.
88+
*/
89+
mmu = lookup_s2_mmu(vcpu);
90+
if (!mmu)
91+
return par;
92+
93+
__mmu_config_save(&config);
94+
95+
write_sysreg_el1(vcpu_read_sys_reg(vcpu, TTBR0_EL1), SYS_TTBR0);
96+
write_sysreg_el1(vcpu_read_sys_reg(vcpu, TTBR1_EL1), SYS_TTBR1);
97+
write_sysreg_el1(vcpu_read_sys_reg(vcpu, TCR_EL1), SYS_TCR);
98+
write_sysreg_el1(vcpu_read_sys_reg(vcpu, MAIR_EL1), SYS_MAIR);
99+
write_sysreg_el1(vcpu_read_sys_reg(vcpu, SCTLR_EL1), SYS_SCTLR);
100+
__load_stage2(mmu, mmu->arch);
101+
102+
skip_mmu_switch:
103+
/* Clear TGE, enable S2 translation, we're rolling */
104+
write_sysreg((config.hcr & ~HCR_TGE) | HCR_VM, hcr_el2);
105+
isb();
106+
107+
switch (op) {
108+
case OP_AT_S1E1R:
109+
fail = __kvm_at(OP_AT_S1E1R, vaddr);
110+
break;
111+
case OP_AT_S1E1W:
112+
fail = __kvm_at(OP_AT_S1E1W, vaddr);
113+
break;
114+
case OP_AT_S1E0R:
115+
fail = __kvm_at(OP_AT_S1E0R, vaddr);
116+
break;
117+
case OP_AT_S1E0W:
118+
fail = __kvm_at(OP_AT_S1E0W, vaddr);
119+
break;
120+
default:
121+
WARN_ON_ONCE(1);
122+
fail = true;
123+
break;
124+
}
125+
126+
if (!fail)
127+
par = read_sysreg_par();
128+
129+
if (!(vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)))
130+
__mmu_config_restore(&config);
131+
132+
return par;
133+
}
134+
135+
/*
 * Emulate an AT S1E{0,1}{R,W} instruction on behalf of a nested guest,
 * reporting the outcome through the guest's view of PAR_EL1.
 */
void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
{
	vcpu_write_sys_reg(vcpu, __kvm_at_s1e01_fast(vcpu, op, vaddr), PAR_EL1);
}

0 commit comments

Comments
 (0)