Skip to content

Commit c0e1ad6

Browse files
committed
Merge branch kvm-arm64/stage2-vhe-load into kvmarm/next
Merge branch kvm-arm64/stage2-vhe-load into kvmarm/next

* kvm-arm64/stage2-vhe-load:
  : Setup stage-2 MMU from vcpu_load() for VHE
  :
  : Unlike nVHE, there is no need to switch the stage-2 MMU around on guest
  : entry/exit in VHE mode as the host is running at EL2. Despite this KVM
  : reloads the stage-2 on every guest entry, which is needless.
  :
  : This series moves the setup of the stage-2 MMU context to vcpu_load()
  : when running in VHE mode. This is likely to be a win across the board,
  : but also allows us to remove an ISB on the guest entry path for systems
  : with one of the speculative AT errata.
  KVM: arm64: Load the stage-2 MMU context in kvm_vcpu_load_vhe()
  KVM: arm64: Rename helpers for VHE vCPU load/put
  KVM: arm64: Reload stage-2 for VMID change on VHE
  KVM: arm64: Restore the stage-2 context in VHE's __tlb_switch_to_host()
  KVM: arm64: Don't zero VTTBR in __tlb_switch_to_host()

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
2 parents 2e7e5eb + 934bf87 commit c0e1ad6

7 files changed

Lines changed: 57 additions & 38 deletions

File tree

arch/arm64/include/asm/kvm_host.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1048,7 +1048,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
10481048
extern unsigned int __ro_after_init kvm_arm_vmid_bits;
10491049
int __init kvm_arm_vmid_alloc_init(void);
10501050
void __init kvm_arm_vmid_alloc_free(void);
1051-
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
1051+
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
10521052
void kvm_arm_vmid_clear_active(void);
10531053

10541054
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
@@ -1134,8 +1134,8 @@ static inline bool kvm_set_pmuserenr(u64 val)
11341134
}
11351135
#endif
11361136

1137-
void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
1138-
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);
1137+
void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
1138+
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);
11391139

11401140
int __init kvm_set_ipa_limit(void);
11411141

arch/arm64/include/asm/kvm_hyp.h

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,8 @@ void __timer_disable_traps(struct kvm_vcpu *vcpu);
9393
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
9494
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
9595
#else
96+
void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu);
97+
void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu);
9698
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
9799
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
98100
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
@@ -111,11 +113,6 @@ void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
111113
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
112114
void __sve_restore_state(void *sve_pffr, u32 *fpsr);
113115

114-
#ifndef __KVM_NVHE_HYPERVISOR__
115-
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
116-
void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
117-
#endif
118-
119116
u64 __guest_enter(struct kvm_vcpu *vcpu);
120117

121118
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);

arch/arm64/kvm/arm.c

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -451,7 +451,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
451451
kvm_vgic_load(vcpu);
452452
kvm_timer_vcpu_load(vcpu);
453453
if (has_vhe())
454-
kvm_vcpu_load_sysregs_vhe(vcpu);
454+
kvm_vcpu_load_vhe(vcpu);
455455
kvm_arch_vcpu_load_fp(vcpu);
456456
kvm_vcpu_pmu_restore_guest(vcpu);
457457
if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
@@ -475,7 +475,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
475475
kvm_arch_vcpu_put_debug_state_flags(vcpu);
476476
kvm_arch_vcpu_put_fp(vcpu);
477477
if (has_vhe())
478-
kvm_vcpu_put_sysregs_vhe(vcpu);
478+
kvm_vcpu_put_vhe(vcpu);
479479
kvm_timer_vcpu_put(vcpu);
480480
kvm_vgic_put(vcpu);
481481
kvm_vcpu_pmu_restore_host(vcpu);
@@ -1006,7 +1006,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
10061006
* making a thread's VMID inactive. So we need to call
10071007
* kvm_arm_vmid_update() in non-premptible context.
10081008
*/
1009-
kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid);
1009+
if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
1010+
has_vhe())
1011+
__load_stage2(vcpu->arch.hw_mmu,
1012+
vcpu->arch.hw_mmu->arch);
10101013

10111014
kvm_pmu_flush_hwstate(vcpu);
10121015

arch/arm64/kvm/hyp/vhe/switch.c

Lines changed: 20 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -93,12 +93,12 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
9393
NOKPROBE_SYMBOL(__deactivate_traps);
9494

9595
/*
96-
* Disable IRQs in {activate,deactivate}_traps_vhe_{load,put}() to
96+
* Disable IRQs in __vcpu_{load,put}_{activate,deactivate}_traps() to
9797
* prevent a race condition between context switching of PMUSERENR_EL0
9898
* in __{activate,deactivate}_traps_common() and IPIs that attempts to
9999
* update PMUSERENR_EL0. See also kvm_set_pmuserenr().
100100
*/
101-
void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
101+
static void __vcpu_load_activate_traps(struct kvm_vcpu *vcpu)
102102
{
103103
unsigned long flags;
104104

@@ -107,7 +107,7 @@ void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
107107
local_irq_restore(flags);
108108
}
109109

110-
void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
110+
static void __vcpu_put_deactivate_traps(struct kvm_vcpu *vcpu)
111111
{
112112
unsigned long flags;
113113

@@ -116,6 +116,19 @@ void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
116116
local_irq_restore(flags);
117117
}
118118

119+
void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu)
120+
{
121+
__vcpu_load_switch_sysregs(vcpu);
122+
__vcpu_load_activate_traps(vcpu);
123+
__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
124+
}
125+
126+
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
127+
{
128+
__vcpu_put_deactivate_traps(vcpu);
129+
__vcpu_put_switch_sysregs(vcpu);
130+
}
131+
119132
static const exit_handler_fn hyp_exit_handlers[] = {
120133
[0 ... ESR_ELx_EC_MAX] = NULL,
121134
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
@@ -171,17 +184,11 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
171184
sysreg_save_host_state_vhe(host_ctxt);
172185

173186
/*
174-
* ARM erratum 1165522 requires us to configure both stage 1 and
175-
* stage 2 translation for the guest context before we clear
176-
* HCR_EL2.TGE.
177-
*
178-
* We have already configured the guest's stage 1 translation in
179-
* kvm_vcpu_load_sysregs_vhe above. We must now call
180-
* __load_stage2 before __activate_traps, because
181-
* __load_stage2 configures stage 2 translation, and
182-
* __activate_traps clear HCR_EL2.TGE (among other things).
187+
* Note that ARM erratum 1165522 requires us to configure both stage 1
188+
* and stage 2 translation for the guest context before we clear
189+
* HCR_EL2.TGE. The stage 1 and stage 2 guest context has already been
190+
* loaded on the CPU in kvm_vcpu_load_vhe().
183191
*/
184-
__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
185192
__activate_traps(vcpu);
186193

187194
__kvm_adjust_pc(vcpu);

arch/arm64/kvm/hyp/vhe/sysreg-sr.c

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
5252
NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
5353

5454
/**
55-
* kvm_vcpu_load_sysregs_vhe - Load guest system registers to the physical CPU
55+
* __vcpu_load_switch_sysregs - Load guest system registers to the physical CPU
5656
*
5757
* @vcpu: The VCPU pointer
5858
*
@@ -62,7 +62,7 @@ NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
6262
* and loading system register state early avoids having to load them on
6363
* every entry to the VM.
6464
*/
65-
void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
65+
void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
6666
{
6767
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
6868
struct kvm_cpu_context *host_ctxt;
@@ -92,12 +92,10 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
9292
__sysreg_restore_el1_state(guest_ctxt);
9393

9494
vcpu_set_flag(vcpu, SYSREGS_ON_CPU);
95-
96-
activate_traps_vhe_load(vcpu);
9795
}
9896

9997
/**
100-
* kvm_vcpu_put_sysregs_vhe - Restore host system registers to the physical CPU
98+
* __vcpu_put_switch_syregs - Restore host system registers to the physical CPU
10199
*
102100
* @vcpu: The VCPU pointer
103101
*
@@ -107,13 +105,12 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
107105
* and deferring saving system register state until we're no longer running the
108106
* VCPU avoids having to save them on every exit from the VM.
109107
*/
110-
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu)
108+
void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu)
111109
{
112110
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
113111
struct kvm_cpu_context *host_ctxt;
114112

115113
host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
116-
deactivate_traps_vhe_put(vcpu);
117114

118115
__sysreg_save_el1_state(guest_ctxt);
119116
__sysreg_save_user_state(guest_ctxt);

arch/arm64/kvm/hyp/vhe/tlb.c

Lines changed: 14 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -11,18 +11,25 @@
1111
#include <asm/tlbflush.h>
1212

1313
struct tlb_inv_context {
14-
unsigned long flags;
15-
u64 tcr;
16-
u64 sctlr;
14+
struct kvm_s2_mmu *mmu;
15+
unsigned long flags;
16+
u64 tcr;
17+
u64 sctlr;
1718
};
1819

1920
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
2021
struct tlb_inv_context *cxt)
2122
{
23+
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
2224
u64 val;
2325

2426
local_irq_save(cxt->flags);
2527

28+
if (vcpu && mmu != vcpu->arch.hw_mmu)
29+
cxt->mmu = vcpu->arch.hw_mmu;
30+
else
31+
cxt->mmu = NULL;
32+
2633
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
2734
/*
2835
* For CPUs that are affected by ARM errata 1165522 or 1530923,
@@ -66,10 +73,13 @@ static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
6673
* We're done with the TLB operation, let's restore the host's
6774
* view of HCR_EL2.
6875
*/
69-
write_sysreg(0, vttbr_el2);
7076
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
7177
isb();
7278

79+
/* ... and the stage-2 MMU context that we switched away from */
80+
if (cxt->mmu)
81+
__load_stage2(cxt->mmu, cxt->mmu->arch);
82+
7383
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
7484
/* Restore the registers to what they were */
7585
write_sysreg_el1(cxt->tcr, SYS_TCR);

arch/arm64/kvm/vmid.c

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -135,10 +135,11 @@ void kvm_arm_vmid_clear_active(void)
135135
atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
136136
}
137137

138-
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
138+
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
139139
{
140140
unsigned long flags;
141141
u64 vmid, old_active_vmid;
142+
bool updated = false;
142143

143144
vmid = atomic64_read(&kvm_vmid->id);
144145

@@ -156,17 +157,21 @@ void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
156157
if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
157158
0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
158159
old_active_vmid, vmid))
159-
return;
160+
return false;
160161

161162
raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
162163

163164
/* Check that our VMID belongs to the current generation. */
164165
vmid = atomic64_read(&kvm_vmid->id);
165-
if (!vmid_gen_match(vmid))
166+
if (!vmid_gen_match(vmid)) {
166167
vmid = new_vmid(kvm_vmid);
168+
updated = true;
169+
}
167170

168171
atomic64_set(this_cpu_ptr(&active_vmids), vmid);
169172
raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
173+
174+
return updated;
170175
}
171176

172177
/*

0 commit comments

Comments (0)