Skip to content

Commit 01dc926

Browse files
author
Marc Zyngier
committed
KVM: arm64: Ensure I-cache isolation between vcpus of a same VM
It recently became apparent that the ARMv8 architecture has interesting rules regarding attributes being used when fetching instructions if the MMU is off at Stage-1. In this situation, the CPU is allowed to fetch from the PoC and allocate into the I-cache (unless the memory is mapped with the XN attribute at Stage-2). If we transpose this to vcpus sharing a single physical CPU, it is possible for a vcpu running with its MMU off to influence another vcpu running with its MMU on, as the latter is expected to fetch from the PoU (and self-patching code doesn't flush below that level). In order to solve this, reuse the vcpu-private TLB invalidation code to apply the same policy to the I-cache, nuking it every time the vcpu runs on a physical CPU that ran another vcpu of the same VM in the past. This involves renaming __kvm_tlb_flush_local_vmid() to __kvm_flush_cpu_context(), and inserting a local i-cache invalidation there. Cc: stable@vger.kernel.org Signed-off-by: Marc Zyngier <maz@kernel.org> Acked-by: Will Deacon <will@kernel.org> Acked-by: Catalin Marinas <catalin.marinas@arm.com> Link: https://lore.kernel.org/r/20210303164505.68492-1-maz@kernel.org
1 parent dbaee83 commit 01dc926

5 files changed

Lines changed: 15 additions & 8 deletions

File tree

arch/arm64/include/asm/kvm_asm.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@
4747
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context 2
4848
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa 3
4949
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid 4
50-
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid 5
50+
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context 5
5151
#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff 6
5252
#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs 7
5353
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config 8
@@ -183,10 +183,10 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
183183
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
184184

185185
extern void __kvm_flush_vm_context(void);
186+
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
186187
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
187188
int level);
188189
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
189-
extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
190190

191191
extern void __kvm_timer_set_cntvoff(u64 cntvoff);
192192

arch/arm64/kvm/arm.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -385,11 +385,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
385385
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
386386

387387
/*
388+
* We guarantee that both TLBs and I-cache are private to each
389+
* vcpu. If detecting that a vcpu from the same VM has
390+
* previously run on the same physical CPU, call into the
391+
* hypervisor code to nuke the relevant contexts.
392+
*
388393
* We might get preempted before the vCPU actually runs, but
389394
* over-invalidation doesn't affect correctness.
390395
*/
391396
if (*last_ran != vcpu->vcpu_id) {
392-
kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
397+
kvm_call_hyp(__kvm_flush_cpu_context, mmu);
393398
*last_ran = vcpu->vcpu_id;
394399
}
395400

arch/arm64/kvm/hyp/nvhe/hyp-main.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -46,11 +46,11 @@ static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
4646
__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
4747
}
4848

49-
static void handle___kvm_tlb_flush_local_vmid(struct kvm_cpu_context *host_ctxt)
49+
static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
5050
{
5151
DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
5252

53-
__kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
53+
__kvm_flush_cpu_context(kern_hyp_va(mmu));
5454
}
5555

5656
static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
@@ -115,7 +115,7 @@ static const hcall_t host_hcall[] = {
115115
HANDLE_FUNC(__kvm_flush_vm_context),
116116
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
117117
HANDLE_FUNC(__kvm_tlb_flush_vmid),
118-
HANDLE_FUNC(__kvm_tlb_flush_local_vmid),
118+
HANDLE_FUNC(__kvm_flush_cpu_context),
119119
HANDLE_FUNC(__kvm_timer_set_cntvoff),
120120
HANDLE_FUNC(__kvm_enable_ssbs),
121121
HANDLE_FUNC(__vgic_v3_get_gic_config),

arch/arm64/kvm/hyp/nvhe/tlb.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -123,14 +123,15 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
123123
__tlb_switch_to_host(&cxt);
124124
}
125125

126-
void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
126+
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
127127
{
128128
struct tlb_inv_context cxt;
129129

130130
/* Switch to requested VMID */
131131
__tlb_switch_to_guest(mmu, &cxt);
132132

133133
__tlbi(vmalle1);
134+
asm volatile("ic iallu");
134135
dsb(nsh);
135136
isb();
136137

arch/arm64/kvm/hyp/vhe/tlb.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -127,14 +127,15 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
127127
__tlb_switch_to_host(&cxt);
128128
}
129129

130-
void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
130+
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
131131
{
132132
struct tlb_inv_context cxt;
133133

134134
/* Switch to requested VMID */
135135
__tlb_switch_to_guest(mmu, &cxt);
136136

137137
__tlbi(vmalle1);
138+
asm volatile("ic iallu");
138139
dsb(nsh);
139140
isb();
140141

0 commit comments

Comments (0)