Skip to content

Commit 83ebe71

Browse files
committed
KVM: VMX: Apply MMIO Stale Data mitigation if KVM maps MMIO into the guest
Enforce the MMIO Stale Data mitigation if KVM has ever mapped host MMIO into the VM, not if the VM has an assigned device. VFIO is but one of many ways to map host MMIO into a KVM guest, and even within VFIO, formally attaching a device to a VM via KVM_DEV_VFIO_FILE_ADD is entirely optional. Track whether or not the guest can access host MMIO on a per-MMU basis, i.e. based on whether or not the vCPU has a mapping to host MMIO. For simplicity, track MMIO mappings in "special" roots (those without a kvm_mmu_page) at the VM level, as only Intel CPUs are vulnerable, and so only legacy 32-bit shadow paging is affected, i.e. lack of precise tracking is a complete non-issue. Make the per-MMU and per-VM flags sticky. Detecting when *all* MMIO mappings have been removed would be absurdly complex. And in practice, removing MMIO from a guest will be done by deleting the associated memslot, which by default will force KVM to re-allocate all roots. Special roots will forever be mitigated, but as above, the affected scenarios are not expected to be performance sensitive. Use a VMX_RUN flag to communicate the need for a buffers flush to vmx_vcpu_enter_exit() so that kvm_vcpu_can_access_host_mmio() and all its dependencies don't need to be marked __always_inline, e.g. so that KASAN doesn't trigger a noinstr violation. Cc: Pawan Gupta <pawan.kumar.gupta@linux.intel.com> Cc: Borislav Petkov <bp@alien8.de> Fixes: 8cb861e ("x86/speculation/mmio: Add mitigation for Processor MMIO Stale Data") Tested-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com> Link: https://lore.kernel.org/r/20250523011756.3243624-4-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent ffe9d79 commit 83ebe71

6 files changed

Lines changed: 48 additions & 5 deletions

File tree

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1451,6 +1451,7 @@ struct kvm_arch {
14511451
bool x2apic_format;
14521452
bool x2apic_broadcast_quirk_disabled;
14531453

1454+
bool has_mapped_host_mmio;
14541455
bool guest_can_read_msr_platform_info;
14551456
bool exception_payload_enabled;
14561457

arch/x86/kvm/mmu/mmu_internal.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,9 @@ struct kvm_mmu_page {
103103
int root_count;
104104
refcount_t tdp_mmu_root_count;
105105
};
106+
107+
bool has_mapped_host_mmio;
108+
106109
union {
107110
/* These two members aren't used for TDP MMU */
108111
struct {

arch/x86/kvm/mmu/spte.c

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -138,6 +138,22 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn, int *is_host_mmio)
138138
return *is_host_mmio;
139139
}
140140

141+
static void kvm_track_host_mmio_mapping(struct kvm_vcpu *vcpu)
142+
{
143+
struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
144+
145+
if (root)
146+
WRITE_ONCE(root->has_mapped_host_mmio, true);
147+
else
148+
WRITE_ONCE(vcpu->kvm->arch.has_mapped_host_mmio, true);
149+
150+
/*
151+
* Force vCPUs to exit and flush CPU buffers if the vCPU is using the
152+
* affected root(s).
153+
*/
154+
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
155+
}
156+
141157
/*
142158
* Returns true if the SPTE needs to be updated atomically due to having bits
143159
* that may be changed without holding mmu_lock, and for which KVM must not
@@ -276,6 +292,11 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
276292
mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
277293
}
278294

295+
if (static_branch_unlikely(&cpu_buf_vm_clear) &&
296+
!kvm_vcpu_can_access_host_mmio(vcpu) &&
297+
kvm_is_mmio_pfn(pfn, &is_host_mmio))
298+
kvm_track_host_mmio_mapping(vcpu);
299+
279300
*new_spte = spte;
280301
return wrprot;
281302
}

arch/x86/kvm/mmu/spte.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -280,6 +280,16 @@ static inline bool is_mirror_sptep(tdp_ptep_t sptep)
280280
return is_mirror_sp(sptep_to_sp(rcu_dereference(sptep)));
281281
}
282282

283+
static inline bool kvm_vcpu_can_access_host_mmio(struct kvm_vcpu *vcpu)
284+
{
285+
struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
286+
287+
if (root)
288+
return READ_ONCE(root->has_mapped_host_mmio);
289+
290+
return READ_ONCE(vcpu->kvm->arch.has_mapped_host_mmio);
291+
}
292+
283293
static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
284294
{
285295
return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&

arch/x86/kvm/vmx/run_flags.h

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,12 @@
22
#ifndef __KVM_X86_VMX_RUN_FLAGS_H
33
#define __KVM_X86_VMX_RUN_FLAGS_H
44

5-
#define VMX_RUN_VMRESUME_SHIFT 0
6-
#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1
5+
#define VMX_RUN_VMRESUME_SHIFT 0
6+
#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1
7+
#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT 2
78

8-
#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT)
9-
#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
9+
#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT)
10+
#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
11+
#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO BIT(VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT)
1012

1113
#endif /* __KVM_X86_VMX_RUN_FLAGS_H */

arch/x86/kvm/vmx/vmx.c

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,8 @@
7575
#include "vmx_onhyperv.h"
7676
#include "posted_intr.h"
7777

78+
#include "mmu/spte.h"
79+
7880
MODULE_AUTHOR("Qumranet");
7981
MODULE_DESCRIPTION("KVM support for VMX (Intel VT-x) extensions");
8082
MODULE_LICENSE("GPL");
@@ -963,6 +965,10 @@ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
963965
if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
964966
flags |= VMX_RUN_SAVE_SPEC_CTRL;
965967

968+
if (static_branch_unlikely(&cpu_buf_vm_clear) &&
969+
kvm_vcpu_can_access_host_mmio(&vmx->vcpu))
970+
flags |= VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO;
971+
966972
return flags;
967973
}
968974

@@ -7290,7 +7296,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
72907296
if (static_branch_unlikely(&vmx_l1d_should_flush))
72917297
vmx_l1d_flush(vcpu);
72927298
else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
7293-
kvm_arch_has_assigned_device(vcpu->kvm))
7299+
(flags & VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO))
72947300
mds_clear_cpu_buffers();
72957301

72967302
vmx_disable_fb_clear(vmx);

0 commit comments

Comments
 (0)