Skip to content

Commit f05efcf

Browse files
committed
Merge tag 'kvm-x86-mmio-6.17' of https://github.com/kvm-x86/linux into HEAD
KVM MMIO Stale Data mitigation cleanup for 6.17: rework KVM's mitigation for the MMIO Stale Data vulnerability to track whether or not a vCPU has access to (host) MMIO based on the MMU that will be used when running in the guest. The current approach doesn't actually detect whether or not a guest has access to MMIO, and is prone to false negatives (and to a lesser extent, false positives), as KVM_DEV_VFIO_FILE_ADD is optional, and obviously only covers VFIO devices.
2 parents f02b1bc + 83ebe71 commit f05efcf

6 files changed

Lines changed: 67 additions & 8 deletions

File tree

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1465,6 +1465,7 @@ struct kvm_arch {
14651465
bool x2apic_format;
14661466
bool x2apic_broadcast_quirk_disabled;
14671467

1468+
bool has_mapped_host_mmio;
14681469
bool guest_can_read_msr_platform_info;
14691470
bool exception_payload_enabled;
14701471

arch/x86/kvm/mmu/mmu_internal.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,9 @@ struct kvm_mmu_page {
103103
int root_count;
104104
refcount_t tdp_mmu_root_count;
105105
};
106+
107+
bool has_mapped_host_mmio;
108+
106109
union {
107110
/* These two members aren't used for TDP MMU */
108111
struct {

arch/x86/kvm/mmu/spte.c

Lines changed: 40 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,7 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
104104
return spte;
105105
}
106106

107-
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
107+
static bool __kvm_is_mmio_pfn(kvm_pfn_t pfn)
108108
{
109109
if (pfn_valid(pfn))
110110
return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
@@ -125,6 +125,35 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
125125
E820_TYPE_RAM);
126126
}
127127

128+
/*
 * Memoizing wrapper around __kvm_is_mmio_pfn().  Determining if a PFN is
 * host MMIO is relatively expensive, so the result is cached locally (in
 * the sole caller) to avoid doing the full query multiple times when
 * creating a single SPTE.  '*is_host_mmio' holds -1 while the answer is
 * unknown, and 0/1 once it has been computed.
 */
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn, int *is_host_mmio)
{
	int cached = *is_host_mmio;

	if (cached < 0) {
		cached = __kvm_is_mmio_pfn(pfn);
		*is_host_mmio = cached;
	}

	return cached;
}
140+
141+
static void kvm_track_host_mmio_mapping(struct kvm_vcpu *vcpu)
142+
{
143+
struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
144+
145+
if (root)
146+
WRITE_ONCE(root->has_mapped_host_mmio, true);
147+
else
148+
WRITE_ONCE(vcpu->kvm->arch.has_mapped_host_mmio, true);
149+
150+
/*
151+
* Force vCPUs to exit and flush CPU buffers if the vCPU is using the
152+
* affected root(s).
153+
*/
154+
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
155+
}
156+
128157
/*
129158
* Returns true if the SPTE needs to be updated atomically due to having bits
130159
* that may be changed without holding mmu_lock, and for which KVM must not
@@ -162,6 +191,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
162191
{
163192
int level = sp->role.level;
164193
u64 spte = SPTE_MMU_PRESENT_MASK;
194+
int is_host_mmio = -1;
165195
bool wrprot = false;
166196

167197
/*
@@ -209,13 +239,15 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
209239
if (level > PG_LEVEL_4K)
210240
spte |= PT_PAGE_SIZE_MASK;
211241

212-
spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn));
242+
if (kvm_x86_ops.get_mt_mask)
243+
spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn,
244+
kvm_is_mmio_pfn(pfn, &is_host_mmio));
213245
if (host_writable)
214246
spte |= shadow_host_writable_mask;
215247
else
216248
pte_access &= ~ACC_WRITE_MASK;
217249

218-
if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
250+
if (shadow_me_value && !kvm_is_mmio_pfn(pfn, &is_host_mmio))
219251
spte |= shadow_me_value;
220252

221253
spte |= (u64)pfn << PAGE_SHIFT;
@@ -260,6 +292,11 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
260292
mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
261293
}
262294

295+
if (static_branch_unlikely(&cpu_buf_vm_clear) &&
296+
!kvm_vcpu_can_access_host_mmio(vcpu) &&
297+
kvm_is_mmio_pfn(pfn, &is_host_mmio))
298+
kvm_track_host_mmio_mapping(vcpu);
299+
263300
*new_spte = spte;
264301
return wrprot;
265302
}

arch/x86/kvm/mmu/spte.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -280,6 +280,16 @@ static inline bool is_mirror_sptep(tdp_ptep_t sptep)
280280
return is_mirror_sp(sptep_to_sp(rcu_dereference(sptep)));
281281
}
282282

283+
static inline bool kvm_vcpu_can_access_host_mmio(struct kvm_vcpu *vcpu)
284+
{
285+
struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
286+
287+
if (root)
288+
return READ_ONCE(root->has_mapped_host_mmio);
289+
290+
return READ_ONCE(vcpu->kvm->arch.has_mapped_host_mmio);
291+
}
292+
283293
static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
284294
{
285295
return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&

arch/x86/kvm/vmx/run_flags.h

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,12 @@
22
#ifndef __KVM_X86_VMX_RUN_FLAGS_H
33
#define __KVM_X86_VMX_RUN_FLAGS_H
44

5-
#define VMX_RUN_VMRESUME_SHIFT 0
6-
#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1
5+
#define VMX_RUN_VMRESUME_SHIFT 0
6+
#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1
7+
#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT 2
78

8-
#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT)
9-
#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
9+
#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT)
10+
#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
11+
#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO BIT(VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT)
1012

1113
#endif /* __KVM_X86_VMX_RUN_FLAGS_H */

arch/x86/kvm/vmx/vmx.c

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,8 @@
7575
#include "vmx_onhyperv.h"
7676
#include "posted_intr.h"
7777

78+
#include "mmu/spte.h"
79+
7880
MODULE_AUTHOR("Qumranet");
7981
MODULE_DESCRIPTION("KVM support for VMX (Intel VT-x) extensions");
8082
MODULE_LICENSE("GPL");
@@ -961,6 +963,10 @@ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
961963
if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
962964
flags |= VMX_RUN_SAVE_SPEC_CTRL;
963965

966+
if (static_branch_unlikely(&cpu_buf_vm_clear) &&
967+
kvm_vcpu_can_access_host_mmio(&vmx->vcpu))
968+
flags |= VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO;
969+
964970
return flags;
965971
}
966972

@@ -7288,7 +7294,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
72887294
if (static_branch_unlikely(&vmx_l1d_should_flush))
72897295
vmx_l1d_flush(vcpu);
72907296
else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
7291-
kvm_arch_has_assigned_device(vcpu->kvm))
7297+
(flags & VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO))
72927298
x86_clear_cpu_buffers();
72937299

72947300
vmx_disable_fb_clear(vmx);

0 commit comments

Comments
 (0)