@@ -104,7 +104,7 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
        return spte;
 }
 
-static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
+static bool __kvm_is_mmio_pfn(kvm_pfn_t pfn)
 {
        if (pfn_valid(pfn))
                return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
@@ -125,6 +125,35 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
                             E820_TYPE_RAM);
 }
 
+static bool kvm_is_mmio_pfn(kvm_pfn_t pfn, int *is_host_mmio)
+{
+       /*
+        * Determining if a PFN is host MMIO is relatively expensive. Cache the
+        * result locally (in the sole caller) to avoid doing the full query
+        * multiple times when creating a single SPTE.
+        */
+       if (*is_host_mmio < 0)
+               *is_host_mmio = __kvm_is_mmio_pfn(pfn);
+
+       return *is_host_mmio;
+}
+
+static void kvm_track_host_mmio_mapping(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
+
+       if (root)
+               WRITE_ONCE(root->has_mapped_host_mmio, true);
+       else
+               WRITE_ONCE(vcpu->kvm->arch.has_mapped_host_mmio, true);
+
+       /*
+        * Force vCPUs to exit and flush CPU buffers if the vCPU is using the
+        * affected root(s).
+        */
+       kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
+}
+
 /*
  * Returns true if the SPTE needs to be updated atomically due to having bits
  * that may be changed without holding mmu_lock, and for which KVM must not
@@ -162,6 +191,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 {
        int level = sp->role.level;
        u64 spte = SPTE_MMU_PRESENT_MASK;
+       int is_host_mmio = -1;
        bool wrprot = false;
 
        /*
@@ -209,13 +239,15 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
        if (level > PG_LEVEL_4K)
                spte |= PT_PAGE_SIZE_MASK;
 
-       spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn));
+       if (kvm_x86_ops.get_mt_mask)
+               spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn,
+                                                 kvm_is_mmio_pfn(pfn, &is_host_mmio));
        if (host_writable)
                spte |= shadow_host_writable_mask;
        else
                pte_access &= ~ACC_WRITE_MASK;
 
-       if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
+       if (shadow_me_value && !kvm_is_mmio_pfn(pfn, &is_host_mmio))
                spte |= shadow_me_value;
 
        spte |= (u64)pfn << PAGE_SHIFT;
@@ -260,6 +292,11 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
        }
 
+       if (static_branch_unlikely(&cpu_buf_vm_clear) &&
+           !kvm_vcpu_can_access_host_mmio(vcpu) &&
+           kvm_is_mmio_pfn(pfn, &is_host_mmio))
+               kvm_track_host_mmio_mapping(vcpu);
+
        *new_spte = spte;
        return wrprot;
 }
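
For reference, the new kvm_is_mmio_pfn() wrapper relies on a small memoization idiom: an int initialized to -1 acts as a tri-state cache (-1 = not yet computed, 0 = false, 1 = true), so the expensive __kvm_is_mmio_pfn() query runs at most once per make_spte() call even though up to three call sites may ask. The sketch below is a minimal standalone illustration of that idiom only, not KVM code; expensive_query() and cached_query() are hypothetical stand-ins for __kvm_is_mmio_pfn() and kvm_is_mmio_pfn().

/*
 * Minimal sketch of the tri-state caching idiom used in the patch.
 * Compile with: cc -o demo demo.c
 */
#include <stdbool.h>
#include <stdio.h>

static int query_count;

/* Stand-in for __kvm_is_mmio_pfn(): costly, stable result for a given pfn. */
static bool expensive_query(unsigned long pfn)
{
        query_count++;          /* count invocations to show the cache works */
        return pfn & 1;         /* arbitrary predicate for the demo */
}

/* Stand-in for kvm_is_mmio_pfn(): compute on first use, then reuse. */
static bool cached_query(unsigned long pfn, int *cache)
{
        if (*cache < 0)
                *cache = expensive_query(pfn);

        return *cache;
}

int main(void)
{
        int cache = -1;         /* -1 == "not computed yet", as in make_spte() */

        /* Multiple call sites share one local cache; the query runs once. */
        printf("first:  %d\n", cached_query(3, &cache));
        printf("second: %d\n", cached_query(3, &cache));
        printf("expensive_query ran %d time(s)\n", query_count);
        return 0;
}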