Skip to content

Commit 706c9c5

Browse files
sean-jc authored and bonzini committed
KVM: x86/mmu: Don't treat fully writable SPTEs as volatile (modulo A/D)
Don't treat SPTEs that are truly writable, i.e. writable in hardware, as being volatile (unless they're volatile for other reasons, e.g. A/D bits). KVM _sets_ the WRITABLE bit out of mmu_lock, but never _clears_ the bit out of mmu_lock, so if the WRITABLE bit is set, it cannot magically get cleared just because the SPTE is MMU-writable. Rename the wrapper of MMU-writable to be more literal, the previous name of spte_can_locklessly_be_made_writable() is wrong and misleading. Fixes: c7ba5b4 ("KVM: MMU: fast path of handling guest page fault") Cc: stable@vger.kernel.org Signed-off-by: Sean Christopherson <seanjc@google.com> Message-Id: <20220423034752.1161007-2-seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 4418723 commit 706c9c5

2 files changed

Lines changed: 10 additions & 9 deletions

File tree

arch/x86/kvm/mmu/mmu.c

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -484,13 +484,15 @@ static bool spte_has_volatile_bits(u64 spte)
484484
* also, it can help us to get a stable is_writable_pte()
485485
* to ensure tlb flush is not missed.
486486
*/
487-
if (spte_can_locklessly_be_made_writable(spte) ||
488-
is_access_track_spte(spte))
487+
if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
488+
return true;
489+
490+
if (is_access_track_spte(spte))
489491
return true;
490492

491493
if (spte_ad_enabled(spte)) {
492-
if ((spte & shadow_accessed_mask) == 0 ||
493-
(is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
494+
if (!(spte & shadow_accessed_mask) ||
495+
(is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
494496
return true;
495497
}
496498

@@ -557,7 +559,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
557559
* we always atomically update it, see the comments in
558560
* spte_has_volatile_bits().
559561
*/
560-
if (spte_can_locklessly_be_made_writable(old_spte) &&
562+
if (is_mmu_writable_spte(old_spte) &&
561563
!is_writable_pte(new_spte))
562564
flush = true;
563565

@@ -1187,7 +1189,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
11871189
u64 spte = *sptep;
11881190

11891191
if (!is_writable_pte(spte) &&
1190-
!(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1192+
!(pt_protect && is_mmu_writable_spte(spte)))
11911193
return false;
11921194

11931195
rmap_printk("spte %p %llx\n", sptep, *sptep);
@@ -3196,8 +3198,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
31963198
* be removed in the fast path only if the SPTE was
31973199
* write-protected for dirty-logging or access tracking.
31983200
*/
3199-
if (fault->write &&
3200-
spte_can_locklessly_be_made_writable(spte)) {
3201+
if (fault->write && is_mmu_writable_spte(spte)) {
32013202
new_spte |= PT_WRITABLE_MASK;
32023203

32033204
/*

arch/x86/kvm/mmu/spte.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -390,7 +390,7 @@ static inline void check_spte_writable_invariants(u64 spte)
390390
"kvm: Writable SPTE is not MMU-writable: %llx", spte);
391391
}
392392

393-
static inline bool spte_can_locklessly_be_made_writable(u64 spte)
393+
static inline bool is_mmu_writable_spte(u64 spte)
394394
{
395395
return spte & shadow_mmu_writable_mask;
396396
}

0 commit comments

Comments (0)