Skip to content

Commit 1075d41

Browse files
sean-jc
authored and bonzini committed
KVM: x86/mmu: Expand and clean up page fault stats
Expand and clean up the page fault stats. The current stats are at best incomplete, and at worst misleading. Differentiate between faults that are actually fixed vs those that result in an MMIO SPTE being created, track faults that are spurious, faults that trigger emulation, faults that that are fixed in the fast path, and last but not least, track the number of faults that are taken. Note, the number of faults that require emulation for write-protected shadow pages can roughly be calculated by subtracting the number of MMIO SPTEs created from the overall number of faults that trigger emulation. Signed-off-by: Sean Christopherson <seanjc@google.com> Message-Id: <20220423034752.1161007-10-seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 8d5265b commit 1075d41

6 files changed

Lines changed: 42 additions & 12 deletions

File tree

arch/x86/include/asm/kvm_host.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1269,7 +1269,12 @@ struct kvm_vm_stat {
12691269

12701270
struct kvm_vcpu_stat {
12711271
struct kvm_vcpu_stat_generic generic;
1272+
u64 pf_taken;
12721273
u64 pf_fixed;
1274+
u64 pf_emulate;
1275+
u64 pf_spurious;
1276+
u64 pf_fast;
1277+
u64 pf_mmio_spte_created;
12731278
u64 pf_guest;
12741279
u64 tlb_flush;
12751280
u64 invlpg;

arch/x86/kvm/mmu/mmu.c

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2660,6 +2660,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
26602660
*sptep, write_fault, gfn);
26612661

26622662
if (unlikely(is_noslot_pfn(pfn))) {
2663+
vcpu->stat.pf_mmio_spte_created++;
26632664
mark_mmio_spte(vcpu, sptep, gfn, pte_access);
26642665
return RET_PF_EMULATE;
26652666
}
@@ -2980,7 +2981,6 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
29802981
return ret;
29812982

29822983
direct_pte_prefetch(vcpu, it.sptep);
2983-
++vcpu->stat.pf_fixed;
29842984
return ret;
29852985
}
29862986

@@ -3249,6 +3249,9 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
32493249
trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
32503250
walk_shadow_page_lockless_end(vcpu);
32513251

3252+
if (ret != RET_PF_INVALID)
3253+
vcpu->stat.pf_fast++;
3254+
32523255
return ret;
32533256
}
32543257

@@ -5354,7 +5357,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
53545357
write_unlock(&vcpu->kvm->mmu_lock);
53555358
}
53565359

5357-
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5360+
int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
53585361
void *insn, int insn_len)
53595362
{
53605363
int r, emulation_type = EMULTYPE_PF;

arch/x86/kvm/mmu/mmu_internal.h

Lines changed: 26 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -248,11 +248,35 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
248248
.req_level = PG_LEVEL_4K,
249249
.goal_level = PG_LEVEL_4K,
250250
};
251+
int r;
252+
253+
/*
254+
* Async #PF "faults", a.k.a. prefetch faults, are not faults from the
255+
* guest perspective and have already been counted at the time of the
256+
* original fault.
257+
*/
258+
if (!prefetch)
259+
vcpu->stat.pf_taken++;
251260

252261
if (IS_ENABLED(CONFIG_RETPOLINE) && fault.is_tdp)
253-
return kvm_tdp_page_fault(vcpu, &fault);
262+
r = kvm_tdp_page_fault(vcpu, &fault);
263+
else
264+
r = vcpu->arch.mmu->page_fault(vcpu, &fault);
254265

255-
return vcpu->arch.mmu->page_fault(vcpu, &fault);
266+
/*
267+
* Similar to above, prefetch faults aren't truly spurious, and the
268+
* async #PF path doesn't do emulation. Do count faults that are fixed
269+
* by the async #PF handler though, otherwise they'll never be counted.
270+
*/
271+
if (r == RET_PF_FIXED)
272+
vcpu->stat.pf_fixed++;
273+
else if (prefetch)
274+
;
275+
else if (r == RET_PF_EMULATE)
276+
vcpu->stat.pf_emulate++;
277+
else if (r == RET_PF_SPURIOUS)
278+
vcpu->stat.pf_spurious++;
279+
return r;
256280
}
257281

258282
int kvm_mmu_max_mapping_level(struct kvm *kvm,

arch/x86/kvm/mmu/paging_tmpl.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -723,7 +723,6 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
723723
return ret;
724724

725725
FNAME(pte_prefetch)(vcpu, gw, it.sptep);
726-
++vcpu->stat.pf_fixed;
727726
return ret;
728727

729728
out_gpte_changed:

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1100,6 +1100,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
11001100

11011101
/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
11021102
if (unlikely(is_mmio_spte(new_spte))) {
1103+
vcpu->stat.pf_mmio_spte_created++;
11031104
trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
11041105
new_spte);
11051106
ret = RET_PF_EMULATE;
@@ -1108,13 +1109,6 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
11081109
rcu_dereference(iter->sptep));
11091110
}
11101111

1111-
/*
1112-
* Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
1113-
* consistent with legacy MMU behavior.
1114-
*/
1115-
if (ret != RET_PF_SPURIOUS)
1116-
vcpu->stat.pf_fixed++;
1117-
11181112
return ret;
11191113
}
11201114

arch/x86/kvm/x86.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -266,7 +266,12 @@ const struct kvm_stats_header kvm_vm_stats_header = {
266266

267267
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
268268
KVM_GENERIC_VCPU_STATS(),
269+
STATS_DESC_COUNTER(VCPU, pf_taken),
269270
STATS_DESC_COUNTER(VCPU, pf_fixed),
271+
STATS_DESC_COUNTER(VCPU, pf_emulate),
272+
STATS_DESC_COUNTER(VCPU, pf_spurious),
273+
STATS_DESC_COUNTER(VCPU, pf_fast),
274+
STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created),
270275
STATS_DESC_COUNTER(VCPU, pf_guest),
271276
STATS_DESC_COUNTER(VCPU, tlb_flush),
272277
STATS_DESC_COUNTER(VCPU, invlpg),

0 commit comments

Comments
 (0)