Skip to content

Commit cd42853

Browse files
Lai Jiangshan authored and sean-jc committed
kvm: x86/mmu: Use KVM_MMU_ROOT_XXX for kvm_mmu_invalidate_addr()
The @root_hpa for kvm_mmu_invalidate_addr() is called with @mmu->root.hpa or INVALID_PAGE where @mmu->root.hpa is to invalidate gva for the current root (the same meaning as KVM_MMU_ROOT_CURRENT) and INVALID_PAGE is to invalidate gva for all roots (the same meaning as KVM_MMU_ROOTS_ALL). Change the argument type of kvm_mmu_invalidate_addr() and use KVM_MMU_ROOT_XXX instead so that we can reuse the function for kvm_mmu_invpcid_gva() and nested_ept_invalidate_addr() for invalidating gva for different set of roots. No functionalities changed. Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com> Link: https://lore.kernel.org/r/20230216154115.710033-9-jiangshanlai@gmail.com [sean: massage comment slightly] Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent f94db0c commit cd42853

3 files changed

Lines changed: 21 additions & 21 deletions

File tree

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2045,7 +2045,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
20452045
void *insn, int insn_len);
20462046
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
20472047
void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
2048-
u64 addr, hpa_t root_hpa);
2048+
u64 addr, unsigned long roots);
20492049
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
20502050
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
20512051

arch/x86/kvm/mmu/mmu.c

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -5765,10 +5765,12 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
57655765
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
57665766

57675767
void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
5768-
u64 addr, hpa_t root_hpa)
5768+
u64 addr, unsigned long roots)
57695769
{
57705770
int i;
57715771

5772+
WARN_ON_ONCE(roots & ~KVM_MMU_ROOTS_ALL);
5773+
57725774
/* It's actually a GPA for vcpu->arch.guest_mmu. */
57735775
if (mmu != &vcpu->arch.guest_mmu) {
57745776
/* INVLPG on a non-canonical address is a NOP according to the SDM. */
@@ -5781,31 +5783,29 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
57815783
if (!mmu->invlpg)
57825784
return;
57835785

5784-
if (root_hpa == INVALID_PAGE) {
5786+
if (roots & KVM_MMU_ROOT_CURRENT)
57855787
mmu->invlpg(vcpu, addr, mmu->root.hpa);
57865788

5787-
/*
5788-
* INVLPG is required to invalidate any global mappings for the VA,
5789-
* irrespective of PCID. Since it would take us roughly similar amount
5790-
* of work to determine whether any of the prev_root mappings of the VA
5791-
* is marked global, or to just sync it blindly, so we might as well
5792-
* just always sync it.
5793-
*
5794-
* Mappings not reachable via the current cr3 or the prev_roots will be
5795-
* synced when switching to that cr3, so nothing needs to be done here
5796-
* for them.
5797-
*/
5798-
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5799-
if (VALID_PAGE(mmu->prev_roots[i].hpa))
5800-
mmu->invlpg(vcpu, addr, mmu->prev_roots[i].hpa);
5801-
} else {
5802-
mmu->invlpg(vcpu, addr, root_hpa);
5789+
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5790+
if ((roots & KVM_MMU_ROOT_PREVIOUS(i)) &&
5791+
VALID_PAGE(mmu->prev_roots[i].hpa))
5792+
mmu->invlpg(vcpu, addr, mmu->prev_roots[i].hpa);
58035793
}
58045794
}
58055795

58065796
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
58075797
{
5808-
kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
5798+
/*
5799+
* INVLPG is required to invalidate any global mappings for the VA,
5800+
* irrespective of PCID. Blindly sync all roots as it would take
5801+
* roughly the same amount of work/time to determine whether any of the
5802+
* previous roots have a global mapping.
5803+
*
5804+
* Mappings not reachable via the current or previous cached roots will
5805+
* be synced when switching to that new cr3, so nothing needs to be
5806+
* done here for them.
5807+
*/
5808+
kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
58095809
++vcpu->stat.invlpg;
58105810
}
58115811
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

arch/x86/kvm/x86.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -803,7 +803,7 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
803803
if ((fault->error_code & PFERR_PRESENT_MASK) &&
804804
!(fault->error_code & PFERR_RSVD_MASK))
805805
kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address,
806-
fault_mmu->root.hpa);
806+
KVM_MMU_ROOT_CURRENT);
807807

808808
fault_mmu->inject_page_fault(vcpu, fault);
809809
}

0 commit comments

Comments (0)