@@ -961,7 +961,8 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
 	return count;
 }
 
-static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
+static void pte_list_desc_remove_entry(struct kvm *kvm,
+				       struct kvm_rmap_head *rmap_head,
 				       struct pte_list_desc *desc, int i)
 {
 	struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
@@ -997,7 +998,8 @@ static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
 	mmu_free_pte_list_desc(head_desc);
 }
 
-static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
+static void pte_list_remove(struct kvm *kvm, u64 *spte,
+			    struct kvm_rmap_head *rmap_head)
 {
 	struct pte_list_desc *desc;
 	int i;
@@ -1016,7 +1018,8 @@ static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
 	while (desc) {
 		for (i = 0; i < desc->spte_count; ++i) {
 			if (desc->sptes[i] == spte) {
-				pte_list_desc_remove_entry(rmap_head, desc, i);
+				pte_list_desc_remove_entry(kvm, rmap_head,
+							   desc, i);
 				return;
 			}
 		}
@@ -1031,7 +1034,7 @@ static void kvm_zap_one_rmap_spte(struct kvm *kvm,
 				  struct kvm_rmap_head *rmap_head, u64 *sptep)
 {
 	mmu_spte_clear_track_bits(kvm, sptep);
-	pte_list_remove(sptep, rmap_head);
+	pte_list_remove(kvm, sptep, rmap_head);
 }
 
 /* Return true if at least one SPTE was zapped, false otherwise */
@@ -1106,7 +1109,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	slot = __gfn_to_memslot(slots, gfn);
 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
 
-	pte_list_remove(spte, rmap_head);
+	pte_list_remove(kvm, spte, rmap_head);
 }
 
 /*
@@ -1753,16 +1756,16 @@ static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache,
 	pte_list_add(cache, parent_pte, &sp->parent_ptes);
 }
 
-static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
+static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 				       u64 *parent_pte)
 {
-	pte_list_remove(parent_pte, &sp->parent_ptes);
+	pte_list_remove(kvm, parent_pte, &sp->parent_ptes);
 }
 
-static void drop_parent_pte(struct kvm_mmu_page *sp,
+static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 			    u64 *parent_pte)
 {
-	mmu_page_remove_parent_pte(sp, parent_pte);
+	mmu_page_remove_parent_pte(kvm, sp, parent_pte);
 	mmu_spte_clear_no_track(parent_pte);
 }
 
@@ -2477,7 +2480,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		if (child->role.access == direct_access)
 			return;
 
-		drop_parent_pte(child, sptep);
+		drop_parent_pte(vcpu->kvm, child, sptep);
 		kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep);
 	}
 }
@@ -2495,7 +2498,7 @@ static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 			drop_spte(kvm, spte);
 		} else {
 			child = spte_to_child_sp(pte);
-			drop_parent_pte(child, spte);
+			drop_parent_pte(kvm, child, spte);
 
 			/*
 			 * Recursively zap nested TDP SPs, parentless SPs are
@@ -2526,13 +2529,13 @@ static int kvm_mmu_page_unlink_children(struct kvm *kvm,
 	return zapped;
 }
 
-static void kvm_mmu_unlink_parents(struct kvm_mmu_page *sp)
+static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
 
 	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
-		drop_parent_pte(sp, sptep);
+		drop_parent_pte(kvm, sp, sptep);
 }
 
 static int mmu_zap_unsync_children(struct kvm *kvm,
@@ -2571,7 +2574,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
 	++kvm->stat.mmu_shadow_zapped;
 	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
 	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
-	kvm_mmu_unlink_parents(sp);
+	kvm_mmu_unlink_parents(kvm, sp);
 
 	/* Zapping children means active_mmu_pages has become unstable. */
 	list_unstable = *nr_zapped;
@@ -2929,7 +2932,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 			u64 pte = *sptep;
 
 			child = spte_to_child_sp(pte);
-			drop_parent_pte(child, sptep);
+			drop_parent_pte(vcpu->kvm, child, sptep);
 			flush = true;
 		} else if (pfn != spte_to_pfn(*sptep)) {
 			drop_spte(vcpu->kvm, sptep);
0 commit comments