@@ -5902,23 +5902,24 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
 
 /* The return value indicates if tlb flush on all vcpus is needed. */
-typedef bool (*slot_level_handler) (struct kvm *kvm,
+typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
 				    struct kvm_rmap_head *rmap_head,
 				    const struct kvm_memory_slot *slot);
 
 /* The caller should hold mmu-lock before calling this function. */
-static __always_inline bool
-slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot,
-			slot_level_handler fn, int start_level, int end_level,
-			gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
-			bool flush)
+static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
+					      const struct kvm_memory_slot *slot,
+					      slot_rmaps_handler fn,
+					      int start_level, int end_level,
+					      gfn_t start_gfn, gfn_t end_gfn,
+					      bool flush_on_yield, bool flush)
 {
 	struct slot_rmap_walk_iterator iterator;
 
-	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
+	for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
 			end_gfn, &iterator) {
 		if (iterator.rmap)
-			flush |= fn(kvm, iterator.rmap, memslot);
+			flush |= fn(kvm, iterator.rmap, slot);
 
 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
 			if (flush && flush_on_yield) {
@@ -5933,23 +5934,23 @@ slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot,
 	return flush;
 }
 
-static __always_inline bool
-slot_handle_level(struct kvm *kvm, const struct kvm_memory_slot *memslot,
-		  slot_level_handler fn, int start_level, int end_level,
-		  bool flush_on_yield)
+static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
+					    const struct kvm_memory_slot *slot,
+					    slot_rmaps_handler fn,
+					    int start_level, int end_level,
+					    bool flush_on_yield)
 {
-	return slot_handle_level_range(kvm, memslot, fn, start_level,
-				       end_level, memslot->base_gfn,
-				       memslot->base_gfn + memslot->npages - 1,
-				       flush_on_yield, false);
+	return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
+				 slot->base_gfn, slot->base_gfn + slot->npages - 1,
+				 flush_on_yield, false);
 }
 
-static __always_inline bool
-slot_handle_level_4k(struct kvm *kvm, const struct kvm_memory_slot *memslot,
-		     slot_level_handler fn, bool flush_on_yield)
+static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
+					       const struct kvm_memory_slot *slot,
+					       slot_rmaps_handler fn,
+					       bool flush_on_yield)
 {
-	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
-				 PG_LEVEL_4K, flush_on_yield);
+	return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
 }
 
 static void free_mmu_pages(struct kvm_mmu *mmu)
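
These first hunks only rename the rmap walker family; the walk's contract is unchanged: the handler is invoked once per non-empty rmap bucket in the requested gfn/level range, and its return value accumulates into the walker's flush result. Below is a minimal sketch of how a handler plugs into the renamed typedef; the example_* names and the handler body are illustrative assumptions, not part of this commit.

/*
 * Illustrative only: a handler matching the renamed slot_rmaps_handler
 * typedef. The walker calls it for each non-empty rmap bucket in the
 * requested gfn/level range; returning true requests a TLB flush.
 */
static bool example_rmaps_handler(struct kvm *kvm,
				  struct kvm_rmap_head *rmap_head,
				  const struct kvm_memory_slot *slot)
{
	/* ... inspect or modify the SPTEs chained off rmap_head ... */
	return false;
}

/* Hypothetical caller: a write-protect-style walk of the whole slot. */
static bool example_walk_all_levels(struct kvm *kvm,
				    const struct kvm_memory_slot *slot)
{
	/* mmu_lock must be held, per the walker's documented contract. */
	return walk_slot_rmaps(kvm, slot, example_rmaps_handler,
			       PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, false);
}
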
@@ -6244,9 +6245,9 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
 			if (WARN_ON_ONCE(start >= end))
 				continue;
 
-			flush = slot_handle_level_range(kvm, memslot, __kvm_zap_rmap,
-							PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
-							start, end - 1, true, flush);
+			flush = __walk_slot_rmaps(kvm, memslot, __kvm_zap_rmap,
+						  PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
+						  start, end - 1, true, flush);
 		}
 	}
 
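
This caller uses the double-underscore __walk_slot_rmaps() directly, rather than the walk_slot_rmaps() wrapper, because zapping covers a gfn range narrower than the whole slot. A hedged reconstruction of the clamping that precedes this hunk (start and end are the locals checked by the WARN_ON_ONCE; these two lines are an assumption for context, not shown in the diff):

/*
 * Assumed context: the walked range is the intersection of the
 * requested [gfn_start, gfn_end) with the memslot, minus one because
 * __walk_slot_rmaps() takes an inclusive end_gfn.
 */
gfn_t start = max(gfn_start, memslot->base_gfn);
gfn_t end = min(gfn_end, memslot->base_gfn + memslot->npages);
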
@@ -6298,8 +6299,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 {
 	if (kvm_memslots_have_rmaps(kvm)) {
 		write_lock(&kvm->mmu_lock);
-		slot_handle_level(kvm, memslot, slot_rmap_write_protect,
-				  start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
+		walk_slot_rmaps(kvm, memslot, slot_rmap_write_protect,
+				start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
 		write_unlock(&kvm->mmu_lock);
 	}
 
@@ -6534,10 +6535,9 @@ static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
 	 * all the way to the target level. There's no need to split pages
 	 * already at the target level.
 	 */
-	for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--) {
-		slot_handle_level_range(kvm, slot, shadow_mmu_try_split_huge_pages,
-					level, level, start, end - 1, true, false);
-	}
+	for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--)
+		__walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages,
+				  level, level, start, end - 1, true, false);
 }
 
 /* Must be called with the mmu_lock held in write-mode. */
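
The rewritten loop still visits levels top-down, one single-level walk per pass. A worked trace, assuming x86's usual values PG_LEVEL_4K == 1, PG_LEVEL_2M == 2, and KVM_MAX_HUGEPAGE_LEVEL == PG_LEVEL_1G == 3:

/*
 * Worked trace for target_level == PG_LEVEL_4K:
 *   pass 1: level == 3 (1G) -> 1G SPTEs are split down to 2M
 *   pass 2: level == 2 (2M) -> 2M SPTEs are split down to 4K
 * The loop stops before target_level, since pages already mapped at
 * the target level need no splitting.
 */
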
@@ -6635,8 +6635,8 @@ static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
 	 * Note, use KVM_MAX_HUGEPAGE_LEVEL - 1 since there's no need to zap
 	 * pages that are already mapped at the maximum hugepage level.
 	 */
-	if (slot_handle_level(kvm, slot, kvm_mmu_zap_collapsible_spte,
-			      PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true))
+	if (walk_slot_rmaps(kvm, slot, kvm_mmu_zap_collapsible_spte,
+			    PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true))
 		kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
 }
 
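
The upper bound follows directly from the comment; under the same assumed level values as above:

/*
 * With PG_LEVEL_4K == 1 and KVM_MAX_HUGEPAGE_LEVEL == 3 (1G), the walk
 * above covers levels 1..2 only: a 1G SPTE is already maximally huge,
 * so there is nothing larger to collapse it into.
 */
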
@@ -6679,7 +6679,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 		 * Clear dirty bits only on 4k SPTEs since the legacy MMU only
 		 * support dirty logging at a 4k granularity.
 		 */
-		slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
+		walk_slot_rmaps_4k(kvm, memslot, __rmap_clear_dirty, false);
 		write_unlock(&kvm->mmu_lock);
 	}
 
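
Since walk_slot_rmaps_4k() simply pins both level bounds (see its body in the earlier hunk), the renamed call above is equivalent to this expansion:

/* Equivalent expansion of walk_slot_rmaps_4k() at this call site: */
walk_slot_rmaps(kvm, memslot, __rmap_clear_dirty,
		PG_LEVEL_4K, PG_LEVEL_4K, false);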