@@ -155,23 +155,20 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * If shared is set, this function is operating under the MMU lock in read
  * mode.
  */
-#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
-	for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);	\
-	     _root;							\
-	     _root = tdp_mmu_next_root(_kvm, _root, _only_valid))	\
-		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&	\
-		    kvm_mmu_page_as_id(_root) != _as_id) {		\
+#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)\
+	for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);	\
+	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;	\
+	     _root = tdp_mmu_next_root(_kvm, _root, _only_valid))	\
+		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
 		} else
 
-#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)	\
-	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
+#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)	\
+	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, true)
 
-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared)		\
-	for (_root = tdp_mmu_next_root(_kvm, NULL, false);		\
-	     _root;							\
-	     _root = tdp_mmu_next_root(_kvm, _root, false))		\
-		if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) {	\
-		} else
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)			\
+	for (_root = tdp_mmu_next_root(_kvm, NULL, false);		\
+	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;	\
+	     _root = tdp_mmu_next_root(_kvm, _root, false))
 
 /*
  * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
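
Note on the hunk above: the reworked iterators fold the lock check into the for-loop condition. A GNU C statement expression runs lockdep_assert_held() and is joined to the _root termination test with the comma operator, so the assertion fires before every iteration and before the final NULL check. Because lockdep_assert_held() only requires that mmu_lock be held in some mode, read or write, the _shared parameter no longer buys anything and is dropped. Below is a minimal standalone sketch of the construct, with a hypothetical for_each_root macro and a plain assert() standing in for lockdep; it is not KVM code, and statement expressions are a gcc/clang extension:

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the KVM types and lockdep state. */
    struct root { int id; struct root *next; };
    static int lock_held = 1;

    /*
     * Same shape as __for_each_tdp_mmu_root_yield_safe(): the statement
     * expression runs the assertion, then the comma operator discards its
     * value and tests _root, so the assert is purely a side effect of
     * evaluating the loop condition.
     */
    #define for_each_root(_root, _head)                 \
            for (_root = (_head);                       \
                 ({ assert(lock_held); }), _root;       \
                 _root = (_root)->next)

    int main(void)
    {
            struct root c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
            struct root *r;

            for_each_root(r, &a)
                    printf("visiting root %d\n", r->id);
            return 0;
    }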
@@ -840,7 +837,8 @@ bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
 {
 	struct kvm_mmu_page *root;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+	lockdep_assert_held_write(&kvm->mmu_lock);
+	for_each_tdp_mmu_root_yield_safe(kvm, root)
 		flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
 
 	return flush;
@@ -862,7 +860,8 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
 	 * is being destroyed or the userspace VMM has exited. In both cases,
 	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
 	 */
-	for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+	lockdep_assert_held_write(&kvm->mmu_lock);
+	for_each_tdp_mmu_root_yield_safe(kvm, root)
 		tdp_mmu_zap_root(kvm, root, false);
 }
 
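
With _shared gone, the iterator can only verify that mmu_lock is held at all, not in which mode. Flows that genuinely need exclusivity, like kvm_tdp_mmu_zap_leafs() and kvm_tdp_mmu_zap_all() in the two hunks above, therefore assert write mode once before the loop instead of threading shared=false through the macro. Here is a rough userspace sketch of that split, using a hypothetical lock-mode flag in place of lockdep state:

    #include <assert.h>

    enum lock_mode { UNLOCKED, HELD_READ, HELD_WRITE };
    static enum lock_mode mmu_lock_mode = HELD_WRITE;

    /* Mirrors lockdep_assert_held(): any mode is acceptable. */
    static void assert_held(void)
    {
            assert(mmu_lock_mode != UNLOCKED);
    }

    /* Mirrors lockdep_assert_held_write(): exclusivity required. */
    static void assert_held_write(void)
    {
            assert(mmu_lock_mode == HELD_WRITE);
    }

    static void zap_all_roots(void)
    {
            assert_held_write();    /* caller-side check, done once */

            /* the iterator would still run assert_held() per iteration */
            assert_held();
    }

    int main(void)
    {
            zap_all_roots();
            return 0;
    }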
@@ -876,7 +875,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 
 	read_lock(&kvm->mmu_lock);
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
+	for_each_tdp_mmu_root_yield_safe(kvm, root) {
 		if (!root->tdp_mmu_scheduled_root_to_zap)
 			continue;
 
@@ -1133,7 +1132,7 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
 {
 	struct kvm_mmu_page *root;
 
-	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
+	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
 		flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
 					  range->may_block, flush);
 
@@ -1322,7 +1321,7 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
 
 	lockdep_assert_held_read(&kvm->mmu_lock);
 
-	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
+	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
 			     slot->base_gfn + slot->npages, min_level);
 
@@ -1354,6 +1353,8 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
 {
 	struct kvm_mmu_page *sp;
 
+	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
+
 	/*
 	 * Since we are allocating while under the MMU lock we have to be
 	 * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
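
The comment in the context above (truncated at the hunk boundary) explains why the allocation in this function uses GFP_NOWAIT: it may run while mmu_lock is held, so it must not sleep in direct reclaim, and a NULL return is an expected outcome the caller recovers from by dropping the lock and retrying. A hedged kernel-style sketch of the pattern follows; alloc_under_mmu_lock() is a hypothetical helper, whereas the real tdp_mmu_alloc_sp_for_split() allocates from KVM's page-header cache:

    #include <linux/slab.h>

    /*
     * Hypothetical helper showing the GFP_NOWAIT pattern: no direct
     * reclaim and no I/O, so the call cannot sleep while mmu_lock is
     * held. __GFP_ACCOUNT charges the allocation to the current memcg.
     */
    static void *alloc_under_mmu_lock(size_t size)
    {
            /*
             * NULL is expected under memory pressure; the caller reacts
             * by dropping mmu_lock, allocating with GFP_KERNEL, and
             * retrying the walk.
             */
            return kzalloc(size, GFP_NOWAIT | __GFP_ACCOUNT);
    }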
@@ -1504,8 +1505,7 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
 	int r = 0;
 
 	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
-
-	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
+	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) {
 		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
 		if (r) {
 			kvm_tdp_mmu_put_root(kvm, root);
@@ -1569,8 +1569,7 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
 	bool spte_set = false;
 
 	lockdep_assert_held_read(&kvm->mmu_lock);
-
-	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
+	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
 				  slot->base_gfn + slot->npages);
 
@@ -1704,8 +1703,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 	struct kvm_mmu_page *root;
 
 	lockdep_assert_held_read(&kvm->mmu_lock);
-
-	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
+	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
 		zap_collapsible_spte_range(kvm, root, slot);
 }
 