@@ -73,10 +73,13 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
 	tdp_mmu_free_sp(sp);
 }
 
-void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
-			  bool shared)
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
-	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
+	/*
+	 * Either read or write is okay, but mmu_lock must be held because
+	 * writers are not required to take tdp_mmu_pages_lock.
+	 */
+	lockdep_assert_held(&kvm->mmu_lock);
 
 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
 		return;
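For reference, the assertion being dropped here, kvm_lockdep_assert_mmu_lock_held(), pins the exact mode in which mmu_lock is held, which is why callers had to thread `shared` through purely for lockdep's benefit. A sketch of that helper as it exists earlier in tdp_mmu.c (paraphrased, not part of this diff):

	static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
								     bool shared)
	{
		if (shared)
			lockdep_assert_held_read(&kvm->mmu_lock);
		else
			lockdep_assert_held_write(&kvm->mmu_lock);

		return true;
	}

The relaxed lockdep_assert_held(&kvm->mmu_lock) used above is satisfied in either mode, so put_root no longer needs to know how its caller holds the lock.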
@@ -106,10 +109,16 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
  */
 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 					      struct kvm_mmu_page *prev_root,
-					      bool shared, bool only_valid)
+					      bool only_valid)
 {
 	struct kvm_mmu_page *next_root;
 
+	/*
+	 * While the roots themselves are RCU-protected, fields such as
+	 * role.invalid are protected by mmu_lock.
+	 */
+	lockdep_assert_held(&kvm->mmu_lock);
+
 	rcu_read_lock();
 
 	if (prev_root)
@@ -132,7 +141,7 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 	rcu_read_unlock();
 
 	if (prev_root)
-		kvm_tdp_mmu_put_root(kvm, prev_root, shared);
+		kvm_tdp_mmu_put_root(kvm, prev_root);
 
 	return next_root;
 }
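Because tdp_mmu_next_root() takes a reference on the root it returns and, as of this change, drops prev_root's reference while asserting mmu_lock itself, an open-coded walk would look roughly like this (a sketch; real callers use the macros below):

	struct kvm_mmu_page *root;

	for (root = tdp_mmu_next_root(kvm, NULL, true); root;
	     root = tdp_mmu_next_root(kvm, root, true)) {
		/*
		 * Exactly one root reference is held at this point, and
		 * mmu_lock is held in some mode, so the root cannot be
		 * freed out from under the walk.
		 */
	}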
@@ -144,13 +153,12 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * recent root. (Unless keeping a live reference is desirable.)
  *
  * If shared is set, this function is operating under the MMU lock in read
- * mode. In the unlikely event that this thread must free a root, the lock
- * will be temporarily dropped and reacquired in write mode.
+ * mode.
  */
 #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
-	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);	\
+	for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);		\
 	     _root;								\
-	     _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))	\
+	     _root = tdp_mmu_next_root(_kvm, _root, _only_valid))		\
 		if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&		\
 		    kvm_mmu_page_as_id(_root) != _as_id) {			\
 		} else
@@ -159,9 +167,9 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
 
 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared)			\
-	for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false);		\
+	for (_root = tdp_mmu_next_root(_kvm, NULL, false);			\
 	     _root;								\
-	     _root = tdp_mmu_next_root(_kvm, _root, _shared, false))		\
+	     _root = tdp_mmu_next_root(_kvm, _root, false))			\
 		if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) {		\
 		} else
 
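As a usage sketch of the iterator (tdp_mmu_zap_root() exists in this file, but the wrapper function here is hypothetical):

	static void example_zap_all_roots(struct kvm *kvm)	/* hypothetical */
	{
		struct kvm_mmu_page *root;

		write_lock(&kvm->mmu_lock);

		/* _shared == false: the macro asserts mmu_lock is held for write. */
		for_each_tdp_mmu_root_yield_safe(kvm, root, false)
			tdp_mmu_zap_root(kvm, root, false);

		write_unlock(&kvm->mmu_lock);
	}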
@@ -891,7 +899,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 		 * the root must be reachable by mmu_notifiers while it's being
 		 * zapped
 		 */
-		kvm_tdp_mmu_put_root(kvm, root, true);
+		kvm_tdp_mmu_put_root(kvm, root);
 	}
 
 	read_unlock(&kvm->mmu_lock);
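The put above runs in kvm_tdp_mmu_zap_invalidated_roots() under mmu_lock held for read; condensed, the surrounding loop is shaped roughly like this (a simplified sketch, details elided):

	read_lock(&kvm->mmu_lock);

	list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
		if (!root->role.invalid)	/* sketch: only invalidated roots are zapped */
			continue;

		tdp_mmu_zap_root(kvm, root, true);

		/*
		 * Put the reference only after the zap so mmu_notifiers can
		 * reach the root throughout; the relaxed assertion makes this
		 * call valid with mmu_lock held in either mode.
		 */
		kvm_tdp_mmu_put_root(kvm, root);
	}

	read_unlock(&kvm->mmu_lock);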
@@ -1500,7 +1508,7 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
 	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
 		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
 		if (r) {
-			kvm_tdp_mmu_put_root(kvm, root, shared);
+			kvm_tdp_mmu_put_root(kvm, root);
 			break;
 		}
 	}
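Note the pattern in this last hunk: the yield-safe iterator drops a root's reference only when it advances to the next root, so a caller that breaks out of the loop early must put the current root itself, exactly as the error path above does. Schematically (the operation here is hypothetical):

	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, as_id, shared) {
		r = some_operation(kvm, root);	/* hypothetical */
		if (r) {
			kvm_tdp_mmu_put_root(kvm, root);	/* drop the iterator's ref */
			break;
		}
	}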