Skip to content

Commit bee8a52

Browse files
committed
rhashtable: Use rcu_dereference_all and rcu_dereference_all_check
Add rcu_dereference_all and rcu_dereference_all_check so that library code
such as rhashtable can be used with any RCU variant. As it stands,
rcu_dereference is used within rhashtable, which creates false-positive
warnings if the user calls it from another RCU context, such as
preempt_disable(). Use the rcu_dereference_all and rcu_dereference_all_check
calls in rhashtable to suppress these warnings.

Also replace the rcu_dereference_raw calls in the list iterators with
rcu_dereference_all to uncover buggy calls.

Reported-by: Menglong Dong <dongml2@chinatelecom.cn>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
1 parent f75f666 commit bee8a52

2 files changed

Lines changed: 33 additions & 7 deletions

File tree

include/linux/rcupdate.h

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -713,6 +713,24 @@ do { \
 			(c) || rcu_read_lock_sched_held(), \
 			__rcu)
 
+/**
+ * rcu_dereference_all_check() - rcu_dereference_all with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is similar to rcu_dereference_check(), but allows protection
+ * by all forms of vanilla RCU readers, including preemption disabled,
+ * bh-disabled, and interrupt-disabled regions of code. Note that "vanilla
+ * RCU" excludes SRCU and the various Tasks RCU flavors. Please note
+ * that this macro should not be backported to any Linux-kernel version
+ * preceding v5.0 due to changes in synchronize_rcu() semantics prior
+ * to that version.
+ */
+#define rcu_dereference_all_check(p, c) \
+	__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+			(c) || rcu_read_lock_any_held(), \
+			__rcu)
+
 /*
  * The tracing infrastructure traces RCU (we want that), but unfortunately
  * some of the RCU checks causes tracing to lock up the system.
@@ -767,6 +785,14 @@ do { \
  */
 #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
 
+/**
+ * rcu_dereference_all() - fetch RCU-all-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_all(p) rcu_dereference_all_check(p, 0)
+
 /**
  * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
  * @p: The pointer to hand off

include/linux/rhashtable.h

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -272,13 +272,13 @@ struct rhash_lock_head __rcu **rht_bucket_nested_insert(
 	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
 
 #define rht_dereference_rcu(p, ht) \
-	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
+	rcu_dereference_all_check(p, lockdep_rht_mutex_is_held(ht))
 
 #define rht_dereference_bucket(p, tbl, hash) \
 	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
 
 #define rht_dereference_bucket_rcu(p, tbl, hash) \
-	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
+	rcu_dereference_all_check(p, lockdep_rht_bucket_is_held(tbl, hash))
 
 #define rht_entry(tpos, pos, member) \
 	({ tpos = container_of(pos, typeof(*tpos), member); 1; })
@@ -373,7 +373,7 @@ static inline struct rhash_head *__rht_ptr(
 static inline struct rhash_head *rht_ptr_rcu(
 	struct rhash_lock_head __rcu *const *bkt)
 {
-	return __rht_ptr(rcu_dereference(*bkt), bkt);
+	return __rht_ptr(rcu_dereference_all(*bkt), bkt);
 }
 
 static inline struct rhash_head *rht_ptr(
@@ -497,7 +497,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
 	for (({barrier(); }), \
 	     pos = head; \
 	     !rht_is_a_nulls(pos); \
-	     pos = rcu_dereference_raw(pos->next))
+	     pos = rcu_dereference_all(pos->next))
 
 /**
  * rht_for_each_rcu - iterate over rcu hash chain
@@ -513,7 +513,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
 	for (({barrier(); }), \
 	     pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \
 	     !rht_is_a_nulls(pos); \
-	     pos = rcu_dereference_raw(pos->next))
+	     pos = rcu_dereference_all(pos->next))
 
 /**
  * rht_for_each_entry_rcu_from - iterated over rcu hash chain from given head
@@ -560,7 +560,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
  * list returned by rhltable_lookup.
  */
 #define rhl_for_each_rcu(pos, list) \
-	for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
+	for (pos = list; pos; pos = rcu_dereference_all(pos->next))
 
 /**
  * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
@@ -574,7 +574,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
  */
 #define rhl_for_each_entry_rcu(tpos, pos, list, member) \
 	for (pos = list; pos && rht_entry(tpos, pos, member); \
-	     pos = rcu_dereference_all(pos->next))
+	     pos = rcu_dereference_all(pos->next))
 
 static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
 				     const void *obj)

0 commit comments

Comments
 (0)