Skip to content

Commit 1fe09eb

Browse files
committed
rcu: Inline __call_rcu() into call_rcu()
Because __call_rcu() is invoked only by call_rcu(), this commit inlines the former into the latter. Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
1 parent 218b957 commit 1fe09eb

1 file changed

Lines changed: 42 additions & 49 deletions

File tree

kernel/rcu/tree.c

Lines changed: 42 additions & 49 deletions
Original file line number | Diff line number | Diff line change
@@ -2995,9 +2995,47 @@ static void check_cb_ovld(struct rcu_data *rdp)
29952995
raw_spin_unlock_rcu_node(rnp);
29962996
}
29972997

2998-
/* Helper function for call_rcu() and friends. */
2999-
static void
3000-
__call_rcu(struct rcu_head *head, rcu_callback_t func)
2998+
/**
2999+
* call_rcu() - Queue an RCU callback for invocation after a grace period.
3000+
* @head: structure to be used for queueing the RCU updates.
3001+
* @func: actual callback function to be invoked after the grace period
3002+
*
3003+
* The callback function will be invoked some time after a full grace
3004+
* period elapses, in other words after all pre-existing RCU read-side
3005+
* critical sections have completed. However, the callback function
3006+
* might well execute concurrently with RCU read-side critical sections
3007+
* that started after call_rcu() was invoked.
3008+
*
3009+
* RCU read-side critical sections are delimited by rcu_read_lock()
3010+
* and rcu_read_unlock(), and may be nested. In addition, but only in
3011+
* v5.0 and later, regions of code across which interrupts, preemption,
3012+
* or softirqs have been disabled also serve as RCU read-side critical
3013+
* sections. This includes hardware interrupt handlers, softirq handlers,
3014+
* and NMI handlers.
3015+
*
3016+
* Note that all CPUs must agree that the grace period extended beyond
3017+
* all pre-existing RCU read-side critical sections. On systems with more
3018+
* than one CPU, this means that when "func()" is invoked, each CPU is
3019+
* guaranteed to have executed a full memory barrier since the end of its
3020+
* last RCU read-side critical section whose beginning preceded the call
3021+
* to call_rcu(). It also means that each CPU executing an RCU read-side
3022+
* critical section that continues beyond the start of "func()" must have
3023+
* executed a memory barrier after the call_rcu() but before the beginning
3024+
* of that RCU read-side critical section. Note that these guarantees
3025+
* include CPUs that are offline, idle, or executing in user mode, as
3026+
* well as CPUs that are executing in the kernel.
3027+
*
3028+
* Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3029+
* resulting RCU callback function "func()", then both CPU A and CPU B are
3030+
* guaranteed to execute a full memory barrier during the time interval
3031+
* between the call to call_rcu() and the invocation of "func()" -- even
3032+
* if CPU A and CPU B are the same CPU (but again only if the system has
3033+
* more than one CPU).
3034+
*
3035+
* Implementation of these memory-ordering guarantees is described here:
3036+
* Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3037+
*/
3038+
void call_rcu(struct rcu_head *head, rcu_callback_t func)
30013039
{
30023040
static atomic_t doublefrees;
30033041
unsigned long flags;
@@ -3011,7 +3049,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
30113049
/*
30123050
* Probable double call_rcu(), so leak the callback.
30133051
* Use rcu:rcu_callback trace event to find the previous
3014-
* time callback was passed to __call_rcu().
3052+
* time callback was passed to call_rcu().
30153053
*/
30163054
if (atomic_inc_return(&doublefrees) < 4) {
30173055
pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
@@ -3060,51 +3098,6 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
30603098
local_irq_restore(flags);
30613099
}
30623100
}
3063-
3064-
/**
3065-
* call_rcu() - Queue an RCU callback for invocation after a grace period.
3066-
* @head: structure to be used for queueing the RCU updates.
3067-
* @func: actual callback function to be invoked after the grace period
3068-
*
3069-
* The callback function will be invoked some time after a full grace
3070-
* period elapses, in other words after all pre-existing RCU read-side
3071-
* critical sections have completed. However, the callback function
3072-
* might well execute concurrently with RCU read-side critical sections
3073-
* that started after call_rcu() was invoked.
3074-
*
3075-
* RCU read-side critical sections are delimited by rcu_read_lock()
3076-
* and rcu_read_unlock(), and may be nested. In addition, but only in
3077-
* v5.0 and later, regions of code across which interrupts, preemption,
3078-
* or softirqs have been disabled also serve as RCU read-side critical
3079-
* sections. This includes hardware interrupt handlers, softirq handlers,
3080-
* and NMI handlers.
3081-
*
3082-
* Note that all CPUs must agree that the grace period extended beyond
3083-
* all pre-existing RCU read-side critical section. On systems with more
3084-
* than one CPU, this means that when "func()" is invoked, each CPU is
3085-
* guaranteed to have executed a full memory barrier since the end of its
3086-
* last RCU read-side critical section whose beginning preceded the call
3087-
* to call_rcu(). It also means that each CPU executing an RCU read-side
3088-
* critical section that continues beyond the start of "func()" must have
3089-
* executed a memory barrier after the call_rcu() but before the beginning
3090-
* of that RCU read-side critical section. Note that these guarantees
3091-
* include CPUs that are offline, idle, or executing in user mode, as
3092-
* well as CPUs that are executing in the kernel.
3093-
*
3094-
* Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3095-
* resulting RCU callback function "func()", then both CPU A and CPU B are
3096-
* guaranteed to execute a full memory barrier during the time interval
3097-
* between the call to call_rcu() and the invocation of "func()" -- even
3098-
* if CPU A and CPU B are the same CPU (but again only if the system has
3099-
* more than one CPU).
3100-
*
3101-
* Implementation of these memory-ordering guarantees is described here:
3102-
* Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3103-
*/
3104-
void call_rcu(struct rcu_head *head, rcu_callback_t func)
3105-
{
3106-
__call_rcu(head, func);
3107-
}
31083101
EXPORT_SYMBOL_GPL(call_rcu);
31093102

31103103

0 commit comments

Comments
 (0)