Skip to content

Commit 6120b72

Browse files
Frederic Weisbecker authored and paulmckrcu committed
rcu: Remove rcu_data.exp_deferred_qs and convert to rcu_data.cpu_no_qs.b.exp
Having two fields for the same purpose with subtle differences on different RCU flavours is confusing, especially when both fields always exist on both RCU flavours. Fortunately, it is now safe for preemptible RCU to rely on the rcu_data structure's ->cpu_no_qs.b.exp field, just like non-preemptible RCU. This commit therefore removes the ad-hoc ->exp_deferred_qs field. Signed-off-by: Frederic Weisbecker <frederic@kernel.org> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
1 parent 6e16b0f commit 6120b72

3 files changed

Lines changed: 8 additions & 10 deletions

File tree

kernel/rcu/tree.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,6 @@ struct rcu_data {
157157
bool core_needs_qs; /* Core waits for quiescent state. */
158158
bool beenonline; /* CPU online at least once. */
159159
bool gpwrap; /* Possible ->gp_seq wrap. */
160-
bool exp_deferred_qs; /* This CPU awaiting a deferred QS? */
161160
bool cpu_started; /* RCU watching this onlining CPU. */
162161
struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
163162
unsigned long grpmask; /* Mask to apply to leaf qsmask. */

kernel/rcu/tree_exp.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -255,7 +255,6 @@ static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
255255
*/
256256
static void rcu_report_exp_rdp(struct rcu_data *rdp)
257257
{
258-
WRITE_ONCE(rdp->exp_deferred_qs, false);
259258
WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
260259
rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
261260
}
@@ -656,7 +655,7 @@ static void rcu_exp_handler(void *unused)
656655
rcu_dynticks_curr_cpu_in_eqs()) {
657656
rcu_report_exp_rdp(rdp);
658657
} else {
659-
rdp->exp_deferred_qs = true;
658+
WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
660659
set_tsk_need_resched(t);
661660
set_preempt_need_resched();
662661
}
@@ -678,7 +677,7 @@ static void rcu_exp_handler(void *unused)
678677
if (depth > 0) {
679678
raw_spin_lock_irqsave_rcu_node(rnp, flags);
680679
if (rnp->expmask & rdp->grpmask) {
681-
rdp->exp_deferred_qs = true;
680+
WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
682681
t->rcu_read_unlock_special.b.exp_hint = true;
683682
}
684683
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

kernel/rcu/tree_plugin.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -260,10 +260,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
260260
* no need to check for a subsequent expedited GP. (Though we are
261261
* still in a quiescent state in any case.)
262262
*/
263-
if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs)
263+
if (blkd_state & RCU_EXP_BLKD && rdp->cpu_no_qs.b.exp)
264264
rcu_report_exp_rdp(rdp);
265265
else
266-
WARN_ON_ONCE(rdp->exp_deferred_qs);
266+
WARN_ON_ONCE(rdp->cpu_no_qs.b.exp);
267267
}
268268

269269
/*
@@ -354,7 +354,7 @@ void rcu_note_context_switch(bool preempt)
354354
* means that we continue to block the current grace period.
355355
*/
356356
rcu_qs();
357-
if (rdp->exp_deferred_qs)
357+
if (rdp->cpu_no_qs.b.exp)
358358
rcu_report_exp_rdp(rdp);
359359
rcu_tasks_qs(current, preempt);
360360
trace_rcu_utilization(TPS("End context switch"));
@@ -481,7 +481,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
481481
*/
482482
special = t->rcu_read_unlock_special;
483483
rdp = this_cpu_ptr(&rcu_data);
484-
if (!special.s && !rdp->exp_deferred_qs) {
484+
if (!special.s && !rdp->cpu_no_qs.b.exp) {
485485
local_irq_restore(flags);
486486
return;
487487
}
@@ -501,7 +501,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
501501
* tasks are handled when removing the task from the
502502
* blocked-tasks list below.
503503
*/
504-
if (rdp->exp_deferred_qs)
504+
if (rdp->cpu_no_qs.b.exp)
505505
rcu_report_exp_rdp(rdp);
506506

507507
/* Clean up if blocked during RCU read-side critical section. */
@@ -584,7 +584,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
584584
*/
585585
static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
586586
{
587-
return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
587+
return (__this_cpu_read(rcu_data.cpu_no_qs.b.exp) ||
588588
READ_ONCE(t->rcu_read_unlock_special.s)) &&
589589
rcu_preempt_depth() == 0;
590590
}

0 commit comments

Comments (0)