Skip to content

Commit e22ef8d

Browse files
committed
rcutorture: Make rcu_fwd_cb_nodelay be a counter
Back when only one rcutorture kthread could do forward-progress testing, it was just fine for rcu_fwd_cb_nodelay to be a non-atomic bool. It was set at the start of forward-progress testing and cleared at the end. But now that there are multiple threads, the value can be cleared while one of the threads is still doing forward-progress testing. This commit therefore makes rcu_fwd_cb_nodelay be an atomic counter, replacing the WRITE_ONCE() stores with atomic_inc() and atomic_dec() and the READ_ONCE() loads with atomic_read(). Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
1 parent 05b7246 commit e22ef8d

1 file changed

Lines changed: 7 additions & 7 deletions

File tree

kernel/rcu/rcutorture.c

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -284,7 +284,7 @@ static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
284284
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
285285
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
286286

287-
static bool rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */
287+
static atomic_t rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */
288288

289289
/*
290290
* Allocate an element from the rcu_tortures pool.
@@ -387,7 +387,7 @@ rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
387387
* period, and we want a long delay occasionally to trigger
388388
* force_quiescent_state. */
389389

390-
if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
390+
if (!atomic_read(&rcu_fwd_cb_nodelay) &&
391391
!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
392392
started = cur_ops->get_gp_seq();
393393
ts = rcu_trace_clock_local();
@@ -1276,7 +1276,7 @@ rcu_torture_writer(void *arg)
12761276
boot_ended = rcu_inkernel_boot_has_ended();
12771277
stutter_waited = stutter_wait("rcu_torture_writer");
12781278
if (stutter_waited &&
1279-
!READ_ONCE(rcu_fwd_cb_nodelay) &&
1279+
!atomic_read(&rcu_fwd_cb_nodelay) &&
12801280
!cur_ops->slow_gps &&
12811281
!torture_must_stop() &&
12821282
boot_ended)
@@ -2290,7 +2290,7 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
22902290
}
22912291

22922292
/* Tight loop containing cond_resched(). */
2293-
WRITE_ONCE(rcu_fwd_cb_nodelay, true);
2293+
atomic_inc(&rcu_fwd_cb_nodelay);
22942294
cur_ops->sync(); /* Later readers see above write. */
22952295
if (selfpropcb) {
22962296
WRITE_ONCE(fcs.stop, 0);
@@ -2335,7 +2335,7 @@ static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
23352335
destroy_rcu_head_on_stack(&fcs.rh);
23362336
}
23372337
schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
2338-
WRITE_ONCE(rcu_fwd_cb_nodelay, false);
2338+
atomic_dec(&rcu_fwd_cb_nodelay);
23392339
}
23402340

23412341
/* Carry out call_rcu() forward-progress testing. */
@@ -2362,7 +2362,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
23622362
return; /* Can't do call_rcu() fwd prog without ->call. */
23632363

23642364
/* Loop continuously posting RCU callbacks. */
2365-
WRITE_ONCE(rcu_fwd_cb_nodelay, true);
2365+
atomic_inc(&rcu_fwd_cb_nodelay);
23662366
cur_ops->sync(); /* Later readers see above write. */
23672367
WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
23682368
stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
@@ -2435,7 +2435,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
24352435
}
24362436
schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
24372437
tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2438-
WRITE_ONCE(rcu_fwd_cb_nodelay, false);
2438+
atomic_dec(&rcu_fwd_cb_nodelay);
24392439
}
24402440

24412441

0 commit comments

Comments (0)