Skip to content

Commit 613b00f

Browse files
committed
rcutorture: Add ability to limit callback-flood intensity
The RCU tasks flavors of RCU now need concurrent callback flooding to test their ability to switch between single-queue mode and per-CPU queue mode, but their lack of heavy-duty forward-progress features rules out the use of rcutorture's current callback-flooding code. This commit therefore provides the ability to limit the intensity of the callback floods using a new ->cbflood_max field in the rcu_torture_ops structure. When this field is zero, there is no limit; otherwise, each callback-flood kthread allocates at most ->cbflood_max callbacks. Cc: Neeraj Upadhyay <neeraj.iitr10@gmail.com> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
1 parent 82e3100 commit 613b00f

1 file changed

Lines changed: 14 additions & 5 deletions

File tree

kernel/rcu/rcutorture.c

Lines changed: 14 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -348,6 +348,7 @@ struct rcu_torture_ops {
348348
void (*gp_kthread_dbg)(void);
349349
bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
350350
int (*stall_dur)(void);
351+
long cbflood_max;
351352
int irq_capable;
352353
int can_boost;
353354
int extendables;
@@ -841,6 +842,7 @@ static struct rcu_torture_ops tasks_rude_ops = {
841842
.call = call_rcu_tasks_rude,
842843
.cb_barrier = rcu_barrier_tasks_rude,
843844
.gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
845+
.cbflood_max = 50000,
844846
.fqs = NULL,
845847
.stats = NULL,
846848
.irq_capable = 1,
@@ -881,6 +883,7 @@ static struct rcu_torture_ops tasks_tracing_ops = {
881883
.call = call_rcu_tasks_trace,
882884
.cb_barrier = rcu_barrier_tasks_trace,
883885
.gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
886+
.cbflood_max = 50000,
884887
.fqs = NULL,
885888
.stats = NULL,
886889
.irq_capable = 1,
@@ -2387,7 +2390,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
23872390
rfp->rcu_fwd_cb_head = rfcpn;
23882391
n_launders++;
23892392
n_launders_sa++;
2390-
} else {
2393+
} else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
23912394
rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
23922395
if (WARN_ON_ONCE(!rfcp)) {
23932396
schedule_timeout_interruptible(1);
@@ -2397,8 +2400,11 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
23972400
n_launders_sa = 0;
23982401
rfcp->rfc_gps = 0;
23992402
rfcp->rfc_rfp = rfp;
2403+
} else {
2404+
rfcp = NULL;
24002405
}
2401-
cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
2406+
if (rfcp)
2407+
cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
24022408
rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
24032409
if (tick_nohz_full_enabled()) {
24042410
local_irq_save(flags);
@@ -2506,8 +2512,10 @@ static int rcu_torture_fwd_prog(void *args)
25062512
pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
25072513
if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
25082514
rcu_torture_fwd_prog_cr(rfp);
2509-
if (!IS_ENABLED(CONFIG_TINY_RCU) ||
2510-
(rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id))
2515+
if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
2516+
(!IS_ENABLED(CONFIG_TINY_RCU) ||
2517+
(rcu_inkernel_boot_has_ended() &&
2518+
torture_num_online_cpus() > rfp->rcu_fwd_id)))
25112519
rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
25122520

25132521
/* Avoid slow periods, better to test when busy. */
@@ -2539,7 +2547,8 @@ static int __init rcu_torture_fwd_prog_init(void)
25392547
fwd_progress = nr_cpu_ids;
25402548
}
25412549
if ((!cur_ops->sync && !cur_ops->call) ||
2542-
!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || cur_ops == &rcu_busted_ops) {
2550+
(!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
2551+
cur_ops == &rcu_busted_ops) {
25432552
VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
25442553
fwd_progress = 0;
25452554
return 0;

0 commit comments

Comments
 (0)