Skip to content

Commit 218b957

Browse files
dwmw2 authored and paulmckrcu committed
rcu: Add mutex for rcu boost kthread spawning and affinity setting
As we handle parallel CPU bringup, we will need to take care to avoid spawning multiple boost threads, or race conditions when setting their affinity. Spotted by Paul McKenney.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
1 parent 150154a commit 218b957

3 files changed

Lines changed: 12 additions & 2 deletions

File tree

kernel/rcu/tree.c

Lines changed: 1 addition & 0 deletions
@@ -4570,6 +4570,7 @@ static void __init rcu_init_one(void)
 		init_waitqueue_head(&rnp->exp_wq[2]);
 		init_waitqueue_head(&rnp->exp_wq[3]);
 		spin_lock_init(&rnp->exp_lock);
+		mutex_init(&rnp->boost_kthread_mutex);
 	}
 }

kernel/rcu/tree.h

Lines changed: 3 additions & 0 deletions
@@ -110,6 +110,9 @@ struct rcu_node {
 					/* side effect, not as a lock. */
 	unsigned long boost_time;
 					/* When to start boosting (jiffies). */
+	struct mutex boost_kthread_mutex;
+					/* Exclusion for thread spawning and affinity */
+					/* manipulation. */
 	struct task_struct *boost_kthread_task;
 					/* kthread that takes care of priority */
 					/* boosting for this rcu_node structure. */

kernel/rcu/tree_plugin.h

Lines changed: 8 additions & 2 deletions
@@ -1172,22 +1172,26 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
 	struct sched_param sp;
 	struct task_struct *t;
 
+	mutex_lock(&rnp->boost_kthread_mutex);
 	if (rnp->boost_kthread_task || !rcu_scheduler_fully_active)
-		return;
+		goto out;
 
 	rcu_state.boost = 1;
 
 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
 			   "rcub/%d", rnp_index);
 	if (WARN_ON_ONCE(IS_ERR(t)))
-		return;
+		goto out;
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rnp->boost_kthread_task = t;
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
+
+out:
+	mutex_unlock(&rnp->boost_kthread_mutex);
 }
 
 /*
@@ -1210,6 +1214,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 		return;
 	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
 		return;
+	mutex_lock(&rnp->boost_kthread_mutex);
 	for_each_leaf_node_possible_cpu(rnp, cpu)
 		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
 		    cpu != outgoingcpu)
@@ -1218,6 +1223,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 	if (cpumask_weight(cm) == 0)
 		cpumask_copy(cm, housekeeping_cpumask(HK_FLAG_RCU));
 	set_cpus_allowed_ptr(t, cm);
+	mutex_unlock(&rnp->boost_kthread_mutex);
 	free_cpumask_var(cm);
 }

0 commit comments

Comments
 (0)