Skip to content

Commit 65b4a59

Browse files
paulmckrcu authored and urezki committed
srcu: Make Tiny SRCU explicitly disable preemption
Because Tiny SRCU is used only in kernels built with either CONFIG_PREEMPT_NONE=y or CONFIG_PREEMPT_VOLUNTARY=y, there has not been any need for TINY SRCU to explicitly disable preemption. However, the prospect of lazy preemption changes that, and the lazy-preemption patches do result in rcutorture runs finding both too-short grace periods and grace-period hangs for Tiny SRCU. This commit therefore adds the needed preempt_disable() and preempt_enable() calls to Tiny SRCU. Signed-off-by: Paul E. McKenney <paulmck@kernel.org> Cc: Ankur Arora <ankur.a.arora@oracle.com> Cc: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
1 parent c1ec7c1 commit 65b4a59

2 files changed

Lines changed: 28 additions & 5 deletions

File tree

include/linux/srcutiny.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,8 +64,10 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
6464
{
6565
int idx;
6666

67+
preempt_disable(); // Needed for PREEMPT_AUTO
6768
idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
6869
WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
70+
preempt_enable();
6971
return idx;
7072
}
7173

kernel/rcu/srcutiny.c

Lines changed: 26 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -96,9 +96,12 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
9696
*/
9797
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
9898
{
99-
int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;
99+
int newval;
100100

101+
preempt_disable(); // Needed for PREEMPT_AUTO
102+
newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;
101103
WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
104+
preempt_enable();
102105
if (!newval && READ_ONCE(ssp->srcu_gp_waiting) && in_task())
103106
swake_up_one(&ssp->srcu_wq);
104107
}
@@ -117,8 +120,11 @@ void srcu_drive_gp(struct work_struct *wp)
117120
struct srcu_struct *ssp;
118121

119122
ssp = container_of(wp, struct srcu_struct, srcu_work);
120-
if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
123+
preempt_disable(); // Needed for PREEMPT_AUTO
124+
	if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) {
		preempt_enable();
		return; /* Already running or nothing to do. */
	}
[NOTE(review): as originally committed, the added preempt_enable() appeared
after the return statement, making it unreachable and leaking the
preempt_disable() taken just above on this early-exit path. The two lines
must be ordered preempt_enable() first, then return — this was corrected
upstream by a follow-up patch.]
122128

123129
/* Remove recently arrived callbacks and wait for readers. */
124130
WRITE_ONCE(ssp->srcu_gp_running, true);
@@ -130,9 +136,12 @@ void srcu_drive_gp(struct work_struct *wp)
130136
idx = (ssp->srcu_idx & 0x2) / 2;
131137
WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
132138
WRITE_ONCE(ssp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
139+
preempt_enable();
133140
swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
141+
preempt_disable(); // Needed for PREEMPT_AUTO
134142
WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
135143
WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
144+
preempt_enable();
136145

137146
/* Invoke the callbacks we removed above. */
138147
while (lh) {
@@ -150,8 +159,11 @@ void srcu_drive_gp(struct work_struct *wp)
150159
* at interrupt level, but the ->srcu_gp_running checks will
151160
* straighten that out.
152161
*/
162+
preempt_disable(); // Needed for PREEMPT_AUTO
153163
WRITE_ONCE(ssp->srcu_gp_running, false);
154-
if (ULONG_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
164+
idx = ULONG_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max));
165+
preempt_enable();
166+
if (idx)
155167
schedule_work(&ssp->srcu_work);
156168
}
157169
EXPORT_SYMBOL_GPL(srcu_drive_gp);
@@ -160,16 +172,20 @@ static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
160172
{
161173
unsigned long cookie;
162174

175+
preempt_disable(); // Needed for PREEMPT_AUTO
163176
cookie = get_state_synchronize_srcu(ssp);
164-
if (ULONG_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))
177+
if (ULONG_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie)) {
178+
preempt_enable();
165179
return;
180+
}
166181
WRITE_ONCE(ssp->srcu_idx_max, cookie);
167182
if (!READ_ONCE(ssp->srcu_gp_running)) {
168183
if (likely(srcu_init_done))
169184
schedule_work(&ssp->srcu_work);
170185
else if (list_empty(&ssp->srcu_work.entry))
171186
list_add(&ssp->srcu_work.entry, &srcu_boot_list);
172187
}
188+
preempt_enable();
173189
}
174190

175191
/*
@@ -183,11 +199,13 @@ void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
183199

184200
rhp->func = func;
185201
rhp->next = NULL;
202+
preempt_disable(); // Needed for PREEMPT_AUTO
186203
local_irq_save(flags);
187204
*ssp->srcu_cb_tail = rhp;
188205
ssp->srcu_cb_tail = &rhp->next;
189206
local_irq_restore(flags);
190207
srcu_gp_start_if_needed(ssp);
208+
preempt_enable();
191209
}
192210
EXPORT_SYMBOL_GPL(call_srcu);
193211

@@ -241,9 +259,12 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
241259
*/
242260
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
243261
{
244-
unsigned long ret = get_state_synchronize_srcu(ssp);
262+
unsigned long ret;
245263

264+
preempt_disable(); // Needed for PREEMPT_AUTO
265+
ret = get_state_synchronize_srcu(ssp);
246266
srcu_gp_start_if_needed(ssp);
267+
preempt_enable();
247268
return ret;
248269
}
249270
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

0 commit comments

Comments
 (0)