Skip to content

Commit 3253cb4

Browse files
Sebastian Andrzej Siewior authored and KAGA-KOKO committed
softirq: Allow to drop the softirq-BKL lock on PREEMPT_RT
softirqs are preemptible on PREEMPT_RT. There is synchronisation between individual sections which disable bottom halves. This in turn means that a forced threaded interrupt cannot preempt another forced threaded interrupt. Instead it will PI-boost the other handler and wait for its completion. This is required because code within a softirq section is assumed to be non-preemptible and may expect exclusive access to per-CPU resources such as variables or pinned timers. Code with such expectation has been identified and updated to use local_lock_nested_bh() for locking of the per-CPU resource. This means the softirq lock can be removed. Disable the softirq synchronization, but add a new config switch CONFIG_PREEMPT_RT_NEEDS_BH_LOCK which allows to re-enable the synchronized behavior in case that there are issues, which haven't been detected yet. The softirq_ctrl.cnt accounting remains to let the NOHZ code know if softirqs are currently handled. Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 parent fd4e876 commit 3253cb4

2 files changed

Lines changed: 76 additions & 20 deletions

File tree

kernel/Kconfig.preempt

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,19 @@ config PREEMPT_RT
103103
Select this if you are building a kernel for systems which
104104
require real-time guarantees.
105105

106+
config PREEMPT_RT_NEEDS_BH_LOCK
107+
bool "Enforce softirq synchronisation on PREEMPT_RT"
108+
depends on PREEMPT_RT
109+
help
110+
Enforce synchronisation across the softirqs context. On PREEMPT_RT
111+
the softirq is preemptible. This enforces the same per-CPU BKL
112+
semantics that non-PREEMPT_RT builds have. This should not be needed
113+
because per-CPU locks were added to avoid the per-CPU BKL.
114+
115+
This switch provides the old behaviour for testing reasons. Select
116+
this if you suspect an error with preemptible softirq and want to test
117+
the old synchronized behaviour.
118+
106119
config PREEMPT_COUNT
107120
bool
108121

kernel/softirq.c

Lines changed: 63 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -165,7 +165,11 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
165165
/* First entry of a task into a BH disabled section? */
166166
if (!current->softirq_disable_cnt) {
167167
if (preemptible()) {
168-
local_lock(&softirq_ctrl.lock);
168+
if (IS_ENABLED(CONFIG_PREEMPT_RT_NEEDS_BH_LOCK))
169+
local_lock(&softirq_ctrl.lock);
170+
else
171+
migrate_disable();
172+
169173
/* Required to meet the RCU bottomhalf requirements. */
170174
rcu_read_lock();
171175
} else {
@@ -177,41 +181,77 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
177181
* Track the per CPU softirq disabled state. On RT this is per CPU
178182
* state to allow preemption of bottom half disabled sections.
179183
*/
180-
newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
181-
/*
182-
* Reflect the result in the task state to prevent recursion on the
183-
* local lock and to make softirq_count() & al work.
184-
*/
185-
current->softirq_disable_cnt = newcnt;
184+
if (IS_ENABLED(CONFIG_PREEMPT_RT_NEEDS_BH_LOCK)) {
185+
newcnt = this_cpu_add_return(softirq_ctrl.cnt, cnt);
186+
/*
187+
* Reflect the result in the task state to prevent recursion on the
188+
* local lock and to make softirq_count() & al work.
189+
*/
190+
current->softirq_disable_cnt = newcnt;
186191

187-
if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
188-
raw_local_irq_save(flags);
189-
lockdep_softirqs_off(ip);
190-
raw_local_irq_restore(flags);
192+
if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
193+
raw_local_irq_save(flags);
194+
lockdep_softirqs_off(ip);
195+
raw_local_irq_restore(flags);
196+
}
197+
} else {
198+
bool sirq_dis = false;
199+
200+
if (!current->softirq_disable_cnt)
201+
sirq_dis = true;
202+
203+
this_cpu_add(softirq_ctrl.cnt, cnt);
204+
current->softirq_disable_cnt += cnt;
205+
WARN_ON_ONCE(current->softirq_disable_cnt < 0);
206+
207+
if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && sirq_dis) {
208+
raw_local_irq_save(flags);
209+
lockdep_softirqs_off(ip);
210+
raw_local_irq_restore(flags);
211+
}
191212
}
192213
}
193214
EXPORT_SYMBOL(__local_bh_disable_ip);
194215

195216
static void __local_bh_enable(unsigned int cnt, bool unlock)
196217
{
197218
unsigned long flags;
219+
bool sirq_en = false;
198220
int newcnt;
199221

200-
DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
201-
this_cpu_read(softirq_ctrl.cnt));
222+
if (IS_ENABLED(CONFIG_PREEMPT_RT_NEEDS_BH_LOCK)) {
223+
DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
224+
this_cpu_read(softirq_ctrl.cnt));
225+
if (softirq_count() == cnt)
226+
sirq_en = true;
227+
} else {
228+
if (current->softirq_disable_cnt == cnt)
229+
sirq_en = true;
230+
}
202231

203-
if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
232+
if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && sirq_en) {
204233
raw_local_irq_save(flags);
205234
lockdep_softirqs_on(_RET_IP_);
206235
raw_local_irq_restore(flags);
207236
}
208237

209-
newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
210-
current->softirq_disable_cnt = newcnt;
238+
if (IS_ENABLED(CONFIG_PREEMPT_RT_NEEDS_BH_LOCK)) {
239+
newcnt = this_cpu_sub_return(softirq_ctrl.cnt, cnt);
240+
current->softirq_disable_cnt = newcnt;
211241

212-
if (!newcnt && unlock) {
213-
rcu_read_unlock();
214-
local_unlock(&softirq_ctrl.lock);
242+
if (!newcnt && unlock) {
243+
rcu_read_unlock();
244+
local_unlock(&softirq_ctrl.lock);
245+
}
246+
} else {
247+
current->softirq_disable_cnt -= cnt;
248+
this_cpu_sub(softirq_ctrl.cnt, cnt);
249+
if (unlock && !current->softirq_disable_cnt) {
250+
migrate_enable();
251+
rcu_read_unlock();
252+
} else {
253+
WARN_ON_ONCE(current->softirq_disable_cnt < 0);
254+
}
215255
}
216256
}
217257

@@ -228,7 +268,10 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
228268
lock_map_release(&bh_lock_map);
229269

230270
local_irq_save(flags);
231-
curcnt = __this_cpu_read(softirq_ctrl.cnt);
271+
if (IS_ENABLED(CONFIG_PREEMPT_RT_NEEDS_BH_LOCK))
272+
curcnt = this_cpu_read(softirq_ctrl.cnt);
273+
else
274+
curcnt = current->softirq_disable_cnt;
232275

233276
/*
234277
* If this is not reenabling soft interrupts, no point in trying to

0 commit comments

Comments
 (0)