Skip to content

Commit fd4e876

Browse files
Sebastian Andrzej SiewiorKAGA-KOKO
authored and committed
softirq: Provide a handshake for canceling tasklets via polling
The tasklet_unlock_spin_wait() via tasklet_disable_in_atomic() is provided for a few legacy tasklet users. The interface is used from atomic context (which is either softirq or disabled preemption) on non-PREEMPT_RT and relies on spinning until the tasklet callback completes. On PREEMPT_RT the context is never atomic but the busy polling logic remains. It is possible that the thread invoking tasklet_unlock_spin_wait() has higher priority than the tasklet. If both run on the same CPU the tasklet makes no progress and the thread trying to cancel the tasklet will live-lock the system. To avoid the lockup, tasklet_unlock_spin_wait() uses local_bh_disable()/enable(), which utilizes the local_lock_t for synchronisation. This lock is a central per-CPU BKL and about to be removed. Solve this by acquiring a lock in tasklet_action_common() which is held while the tasklet's callback is invoked. This lock will be acquired from tasklet_unlock_spin_wait() via tasklet_callback_cancel_wait_running(). After the tasklet has completed, tasklet_callback_sync_wait_running() drops the lock and acquires it again. In order to avoid unlocking the lock even if there is no cancel request, there is a cb_waiters counter which is incremented during a cancel request. Blocking on the lock will PI-boost the tasklet if needed, ensuring progress is made. Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 parent 8ad25eb commit fd4e876

1 file changed

Lines changed: 57 additions & 5 deletions

File tree

kernel/softirq.c

Lines changed: 57 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -805,6 +805,58 @@ static bool tasklet_clear_sched(struct tasklet_struct *t)
805805
return false;
806806
}
807807

808+
#ifdef CONFIG_PREEMPT_RT
/*
 * Per-CPU state for the PREEMPT_RT handshake between tasklet execution
 * and tasklet_unlock_spin_wait(): cb_lock is held while a tasklet
 * callback runs on this CPU, cb_waiters counts pending cancel requests
 * waiting for the callback to finish.
 */
struct tasklet_sync_callback {
	spinlock_t	cb_lock;
	atomic_t	cb_waiters;
};

static DEFINE_PER_CPU(struct tasklet_sync_callback, tasklet_sync_callback) = {
	.cb_lock	= __SPIN_LOCK_UNLOCKED(tasklet_sync_callback.cb_lock),
	.cb_waiters	= ATOMIC_INIT(0),
};
818+
819+
static void tasklet_lock_callback(void)
820+
{
821+
spin_lock(this_cpu_ptr(&tasklet_sync_callback.cb_lock));
822+
}
823+
824+
static void tasklet_unlock_callback(void)
825+
{
826+
spin_unlock(this_cpu_ptr(&tasklet_sync_callback.cb_lock));
827+
}
828+
829+
/*
 * Wait for a running tasklet callback on this CPU to complete without
 * busy polling.
 *
 * The waiter count is incremented before blocking on cb_lock so that
 * tasklet_callback_sync_wait_running() knows it must cycle the lock.
 * On PREEMPT_RT, blocking on cb_lock PI-boosts the tasklet thread if
 * needed, so the callback makes progress even against a higher-priority
 * canceler (see the commit rationale above).
 */
static void tasklet_callback_cancel_wait_running(void)
{
	struct tasklet_sync_callback *sync_cb = this_cpu_ptr(&tasklet_sync_callback);

	atomic_inc(&sync_cb->cb_waiters);
	spin_lock(&sync_cb->cb_lock);
	atomic_dec(&sync_cb->cb_waiters);
	spin_unlock(&sync_cb->cb_lock);
}
838+
839+
/*
 * Called from tasklet_action_common() after a tasklet callback
 * completed. Only when a canceler advertised itself via cb_waiters is
 * cb_lock dropped and re-acquired, letting the waiter in
 * tasklet_callback_cancel_wait_running() observe completion; otherwise
 * the unlock/lock cycle is skipped to keep the common (no cancel
 * pending) path cheap.
 */
static void tasklet_callback_sync_wait_running(void)
{
	struct tasklet_sync_callback *sync_cb = this_cpu_ptr(&tasklet_sync_callback);

	if (atomic_read(&sync_cb->cb_waiters)) {
		spin_unlock(&sync_cb->cb_lock);
		spin_lock(&sync_cb->cb_lock);
	}
}
848+
849+
#else /* !CONFIG_PREEMPT_RT: */

/*
 * Without PREEMPT_RT the spin-wait handshake is unnecessary: callers
 * of tasklet_unlock_spin_wait() run in atomic context and busy-poll,
 * so all hooks are empty stubs.
 */
static void tasklet_lock_callback(void) { }
static void tasklet_unlock_callback(void) { }
static void tasklet_callback_sync_wait_running(void) { }

#ifdef CONFIG_SMP
/*
 * NOTE(review): guarded by CONFIG_SMP — presumably its only caller,
 * tasklet_unlock_spin_wait(), exists only on SMP builds here; confirm
 * against the full file.
 */
static void tasklet_callback_cancel_wait_running(void) { }
#endif
#endif /* !CONFIG_PREEMPT_RT */
859+
808860
static void tasklet_action_common(struct tasklet_head *tl_head,
809861
unsigned int softirq_nr)
810862
{
@@ -816,6 +868,7 @@ static void tasklet_action_common(struct tasklet_head *tl_head,
816868
tl_head->tail = &tl_head->head;
817869
local_irq_enable();
818870

871+
tasklet_lock_callback();
819872
while (list) {
820873
struct tasklet_struct *t = list;
821874

@@ -835,6 +888,7 @@ static void tasklet_action_common(struct tasklet_head *tl_head,
835888
}
836889
}
837890
tasklet_unlock(t);
891+
tasklet_callback_sync_wait_running();
838892
continue;
839893
}
840894
tasklet_unlock(t);
@@ -847,6 +901,7 @@ static void tasklet_action_common(struct tasklet_head *tl_head,
847901
__raise_softirq_irqoff(softirq_nr);
848902
local_irq_enable();
849903
}
904+
tasklet_unlock_callback();
850905
}
851906

852907
static __latent_entropy void tasklet_action(void)
@@ -897,12 +952,9 @@ void tasklet_unlock_spin_wait(struct tasklet_struct *t)
897952
/*
898953
* Prevent a live lock when current preempted soft
899954
* interrupt processing or prevents ksoftirqd from
900-
* running. If the tasklet runs on a different CPU
901-
* then this has no effect other than doing the BH
902-
* disable/enable dance for nothing.
955+
* running.
903956
*/
904-
local_bh_disable();
905-
local_bh_enable();
957+
tasklet_callback_cancel_wait_running();
906958
} else {
907959
cpu_relax();
908960
}

0 commit comments

Comments
 (0)