Skip to content

Commit b85a1ac

Browse files
Sebastian Andrzej Siewior authored and Sasha Levin committed
net: Provide a PREEMPT_RT specific check for netdev_queue::_xmit_lock
[ Upstream commit b824c3e ] After acquiring netdev_queue::_xmit_lock the number of the CPU owning the lock is recorded in netdev_queue::xmit_lock_owner. This works as long as the BH context is not preemptible. On PREEMPT_RT the softirq context is preemptible and without the softirq-lock it is possible to have multiple users in __dev_queue_xmit() submitting a skb on the same CPU. This is fine in general but this means also that the current CPU is recorded as netdev_queue::xmit_lock_owner. This in turn leads to the recursion alert and the skb is dropped. Instead of checking the CPU number that owns the lock, PREEMPT_RT can check if the lock owner matches the current task. Add netif_tx_owned() which returns true if the current context owns the lock by comparing the provided CPU number with the recorded number. This resembles the current check by negating the condition (the current check returns true if the lock is not owned). On PREEMPT_RT use rt_mutex_owner() to return the lock owner and compare the current task against it. Use the new helper in __dev_queue_xmit() and netif_local_xmit_active() which provides a similar check. Update comments regarding pairing READ_ONCE(). Reported-by: Bert Karwatzki <spasswolf@web.de> Closes: https://lore.kernel.org/all/20260216134333.412332-1-spasswolf@web.de Fixes: 3253cb4 ("softirq: Allow to drop the softirq-BKL lock on PREEMPT_RT") Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Reported-by: Bert Karwatzki <spasswolf@web.de> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Link: https://patch.msgid.link/20260302162631.uGUyIqDT@linutronix.de Signed-off-by: Paolo Abeni <pabeni@redhat.com> Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 8700ed8 commit b85a1ac

3 files changed

Lines changed: 24 additions & 10 deletions

File tree

include/linux/netdevice.h

Lines changed: 22 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4708,7 +4708,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
47084708
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
47094709
{
47104710
spin_lock(&txq->_xmit_lock);
4711-
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4711+
/* Pairs with READ_ONCE() in netif_tx_owned() */
47124712
WRITE_ONCE(txq->xmit_lock_owner, cpu);
47134713
}
47144714

@@ -4726,7 +4726,7 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
47264726
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
47274727
{
47284728
spin_lock_bh(&txq->_xmit_lock);
4729-
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4729+
/* Pairs with READ_ONCE() in netif_tx_owned() */
47304730
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
47314731
}
47324732

@@ -4735,22 +4735,22 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
47354735
bool ok = spin_trylock(&txq->_xmit_lock);
47364736

47374737
if (likely(ok)) {
4738-
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4738+
/* Pairs with READ_ONCE() in netif_tx_owned() */
47394739
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
47404740
}
47414741
return ok;
47424742
}
47434743

47444744
static inline void __netif_tx_unlock(struct netdev_queue *txq)
47454745
{
4746-
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4746+
/* Pairs with READ_ONCE() in netif_tx_owned() */
47474747
WRITE_ONCE(txq->xmit_lock_owner, -1);
47484748
spin_unlock(&txq->_xmit_lock);
47494749
}
47504750

47514751
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
47524752
{
4753-
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
4753+
/* Pairs with READ_ONCE() in netif_tx_owned() */
47544754
WRITE_ONCE(txq->xmit_lock_owner, -1);
47554755
spin_unlock_bh(&txq->_xmit_lock);
47564756
}
@@ -4843,6 +4843,23 @@ static inline void netif_tx_disable(struct net_device *dev)
48434843
local_bh_enable();
48444844
}
48454845

4846+
#ifndef CONFIG_PREEMPT_RT
4847+
static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
4848+
{
4849+
/* Other cpus might concurrently change txq->xmit_lock_owner
4850+
* to -1 or to their cpu id, but not to our id.
4851+
*/
4852+
return READ_ONCE(txq->xmit_lock_owner) == cpu;
4853+
}
4854+
4855+
#else
4856+
static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
4857+
{
4858+
return rt_mutex_owner(&txq->_xmit_lock.lock) == current;
4859+
}
4860+
4861+
#endif
4862+
48464863
static inline void netif_addr_lock(struct net_device *dev)
48474864
{
48484865
unsigned char nest_level = 0;

net/core/dev.c

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4814,10 +4814,7 @@ int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
48144814
if (dev->flags & IFF_UP) {
48154815
int cpu = smp_processor_id(); /* ok because BHs are off */
48164816

4817-
/* Other cpus might concurrently change txq->xmit_lock_owner
4818-
* to -1 or to their cpu id, but not to our id.
4819-
*/
4820-
if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
4817+
if (!netif_tx_owned(txq, cpu)) {
48214818
bool is_list = false;
48224819

48234820
if (dev_xmit_recursion())

net/core/netpoll.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,7 @@ static int netif_local_xmit_active(struct net_device *dev)
132132
for (i = 0; i < dev->num_tx_queues; i++) {
133133
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
134134

135-
if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
135+
if (netif_tx_owned(txq, smp_processor_id()))
136136
return 1;
137137
}
138138

0 commit comments

Comments
 (0)