
Commit bf3c76b

Merge branch 'net-netpoll-improve-skb-pool-management'
Breno Leitao says:

====================
net: netpoll: Improve SKB pool management

The netpoll subsystem pre-allocates 32 SKBs in a pool for emergency use
during out-of-memory conditions. However, the current implementation
has several inefficiencies:

* The SKB pool, once allocated, is never freed:
  * Resources remain allocated even after netpoll users are removed
  * Failed initialization can leave the pool populated forever
* The global pool design makes resource tracking difficult

This series addresses these issues through two patches:

Patch 1 ("net: netpoll: Individualize the skb pool"):
  - Replace the global pool with per-user pools embedded in struct netpoll

Patch 2 ("net: netpoll: flush skb pool during cleanup"):
  - Properly free pool resources during netpoll cleanup

These changes improve resource management and make the code more
maintainable. As a side benefit, the improved structure would allow
netpoll to be modularized if desired in the future.

v2: https://lore.kernel.org/20241107-skb_buffers_v2-v2-0-288c6264ba4f@debian.org
v1: https://lore.kernel.org/20241025142025.3558051-1-leitao@debian.org
====================

Link: https://patch.msgid.link/20241114-skb_buffers_v2-v3-0-9be9f52a8b69@debian.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2 parents 11ee317 + 6c59f16 commit bf3c76b
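
For illustration, this is how a netpoll consumer interacts with the per-instance pool after this series. A minimal sketch: the struct and function names other than the netpoll_* calls are hypothetical, not code from the series.

#include <linux/netpoll.h>

/* Hypothetical netpoll user, e.g. a console-style logger. */
struct my_logger {
        struct netpoll np;      /* the SKB pool now lives inside this struct */
};

static int my_logger_start(struct my_logger *lg)
{
        /* netpoll_setup() now initializes and pre-fills lg->np.skb_pool */
        return netpoll_setup(&lg->np);
}

static void my_logger_stop(struct my_logger *lg)
{
        /* cleanup now also flushes the per-instance pool */
        netpoll_cleanup(&lg->np);
}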

2 files changed: 27 additions & 19 deletions

include/linux/netpoll.h

Lines changed: 1 addition & 0 deletions
@@ -32,6 +32,7 @@ struct netpoll {
         bool ipv6;
         u16 local_port, remote_port;
         u8 remote_mac[ETH_ALEN];
+        struct sk_buff_head skb_pool;
 };
 
 struct netpoll_info {
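
After this hunk, each netpoll instance carries its own pool. For context, the resulting struct reads roughly as follows; the fields above the hunk's context window are quoted from the upstream header and may vary slightly between kernel versions:

struct netpoll {
        struct net_device *dev;
        netdevice_tracker dev_tracker;
        char dev_name[IFNAMSIZ];
        const char *name;

        union inet_addr local_ip, remote_ip;
        bool ipv6;
        u16 local_port, remote_port;
        u8 remote_mac[ETH_ALEN];
        struct sk_buff_head skb_pool;   /* new: per-instance emergency pool */
};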

net/core/netpoll.c

Lines changed: 26 additions & 19 deletions
@@ -45,9 +45,6 @@
 
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
-
-static struct sk_buff_head skb_pool;
-
 #define USEC_PER_POLL 50
 
 #define MAX_SKB_SIZE \
@@ -234,20 +231,23 @@ void netpoll_poll_enable(struct net_device *dev)
         up(&ni->dev_lock);
 }
 
-static void refill_skbs(void)
+static void refill_skbs(struct netpoll *np)
 {
+        struct sk_buff_head *skb_pool;
         struct sk_buff *skb;
         unsigned long flags;
 
-        spin_lock_irqsave(&skb_pool.lock, flags);
-        while (skb_pool.qlen < MAX_SKBS) {
+        skb_pool = &np->skb_pool;
+
+        spin_lock_irqsave(&skb_pool->lock, flags);
+        while (skb_pool->qlen < MAX_SKBS) {
                 skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                 if (!skb)
                         break;
 
-                __skb_queue_tail(&skb_pool, skb);
+                __skb_queue_tail(skb_pool, skb);
         }
-        spin_unlock_irqrestore(&skb_pool.lock, flags);
+        spin_unlock_irqrestore(&skb_pool->lock, flags);
 }
 
 static void zap_completion_queue(void)
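
With the hunk applied, refill_skbs() reads as below (reassembled from the diff, with comments added for this write-up):

static void refill_skbs(struct netpoll *np)
{
        struct sk_buff_head *skb_pool;
        struct sk_buff *skb;
        unsigned long flags;

        skb_pool = &np->skb_pool;

        /* Top this instance's pool back up to MAX_SKBS (32) entries. */
        spin_lock_irqsave(&skb_pool->lock, flags);
        while (skb_pool->qlen < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;  /* allocation failed; keep what we already have */

                __skb_queue_tail(skb_pool, skb);
        }
        spin_unlock_irqrestore(&skb_pool->lock, flags);
}
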
@@ -284,12 +284,12 @@ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
         struct sk_buff *skb;
 
         zap_completion_queue();
-        refill_skbs();
+        refill_skbs(np);
 repeat:
 
         skb = alloc_skb(len, GFP_ATOMIC);
         if (!skb)
-                skb = skb_dequeue(&skb_pool);
+                skb = skb_dequeue(&np->skb_pool);
 
         if (!skb) {
                 if (++count < 10) {
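
For context, the full find_skb() after this change looks roughly like the following; the lines outside the hunk (the count/retry logic and the tail of the function) are reconstructed from the surrounding upstream code and may differ slightly:

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
        int count = 0;
        struct sk_buff *skb;

        zap_completion_queue();
        refill_skbs(np);
repeat:

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                /* allocation failed: fall back to this instance's pool */
                skb = skb_dequeue(&np->skb_pool);

        if (!skb) {
                /* pool empty too: poll the device a few times and retry */
                if (++count < 10) {
                        netpoll_poll_dev(np->dev);
                        goto repeat;
                }
                return NULL;
        }

        refcount_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
}
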
@@ -531,6 +531,14 @@ static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
         return -1;
 }
 
+static void skb_pool_flush(struct netpoll *np)
+{
+        struct sk_buff_head *skb_pool;
+
+        skb_pool = &np->skb_pool;
+        skb_queue_purge_reason(skb_pool, SKB_CONSUMED);
+}
+
 int netpoll_parse_options(struct netpoll *np, char *opt)
 {
         char *cur=opt, *delim;
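
skb_queue_purge_reason() empties the queue and frees every buffer with the given drop reason; SKB_CONSUMED marks the frees as intentional so drop-monitoring tools do not count them as packet drops. Semantically it behaves roughly like this simplified sketch (example_flush is a made-up name, not the upstream implementation):

static void example_flush(struct sk_buff_head *list)
{
        struct sk_buff *skb;

        /* skb_dequeue() takes the queue lock internally, so this is
         * safe against concurrent producers and consumers. */
        while ((skb = skb_dequeue(list)) != NULL)
                kfree_skb_reason(skb, SKB_CONSUMED);
}
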
@@ -673,6 +681,8 @@ int netpoll_setup(struct netpoll *np)
         struct in_device *in_dev;
         int err;
 
+        skb_queue_head_init(&np->skb_pool);
+
         rtnl_lock();
         if (np->dev_name[0]) {
                 struct net *net = current->nsproxy->net_ns;
@@ -773,14 +783,16 @@ int netpoll_setup(struct netpoll *np)
         }
 
         /* fill up the skb queue */
-        refill_skbs();
+        refill_skbs(np);
 
         err = __netpoll_setup(np, ndev);
         if (err)
-                goto put;
+                goto flush;
         rtnl_unlock();
         return 0;
 
+flush:
+        skb_pool_flush(np);
 put:
         DEBUG_NET_WARN_ON_ONCE(np->dev);
         if (ip_overwritten)
@@ -792,13 +804,6 @@ int netpoll_setup(struct netpoll *np)
 }
 EXPORT_SYMBOL(netpoll_setup);
 
-static int __init netpoll_init(void)
-{
-        skb_queue_head_init(&skb_pool);
-        return 0;
-}
-core_initcall(netpoll_init);
-
 static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
 {
         struct netpoll_info *npinfo =
@@ -835,6 +840,8 @@ void __netpoll_cleanup(struct netpoll *np)
                 call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
         } else
                 RCU_INIT_POINTER(np->dev->npinfo, NULL);
+
+        skb_pool_flush(np);
 }
 EXPORT_SYMBOL_GPL(__netpoll_cleanup);
