Skip to content

Commit 8e8e47d

Browse files
committed
Merge branch 'add-page_pool-support-for-page-recycling-in-veth-driver'
Lorenzo Bianconi says: ==================== add page_pool support for page recycling in veth driver Introduce page_pool support in veth driver in order to recycle pages in veth_convert_skb_to_xdp_buff routine and avoid reallocating the skb through the page allocator when we run a xdp program on the device and we receive skbs from the stack. ==================== Link: https://lore.kernel.org/r/cover.1682188837.git.lorenzo@kernel.org Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2 parents ffcddca + 4fc4180 commit 8e8e47d

2 files changed

Lines changed: 63 additions & 7 deletions

File tree

drivers/net/Kconfig

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -402,6 +402,8 @@ config TUN_VNET_CROSS_LE
402402

403403
config VETH
404404
tristate "Virtual ethernet pair device"
405+
select PAGE_POOL
406+
select PAGE_POOL_STATS
405407
help
406408
This device is a local ethernet tunnel. Devices are created in pairs.
407409
When one end receives the packet it appears on its pair and vice

drivers/net/veth.c

Lines changed: 61 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -26,6 +26,7 @@
2626
#include <linux/ptr_ring.h>
2727
#include <linux/bpf_trace.h>
2828
#include <linux/net_tstamp.h>
29+
#include <net/page_pool.h>
2930

3031
#define DRV_NAME "veth"
3132
#define DRV_VERSION "1.0"
@@ -65,6 +66,7 @@ struct veth_rq {
6566
bool rx_notify_masked;
6667
struct ptr_ring xdp_ring;
6768
struct xdp_rxq_info xdp_rxq;
69+
struct page_pool *page_pool;
6870
};
6971

7072
struct veth_priv {
@@ -155,6 +157,8 @@ static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
155157
for (j = 0; j < VETH_TQ_STATS_LEN; j++)
156158
ethtool_sprintf(&p, "tx_queue_%u_%.18s",
157159
i, veth_tq_stats_desc[j].desc);
160+
161+
page_pool_ethtool_stats_get_strings(p);
158162
break;
159163
}
160164
}
@@ -165,7 +169,8 @@ static int veth_get_sset_count(struct net_device *dev, int sset)
165169
case ETH_SS_STATS:
166170
return ARRAY_SIZE(ethtool_stats_keys) +
167171
VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
168-
VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
172+
VETH_TQ_STATS_LEN * dev->real_num_tx_queues +
173+
page_pool_ethtool_stats_get_count();
169174
default:
170175
return -EOPNOTSUPP;
171176
}
@@ -176,7 +181,8 @@ static void veth_get_ethtool_stats(struct net_device *dev,
176181
{
177182
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
178183
struct net_device *peer = rtnl_dereference(priv->peer);
179-
int i, j, idx;
184+
struct page_pool_stats pp_stats = {};
185+
int i, j, idx, pp_idx;
180186

181187
data[0] = peer ? peer->ifindex : 0;
182188
idx = 1;
@@ -195,9 +201,10 @@ static void veth_get_ethtool_stats(struct net_device *dev,
195201
} while (u64_stats_fetch_retry(&rq_stats->syncp, start));
196202
idx += VETH_RQ_STATS_LEN;
197203
}
204+
pp_idx = idx;
198205

199206
if (!peer)
200-
return;
207+
goto page_pool_stats;
201208

202209
rcv_priv = netdev_priv(peer);
203210
for (i = 0; i < peer->real_num_rx_queues; i++) {
@@ -214,7 +221,16 @@ static void veth_get_ethtool_stats(struct net_device *dev,
214221
data[tx_idx + j] += *(u64 *)(base + offset);
215222
}
216223
} while (u64_stats_fetch_retry(&rq_stats->syncp, start));
224+
pp_idx = tx_idx + VETH_TQ_STATS_LEN;
217225
}
226+
227+
page_pool_stats:
228+
for (i = 0; i < dev->real_num_rx_queues; i++) {
229+
if (!priv->rq[i].page_pool)
230+
continue;
231+
page_pool_get_stats(priv->rq[i].page_pool, &pp_stats);
232+
}
233+
page_pool_ethtool_stats_get(&data[pp_idx], &pp_stats);
218234
}
219235

220236
static void veth_get_channels(struct net_device *dev,
@@ -727,25 +743,27 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
727743
goto drop;
728744

729745
/* Allocate skb head */
730-
page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
746+
page = page_pool_dev_alloc_pages(rq->page_pool);
731747
if (!page)
732748
goto drop;
733749

734750
nskb = build_skb(page_address(page), PAGE_SIZE);
735751
if (!nskb) {
736-
put_page(page);
752+
page_pool_put_full_page(rq->page_pool, page, true);
737753
goto drop;
738754
}
739755

740756
skb_reserve(nskb, VETH_XDP_HEADROOM);
757+
skb_copy_header(nskb, skb);
758+
skb_mark_for_recycle(nskb);
759+
741760
size = min_t(u32, skb->len, max_head_size);
742761
if (skb_copy_bits(skb, 0, nskb->data, size)) {
743762
consume_skb(nskb);
744763
goto drop;
745764
}
746765
skb_put(nskb, size);
747766

748-
skb_copy_header(nskb, skb);
749767
head_off = skb_headroom(nskb) - skb_headroom(skb);
750768
skb_headers_offset_update(nskb, head_off);
751769

@@ -754,7 +772,7 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
754772
len = skb->len - off;
755773

756774
for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
757-
page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
775+
page = page_pool_dev_alloc_pages(rq->page_pool);
758776
if (!page) {
759777
consume_skb(nskb);
760778
goto drop;
@@ -1002,11 +1020,37 @@ static int veth_poll(struct napi_struct *napi, int budget)
10021020
return done;
10031021
}
10041022

1023+
static int veth_create_page_pool(struct veth_rq *rq)
1024+
{
1025+
struct page_pool_params pp_params = {
1026+
.order = 0,
1027+
.pool_size = VETH_RING_SIZE,
1028+
.nid = NUMA_NO_NODE,
1029+
.dev = &rq->dev->dev,
1030+
};
1031+
1032+
rq->page_pool = page_pool_create(&pp_params);
1033+
if (IS_ERR(rq->page_pool)) {
1034+
int err = PTR_ERR(rq->page_pool);
1035+
1036+
rq->page_pool = NULL;
1037+
return err;
1038+
}
1039+
1040+
return 0;
1041+
}
1042+
10051043
static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
10061044
{
10071045
struct veth_priv *priv = netdev_priv(dev);
10081046
int err, i;
10091047

1048+
for (i = start; i < end; i++) {
1049+
err = veth_create_page_pool(&priv->rq[i]);
1050+
if (err)
1051+
goto err_page_pool;
1052+
}
1053+
10101054
for (i = start; i < end; i++) {
10111055
struct veth_rq *rq = &priv->rq[i];
10121056

@@ -1027,6 +1071,11 @@ static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
10271071
err_xdp_ring:
10281072
for (i--; i >= start; i--)
10291073
ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
1074+
err_page_pool:
1075+
for (i = start; i < end; i++) {
1076+
page_pool_destroy(priv->rq[i].page_pool);
1077+
priv->rq[i].page_pool = NULL;
1078+
}
10301079

10311080
return err;
10321081
}
@@ -1056,6 +1105,11 @@ static void veth_napi_del_range(struct net_device *dev, int start, int end)
10561105
rq->rx_notify_masked = false;
10571106
ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
10581107
}
1108+
1109+
for (i = start; i < end; i++) {
1110+
page_pool_destroy(priv->rq[i].page_pool);
1111+
priv->rq[i].page_pool = NULL;
1112+
}
10591113
}
10601114

10611115
static void veth_napi_del(struct net_device *dev)

0 commit comments

Comments
 (0)