Skip to content

Commit 8301432

Browse files
tirthendus and Alexei Starovoitov
authored and committed
i40e: handle multi-buffer packets that are shrunk by xdp prog
XDP programs can shrink packets by calling the bpf_xdp_adjust_tail() helper function. For multi-buffer packets this may lead to reduction of frag count stored in skb_shared_info area of the xdp_buff struct. This results in issues with the current handling of XDP_PASS and XDP_DROP cases. For XDP_PASS, currently skb is being built using frag count of xdp_buffer before it was processed by XDP prog and thus will result in an inconsistent skb when frag count gets reduced by XDP prog. To fix this, get correct frag count while building the skb instead of using pre-obtained frag count. For XDP_DROP, current page recycling logic will not reuse the page but instead will adjust the pagecnt_bias so that the page can be freed. This again results in inconsistent behavior as the page refcnt has already been changed by the helper while freeing the frag(s) as part of shrinking the packet. To fix this, only adjust pagecnt_bias for buffers that are still part of the packet post-xdp prog run. Fixes: e213ced ("i40e: add support for XDP multi-buffer Rx") Reported-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com> Signed-off-by: Tirthendu Sarkar <tirthendu.sarkar@intel.com> Link: https://lore.kernel.org/r/20240124191602.566724-6-maciej.fijalkowski@intel.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent ad2047c commit 8301432

1 file changed

Lines changed: 23 additions & 17 deletions

File tree

drivers/net/ethernet/intel/i40e/i40e_txrx.c

Lines changed: 23 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -2087,7 +2087,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
20872087
static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
20882088
struct xdp_buff *xdp)
20892089
{
2090-
u32 next = rx_ring->next_to_clean;
2090+
u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
2091+
u32 next = rx_ring->next_to_clean, i = 0;
20912092
struct i40e_rx_buffer *rx_buffer;
20922093

20932094
xdp->flags = 0;
@@ -2100,10 +2101,10 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
21002101
if (!rx_buffer->page)
21012102
continue;
21022103

2103-
if (xdp_res == I40E_XDP_CONSUMED)
2104-
rx_buffer->pagecnt_bias++;
2105-
else
2104+
if (xdp_res != I40E_XDP_CONSUMED)
21062105
i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
2106+
else if (i++ <= nr_frags)
2107+
rx_buffer->pagecnt_bias++;
21072108

21082109
/* EOP buffer will be put in i40e_clean_rx_irq() */
21092110
if (next == rx_ring->next_to_process)
@@ -2117,20 +2118,20 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
21172118
* i40e_construct_skb - Allocate skb and populate it
21182119
* @rx_ring: rx descriptor ring to transact packets on
21192120
* @xdp: xdp_buff pointing to the data
2120-
* @nr_frags: number of buffers for the packet
21212121
*
21222122
* This function allocates an skb. It then populates it with the page
21232123
* data from the current receive descriptor, taking care to set up the
21242124
* skb correctly.
21252125
*/
21262126
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
2127-
struct xdp_buff *xdp,
2128-
u32 nr_frags)
2127+
struct xdp_buff *xdp)
21292128
{
21302129
unsigned int size = xdp->data_end - xdp->data;
21312130
struct i40e_rx_buffer *rx_buffer;
2131+
struct skb_shared_info *sinfo;
21322132
unsigned int headlen;
21332133
struct sk_buff *skb;
2134+
u32 nr_frags = 0;
21342135

21352136
/* prefetch first cache line of first page */
21362137
net_prefetch(xdp->data);
@@ -2168,6 +2169,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
21682169
memcpy(__skb_put(skb, headlen), xdp->data,
21692170
ALIGN(headlen, sizeof(long)));
21702171

2172+
if (unlikely(xdp_buff_has_frags(xdp))) {
2173+
sinfo = xdp_get_shared_info_from_buff(xdp);
2174+
nr_frags = sinfo->nr_frags;
2175+
}
21712176
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
21722177
/* update all of the pointers */
21732178
size -= headlen;
@@ -2187,9 +2192,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
21872192
}
21882193

21892194
if (unlikely(xdp_buff_has_frags(xdp))) {
2190-
struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
2195+
struct skb_shared_info *skinfo = skb_shinfo(skb);
21912196

2192-
sinfo = xdp_get_shared_info_from_buff(xdp);
21932197
memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
21942198
sizeof(skb_frag_t) * nr_frags);
21952199

@@ -2212,17 +2216,17 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
22122216
* i40e_build_skb - Build skb around an existing buffer
22132217
* @rx_ring: Rx descriptor ring to transact packets on
22142218
* @xdp: xdp_buff pointing to the data
2215-
* @nr_frags: number of buffers for the packet
22162219
*
22172220
* This function builds an skb around an existing Rx buffer, taking care
22182221
* to set up the skb correctly and avoid any memcpy overhead.
22192222
*/
22202223
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2221-
struct xdp_buff *xdp,
2222-
u32 nr_frags)
2224+
struct xdp_buff *xdp)
22232225
{
22242226
unsigned int metasize = xdp->data - xdp->data_meta;
2227+
struct skb_shared_info *sinfo;
22252228
struct sk_buff *skb;
2229+
u32 nr_frags;
22262230

22272231
/* Prefetch first cache line of first page. If xdp->data_meta
22282232
* is unused, this points exactly as xdp->data, otherwise we
@@ -2231,6 +2235,11 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
22312235
*/
22322236
net_prefetch(xdp->data_meta);
22332237

2238+
if (unlikely(xdp_buff_has_frags(xdp))) {
2239+
sinfo = xdp_get_shared_info_from_buff(xdp);
2240+
nr_frags = sinfo->nr_frags;
2241+
}
2242+
22342243
/* build an skb around the page buffer */
22352244
skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
22362245
if (unlikely(!skb))
@@ -2243,9 +2252,6 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
22432252
skb_metadata_set(skb, metasize);
22442253

22452254
if (unlikely(xdp_buff_has_frags(xdp))) {
2246-
struct skb_shared_info *sinfo;
2247-
2248-
sinfo = xdp_get_shared_info_from_buff(xdp);
22492255
xdp_update_skb_shared_info(skb, nr_frags,
22502256
sinfo->xdp_frags_size,
22512257
nr_frags * xdp->frame_sz,
@@ -2589,9 +2595,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
25892595
total_rx_bytes += size;
25902596
} else {
25912597
if (ring_uses_build_skb(rx_ring))
2592-
skb = i40e_build_skb(rx_ring, xdp, nfrags);
2598+
skb = i40e_build_skb(rx_ring, xdp);
25932599
else
2594-
skb = i40e_construct_skb(rx_ring, xdp, nfrags);
2600+
skb = i40e_construct_skb(rx_ring, xdp);
25952601

25962602
/* drop if we failed to retrieve a buffer */
25972603
if (!skb) {

0 commit comments

Comments
 (0)