Skip to content

Commit 20f7973

Browse files
Wei Fang (NXP) authored and Paolo Abeni committed
net: fec: recycle pages for transmitted XDP frames
Once the XDP frames have been successfully transmitted through the ndo_xdp_xmit() interface, it's the driver responsibility to free the frames so that the page_pool can recycle the pages and reuse them. However, this action is not implemented in the fec driver. This leads to a user-visible problem that the console will print the following warning log. [ 157.568851] page_pool_release_retry() stalled pool shutdown 1389 inflight 60 sec [ 217.983446] page_pool_release_retry() stalled pool shutdown 1389 inflight 120 sec [ 278.399006] page_pool_release_retry() stalled pool shutdown 1389 inflight 181 sec [ 338.812885] page_pool_release_retry() stalled pool shutdown 1389 inflight 241 sec [ 399.226946] page_pool_release_retry() stalled pool shutdown 1389 inflight 302 sec Therefore, to solve this issue, we free XDP frames via xdp_return_frame() while cleaning the tx BD ring. Fixes: 6d6b39f ("net: fec: add initial XDP support") Signed-off-by: Wei Fang <wei.fang@nxp.com> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
1 parent be7ecbe commit 20f7973

2 files changed

Lines changed: 115 additions & 48 deletions

File tree

drivers/net/ethernet/freescale/fec.h

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -544,10 +544,23 @@ enum {
544544
XDP_STATS_TOTAL,
545545
};
546546

547+
enum fec_txbuf_type {
548+
FEC_TXBUF_T_SKB,
549+
FEC_TXBUF_T_XDP_NDO,
550+
};
551+
552+
struct fec_tx_buffer {
553+
union {
554+
struct sk_buff *skb;
555+
struct xdp_frame *xdp;
556+
};
557+
enum fec_txbuf_type type;
558+
};
559+
547560
struct fec_enet_priv_tx_q {
548561
struct bufdesc_prop bd;
549562
unsigned char *tx_bounce[TX_RING_SIZE];
550-
struct sk_buff *tx_skbuff[TX_RING_SIZE];
563+
struct fec_tx_buffer tx_buf[TX_RING_SIZE];
551564

552565
unsigned short tx_stop_threshold;
553566
unsigned short tx_wake_threshold;

drivers/net/ethernet/freescale/fec_main.c

Lines changed: 101 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -397,7 +397,7 @@ static void fec_dump(struct net_device *ndev)
397397
fec16_to_cpu(bdp->cbd_sc),
398398
fec32_to_cpu(bdp->cbd_bufaddr),
399399
fec16_to_cpu(bdp->cbd_datlen),
400-
txq->tx_skbuff[index]);
400+
txq->tx_buf[index].skb);
401401
bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
402402
index++;
403403
} while (bdp != txq->bd.base);
@@ -654,7 +654,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
654654

655655
index = fec_enet_get_bd_index(last_bdp, &txq->bd);
656656
/* Save skb pointer */
657-
txq->tx_skbuff[index] = skb;
657+
txq->tx_buf[index].skb = skb;
658658

659659
/* Make sure the updates to rest of the descriptor are performed before
660660
* transferring ownership.
@@ -672,9 +672,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
672672

673673
skb_tx_timestamp(skb);
674674

675-
/* Make sure the update to bdp and tx_skbuff are performed before
676-
* txq->bd.cur.
677-
*/
675+
/* Make sure the update to bdp is performed before txq->bd.cur. */
678676
wmb();
679677
txq->bd.cur = bdp;
680678

@@ -862,7 +860,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
862860
}
863861

864862
/* Save skb pointer */
865-
txq->tx_skbuff[index] = skb;
863+
txq->tx_buf[index].skb = skb;
866864

867865
skb_tx_timestamp(skb);
868866
txq->bd.cur = bdp;
@@ -952,16 +950,33 @@ static void fec_enet_bd_init(struct net_device *dev)
952950
for (i = 0; i < txq->bd.ring_size; i++) {
953951
/* Initialize the BD for every fragment in the page. */
954952
bdp->cbd_sc = cpu_to_fec16(0);
955-
if (bdp->cbd_bufaddr &&
956-
!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
957-
dma_unmap_single(&fep->pdev->dev,
958-
fec32_to_cpu(bdp->cbd_bufaddr),
959-
fec16_to_cpu(bdp->cbd_datlen),
960-
DMA_TO_DEVICE);
961-
if (txq->tx_skbuff[i]) {
962-
dev_kfree_skb_any(txq->tx_skbuff[i]);
963-
txq->tx_skbuff[i] = NULL;
953+
if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
954+
if (bdp->cbd_bufaddr &&
955+
!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
956+
dma_unmap_single(&fep->pdev->dev,
957+
fec32_to_cpu(bdp->cbd_bufaddr),
958+
fec16_to_cpu(bdp->cbd_datlen),
959+
DMA_TO_DEVICE);
960+
if (txq->tx_buf[i].skb) {
961+
dev_kfree_skb_any(txq->tx_buf[i].skb);
962+
txq->tx_buf[i].skb = NULL;
963+
}
964+
} else {
965+
if (bdp->cbd_bufaddr)
966+
dma_unmap_single(&fep->pdev->dev,
967+
fec32_to_cpu(bdp->cbd_bufaddr),
968+
fec16_to_cpu(bdp->cbd_datlen),
969+
DMA_TO_DEVICE);
970+
971+
if (txq->tx_buf[i].xdp) {
972+
xdp_return_frame(txq->tx_buf[i].xdp);
973+
txq->tx_buf[i].xdp = NULL;
974+
}
975+
976+
/* restore default tx buffer type: FEC_TXBUF_T_SKB */
977+
txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
964978
}
979+
965980
bdp->cbd_bufaddr = cpu_to_fec32(0);
966981
bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
967982
}
@@ -1360,6 +1375,7 @@ static void
13601375
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
13611376
{
13621377
struct fec_enet_private *fep;
1378+
struct xdp_frame *xdpf;
13631379
struct bufdesc *bdp;
13641380
unsigned short status;
13651381
struct sk_buff *skb;
@@ -1387,16 +1403,31 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
13871403

13881404
index = fec_enet_get_bd_index(bdp, &txq->bd);
13891405

1390-
skb = txq->tx_skbuff[index];
1391-
txq->tx_skbuff[index] = NULL;
1392-
if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
1393-
dma_unmap_single(&fep->pdev->dev,
1394-
fec32_to_cpu(bdp->cbd_bufaddr),
1395-
fec16_to_cpu(bdp->cbd_datlen),
1396-
DMA_TO_DEVICE);
1397-
bdp->cbd_bufaddr = cpu_to_fec32(0);
1398-
if (!skb)
1399-
goto skb_done;
1406+
if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
1407+
skb = txq->tx_buf[index].skb;
1408+
txq->tx_buf[index].skb = NULL;
1409+
if (bdp->cbd_bufaddr &&
1410+
!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
1411+
dma_unmap_single(&fep->pdev->dev,
1412+
fec32_to_cpu(bdp->cbd_bufaddr),
1413+
fec16_to_cpu(bdp->cbd_datlen),
1414+
DMA_TO_DEVICE);
1415+
bdp->cbd_bufaddr = cpu_to_fec32(0);
1416+
if (!skb)
1417+
goto tx_buf_done;
1418+
} else {
1419+
xdpf = txq->tx_buf[index].xdp;
1420+
if (bdp->cbd_bufaddr)
1421+
dma_unmap_single(&fep->pdev->dev,
1422+
fec32_to_cpu(bdp->cbd_bufaddr),
1423+
fec16_to_cpu(bdp->cbd_datlen),
1424+
DMA_TO_DEVICE);
1425+
bdp->cbd_bufaddr = cpu_to_fec32(0);
1426+
if (!xdpf) {
1427+
txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
1428+
goto tx_buf_done;
1429+
}
1430+
}
14001431

14011432
/* Check for errors. */
14021433
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1415,21 +1446,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
14151446
ndev->stats.tx_carrier_errors++;
14161447
} else {
14171448
ndev->stats.tx_packets++;
1418-
ndev->stats.tx_bytes += skb->len;
1419-
}
14201449

1421-
/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
1422-
* are to time stamp the packet, so we still need to check time
1423-
* stamping enabled flag.
1424-
*/
1425-
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
1426-
fep->hwts_tx_en) &&
1427-
fep->bufdesc_ex) {
1428-
struct skb_shared_hwtstamps shhwtstamps;
1429-
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1430-
1431-
fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1432-
skb_tstamp_tx(skb, &shhwtstamps);
1450+
if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
1451+
ndev->stats.tx_bytes += skb->len;
1452+
else
1453+
ndev->stats.tx_bytes += xdpf->len;
14331454
}
14341455

14351456
/* Deferred means some collisions occurred during transmit,
@@ -1438,10 +1459,32 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
14381459
if (status & BD_ENET_TX_DEF)
14391460
ndev->stats.collisions++;
14401461

1441-
/* Free the sk buffer associated with this last transmit */
1442-
dev_kfree_skb_any(skb);
1443-
skb_done:
1444-
/* Make sure the update to bdp and tx_skbuff are performed
1462+
if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
1463+
/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
1464+
* are to time stamp the packet, so we still need to check time
1465+
* stamping enabled flag.
1466+
*/
1467+
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
1468+
fep->hwts_tx_en) && fep->bufdesc_ex) {
1469+
struct skb_shared_hwtstamps shhwtstamps;
1470+
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1471+
1472+
fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1473+
skb_tstamp_tx(skb, &shhwtstamps);
1474+
}
1475+
1476+
/* Free the sk buffer associated with this last transmit */
1477+
dev_kfree_skb_any(skb);
1478+
} else {
1479+
xdp_return_frame(xdpf);
1480+
1481+
txq->tx_buf[index].xdp = NULL;
1482+
/* restore default tx buffer type: FEC_TXBUF_T_SKB */
1483+
txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
1484+
}
1485+
1486+
tx_buf_done:
1487+
/* Make sure the update to bdp and tx_buf are performed
14451488
* before dirty_tx
14461489
*/
14471490
wmb();
@@ -3249,9 +3292,19 @@ static void fec_enet_free_buffers(struct net_device *ndev)
32493292
for (i = 0; i < txq->bd.ring_size; i++) {
32503293
kfree(txq->tx_bounce[i]);
32513294
txq->tx_bounce[i] = NULL;
3252-
skb = txq->tx_skbuff[i];
3253-
txq->tx_skbuff[i] = NULL;
3254-
dev_kfree_skb(skb);
3295+
3296+
if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
3297+
skb = txq->tx_buf[i].skb;
3298+
txq->tx_buf[i].skb = NULL;
3299+
dev_kfree_skb(skb);
3300+
} else {
3301+
if (txq->tx_buf[i].xdp) {
3302+
xdp_return_frame(txq->tx_buf[i].xdp);
3303+
txq->tx_buf[i].xdp = NULL;
3304+
}
3305+
3306+
txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3307+
}
32553308
}
32563309
}
32573310
}
@@ -3817,7 +3870,8 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
38173870
ebdp->cbd_esc = cpu_to_fec32(estatus);
38183871
}
38193872

3820-
txq->tx_skbuff[index] = NULL;
3873+
txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
3874+
txq->tx_buf[index].xdp = frame;
38213875

38223876
/* Make sure the updates to rest of the descriptor are performed before
38233877
* transferring ownership.

0 commit comments

Comments (0)