Skip to content

Commit a2ae70c

Browse files
Wei Fang (NXP) authored and Paolo Abeni committed
net: fec: add fec_alloc_rxq_buffers_pp() to allocate buffers from page pool
Currently, the buffers of RX queue are allocated from the page pool. In the subsequent patches to support XDP zero copy, the RX buffers will be allocated from the UMEM. Therefore, extract fec_alloc_rxq_buffers_pp() from fec_enet_alloc_rxq_buffers() and we will add another helper to allocate RX buffers from UMEM for the XDP zero copy mode. In addition, fec_alloc_rxq_buffers_pp() only initializes bdp->bufaddr and does not initialize other fields of bdp, because these will be initialized in fec_enet_bd_init(). Signed-off-by: Wei Fang <wei.fang@nxp.com> Link: https://patch.msgid.link/20260205085742.2685134-14-wei.fang@nxp.com Signed-off-by: Paolo Abeni <pabeni@redhat.com>
1 parent edd3931 commit a2ae70c

1 file changed

Lines changed: 58 additions & 32 deletions

File tree

drivers/net/ethernet/freescale/fec_main.c

Lines changed: 58 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -990,6 +990,13 @@ static void fec_enet_bd_init(struct net_device *dev)
990990
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
991991
else
992992
bdp->cbd_sc = cpu_to_fec16(0);
993+
994+
if (fep->bufdesc_ex) {
995+
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
996+
997+
ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
998+
}
999+
9931000
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
9941001
}
9951002

@@ -3436,6 +3443,24 @@ static void fec_xdp_rxq_info_unreg(struct fec_enet_priv_rx_q *rxq)
34363443
}
34373444
}
34383445

3446+
static void fec_free_rxq_buffers(struct fec_enet_priv_rx_q *rxq)
3447+
{
3448+
int i;
3449+
3450+
for (i = 0; i < rxq->bd.ring_size; i++) {
3451+
struct page *page = rxq->rx_buf[i];
3452+
3453+
if (!page)
3454+
continue;
3455+
3456+
page_pool_put_full_page(rxq->page_pool, page, false);
3457+
rxq->rx_buf[i] = NULL;
3458+
}
3459+
3460+
page_pool_destroy(rxq->page_pool);
3461+
rxq->page_pool = NULL;
3462+
}
3463+
34393464
static void fec_enet_free_buffers(struct net_device *ndev)
34403465
{
34413466
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -3449,16 +3474,10 @@ static void fec_enet_free_buffers(struct net_device *ndev)
34493474
rxq = fep->rx_queue[q];
34503475

34513476
fec_xdp_rxq_info_unreg(rxq);
3452-
3453-
for (i = 0; i < rxq->bd.ring_size; i++)
3454-
page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
3455-
false);
3477+
fec_free_rxq_buffers(rxq);
34563478

34573479
for (i = 0; i < XDP_STATS_TOTAL; i++)
34583480
rxq->stats[i] = 0;
3459-
3460-
page_pool_destroy(rxq->page_pool);
3461-
rxq->page_pool = NULL;
34623481
}
34633482

34643483
for (q = 0; q < fep->num_tx_queues; q++) {
@@ -3557,22 +3576,18 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
35573576
return ret;
35583577
}
35593578

3560-
static int
3561-
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
3579+
static int fec_alloc_rxq_buffers_pp(struct fec_enet_private *fep,
3580+
struct fec_enet_priv_rx_q *rxq)
35623581
{
3563-
struct fec_enet_private *fep = netdev_priv(ndev);
3564-
struct fec_enet_priv_rx_q *rxq;
3582+
struct bufdesc *bdp = rxq->bd.base;
35653583
dma_addr_t phys_addr;
3566-
struct bufdesc *bdp;
35673584
struct page *page;
35683585
int i, err;
35693586

3570-
rxq = fep->rx_queue[queue];
3571-
bdp = rxq->bd.base;
3572-
35733587
err = fec_enet_create_page_pool(fep, rxq);
35743588
if (err < 0) {
3575-
netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
3589+
netdev_err(fep->netdev, "%s failed queue %d (%d)\n",
3590+
__func__, rxq->bd.qid, err);
35763591
return err;
35773592
}
35783593

@@ -3591,36 +3606,47 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
35913606

35923607
for (i = 0; i < rxq->bd.ring_size; i++) {
35933608
page = page_pool_dev_alloc_pages(rxq->page_pool);
3594-
if (!page)
3595-
goto err_alloc;
3609+
if (!page) {
3610+
err = -ENOMEM;
3611+
goto free_rx_buffers;
3612+
}
35963613

35973614
phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
35983615
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
3599-
36003616
rxq->rx_buf[i] = page;
3601-
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
3602-
3603-
if (fep->bufdesc_ex) {
3604-
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3605-
ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
3606-
}
3607-
36083617
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
36093618
}
36103619

3611-
/* Set the last buffer to wrap. */
3612-
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
3613-
bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
3620+
return 0;
3621+
3622+
free_rx_buffers:
3623+
fec_free_rxq_buffers(rxq);
3624+
3625+
return err;
3626+
}
3627+
3628+
static int
3629+
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
3630+
{
3631+
struct fec_enet_private *fep = netdev_priv(ndev);
3632+
struct fec_enet_priv_rx_q *rxq;
3633+
int err;
3634+
3635+
rxq = fep->rx_queue[queue];
3636+
err = fec_alloc_rxq_buffers_pp(fep, rxq);
3637+
if (err)
3638+
goto free_buffers;
36143639

36153640
err = fec_xdp_rxq_info_reg(fep, rxq);
36163641
if (err)
3617-
goto err_alloc;
3642+
goto free_buffers;
36183643

36193644
return 0;
36203645

3621-
err_alloc:
3646+
free_buffers:
36223647
fec_enet_free_buffers(ndev);
3623-
return -ENOMEM;
3648+
3649+
return err;
36243650
}
36253651

36263652
static int

0 commit comments

Comments (0)