Skip to content

Commit ee24284

Browse files
shailend-g authored and davem330 committed
gve: Alloc and free QPLs with the rings
Every tx and rx ring has its own queue-page-list (QPL) that serves as the bounce buffer. Previously we were allocating QPLs for all queues before the queues themselves were allocated and later associating a QPL with a queue. This is avoidable complexity: it is much more natural for each queue to allocate and free its own QPL. Moreover, the advent of new queue-manipulating ndo hooks make it hard to keep things as is: we would need to transfer a QPL from an old queue to a new queue, and that is unpleasant. Tested-by: Mina Almasry <almasrymina@google.com> Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com> Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com> Signed-off-by: Shailend Chand <shailend@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent af9bcf9 commit ee24284

7 files changed

Lines changed: 171 additions & 331 deletions

File tree

drivers/net/ethernet/google/gve/gve.h

Lines changed: 6 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -638,26 +638,10 @@ struct gve_ptype_lut {
638638
struct gve_ptype ptypes[GVE_NUM_PTYPES];
639639
};
640640

641-
/* Parameters for allocating queue page lists */
642-
struct gve_qpls_alloc_cfg {
643-
struct gve_queue_config *tx_cfg;
644-
struct gve_queue_config *rx_cfg;
645-
646-
u16 num_xdp_queues;
647-
bool raw_addressing;
648-
bool is_gqi;
649-
650-
/* Allocated resources are returned here */
651-
struct gve_queue_page_list *qpls;
652-
};
653-
654641
/* Parameters for allocating resources for tx queues */
655642
struct gve_tx_alloc_rings_cfg {
656643
struct gve_queue_config *qcfg;
657644

658-
/* qpls must already be allocated */
659-
struct gve_queue_page_list *qpls;
660-
661645
u16 ring_size;
662646
u16 start_idx;
663647
u16 num_rings;
@@ -673,9 +657,6 @@ struct gve_rx_alloc_rings_cfg {
673657
struct gve_queue_config *qcfg;
674658
struct gve_queue_config *qcfg_tx;
675659

676-
/* qpls must already be allocated */
677-
struct gve_queue_page_list *qpls;
678-
679660
u16 ring_size;
680661
u16 packet_buffer_size;
681662
bool raw_addressing;
@@ -701,7 +682,6 @@ struct gve_priv {
701682
struct net_device *dev;
702683
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
703684
struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
704-
struct gve_queue_page_list *qpls; /* array of num qpls */
705685
struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
706686
struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
707687
dma_addr_t irq_db_indices_bus;
@@ -1025,7 +1005,6 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
10251005
return priv->tx_cfg.max_queues + rx_qid;
10261006
}
10271007

1028-
/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
10291008
static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
10301009
{
10311010
return tx_cfg->max_queues + rx_qid;
@@ -1036,7 +1015,6 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
10361015
return gve_tx_qpl_id(priv, 0);
10371016
}
10381017

1039-
/* Returns the index into priv->qpls where the first rx queue's QPL resides */
10401018
static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
10411019
{
10421020
return gve_get_rx_qpl_id(tx_cfg, 0);
@@ -1090,6 +1068,12 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
10901068
enum dma_data_direction, gfp_t gfp_flags);
10911069
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
10921070
enum dma_data_direction);
1071+
/* qpls */
1072+
struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
1073+
u32 id, int pages);
1074+
void gve_free_queue_page_list(struct gve_priv *priv,
1075+
struct gve_queue_page_list *qpl,
1076+
u32 id);
10931077
/* tx handling */
10941078
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
10951079
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
@@ -1126,11 +1110,9 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
11261110
void gve_schedule_reset(struct gve_priv *priv);
11271111
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
11281112
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
1129-
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
11301113
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
11311114
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
11321115
int gve_adjust_config(struct gve_priv *priv,
1133-
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
11341116
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
11351117
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
11361118
int gve_adjust_queues(struct gve_priv *priv,

drivers/net/ethernet/google/gve/gve_ethtool.c

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -538,20 +538,17 @@ static int gve_adjust_ring_sizes(struct gve_priv *priv,
538538
{
539539
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
540540
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
541-
struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
542541
int err;
543542

544543
/* get current queue configuration */
545-
gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
546-
&tx_alloc_cfg, &rx_alloc_cfg);
544+
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
547545

548546
/* copy over the new ring_size from ethtool */
549547
tx_alloc_cfg.ring_size = new_tx_desc_cnt;
550548
rx_alloc_cfg.ring_size = new_rx_desc_cnt;
551549

552550
if (netif_running(priv->dev)) {
553-
err = gve_adjust_config(priv, &qpls_alloc_cfg,
554-
&tx_alloc_cfg, &rx_alloc_cfg);
551+
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
555552
if (err)
556553
return err;
557554
}

0 commit comments

Comments (0)