
Commit 770f52d

shailend-g authored and davem330 committed
gve: Reset Rx ring state in the ring-stop funcs
This does not fix any existing bug. In anticipation of the ndo queue API hooks that alloc/free/start/stop a single Rx queue, the already existing per-queue stop functions are being made more robust. Specifically for this use case:

rx_queue_n.stop() + rx_queue_n.start()

Note that this is not the use case used in devmem TCP (the first place these new ndo hooks would be used). There the use case is:

new_queue.alloc() + old_queue.stop() + new_queue.start() + old_queue.free()

Tested-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Signed-off-by: Shailend Chand <shailend@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
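For illustration, a rough sketch of those two orderings as driver-facing pseudocode; the hook names and signatures below are hypothetical placeholders for the anticipated per-queue ndo ops, not symbols from this patch or the kernel:

/* Sketch only: hook names and signatures are hypothetical placeholders. */

/* Use case this patch hardens: restart the same Rx queue in place. */
old_mem = queue_stop(dev, n);        /* rx_queue_n.stop()  */
queue_start(dev, n, old_mem);        /* rx_queue_n.start() */

/* Use case in devmem TCP: swap in a freshly allocated queue. */
new_mem = queue_mem_alloc(dev, n);   /* new_queue.alloc() */
old_mem = queue_stop(dev, n);        /* old_queue.stop()  */
queue_start(dev, n, new_mem);        /* new_queue.start() */
queue_mem_free(dev, old_mem);        /* old_queue.free()  */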
1 parent 9a5e077 commit 770f52d

2 files changed: 120 additions & 30 deletions


drivers/net/ethernet/google/gve/gve_rx.c

Lines changed: 37 additions & 11 deletions
@@ -53,6 +53,41 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
 	rx->data.page_info = NULL;
 }
 
+static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+	ctx->skb_head = NULL;
+	ctx->skb_tail = NULL;
+	ctx->total_size = 0;
+	ctx->frag_cnt = 0;
+	ctx->drop_pkt = false;
+}
+
+static void gve_rx_init_ring_state_gqi(struct gve_rx_ring *rx)
+{
+	rx->desc.seqno = 1;
+	rx->cnt = 0;
+	gve_rx_ctx_clear(&rx->ctx);
+}
+
+static void gve_rx_reset_ring_gqi(struct gve_priv *priv, int idx)
+{
+	struct gve_rx_ring *rx = &priv->rx[idx];
+	const u32 slots = priv->rx_desc_cnt;
+	size_t size;
+
+	/* Reset desc ring */
+	if (rx->desc.desc_ring) {
+		size = slots * sizeof(rx->desc.desc_ring[0]);
+		memset(rx->desc.desc_ring, 0, size);
+	}
+
+	/* Reset q_resources */
+	if (rx->q_resources)
+		memset(rx->q_resources, 0, sizeof(*rx->q_resources));
+
+	gve_rx_init_ring_state_gqi(rx);
+}
+
 void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
 {
 	int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -62,6 +97,7 @@ void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
 
 	gve_remove_napi(priv, ntfy_idx);
 	gve_rx_remove_from_block(priv, idx);
+	gve_rx_reset_ring_gqi(priv, idx);
 }
 
 static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
@@ -222,15 +258,6 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
 	return err;
 }
 
-static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
-{
-	ctx->skb_head = NULL;
-	ctx->skb_tail = NULL;
-	ctx->total_size = 0;
-	ctx->frag_cnt = 0;
-	ctx->drop_pkt = false;
-}
-
 void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
 {
 	int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -309,9 +336,8 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
 		err = -ENOMEM;
 		goto abort_with_q_resources;
 	}
-	rx->cnt = 0;
 	rx->db_threshold = slots / 2;
-	rx->desc.seqno = 1;
+	gve_rx_init_ring_state_gqi(rx);
 
 	rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 	gve_rx_ctx_clear(&rx->ctx);
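The net effect of these hunks: gve_rx_stop_ring_gqi() now leaves the ring in the same freshly initialized state that gve_rx_alloc_ring_gqi() establishes, so a stopped ring can be started again without reallocating it. A minimal sketch of that restart, assuming the ring stays allocated throughout and that gve_rx_start_ring_gqi() re-registers the ring with its block and NAPI as in the existing driver code:

/* Sketch only: restart GQI Rx queue idx without freeing/reallocating it. */
gve_rx_stop_ring_gqi(priv, idx);   /* removes NAPI, detaches from block, resets ring state */
gve_rx_start_ring_gqi(priv, idx);  /* re-attaches the ring and re-adds its NAPI */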

drivers/net/ethernet/google/gve/gve_rx_dqo.c

Lines changed: 83 additions & 19 deletions
@@ -211,6 +211,82 @@ static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
 	}
 }
 
+static void gve_rx_init_ring_state_dqo(struct gve_rx_ring *rx,
+				       const u32 buffer_queue_slots,
+				       const u32 completion_queue_slots)
+{
+	int i;
+
+	/* Set buffer queue state */
+	rx->dqo.bufq.mask = buffer_queue_slots - 1;
+	rx->dqo.bufq.head = 0;
+	rx->dqo.bufq.tail = 0;
+
+	/* Set completion queue state */
+	rx->dqo.complq.num_free_slots = completion_queue_slots;
+	rx->dqo.complq.mask = completion_queue_slots - 1;
+	rx->dqo.complq.cur_gen_bit = 0;
+	rx->dqo.complq.head = 0;
+
+	/* Set RX SKB context */
+	rx->ctx.skb_head = NULL;
+	rx->ctx.skb_tail = NULL;
+
+	/* Set up linked list of buffer IDs */
+	if (rx->dqo.buf_states) {
+		for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
+			rx->dqo.buf_states[i].next = i + 1;
+		rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
+	}
+
+	rx->dqo.free_buf_states = 0;
+	rx->dqo.recycled_buf_states.head = -1;
+	rx->dqo.recycled_buf_states.tail = -1;
+	rx->dqo.used_buf_states.head = -1;
+	rx->dqo.used_buf_states.tail = -1;
+}
+
+static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
+{
+	struct gve_rx_ring *rx = &priv->rx[idx];
+	size_t size;
+	int i;
+
+	const u32 buffer_queue_slots = priv->rx_desc_cnt;
+	const u32 completion_queue_slots = priv->rx_desc_cnt;
+
+	/* Reset buffer queue */
+	if (rx->dqo.bufq.desc_ring) {
+		size = sizeof(rx->dqo.bufq.desc_ring[0]) *
+			buffer_queue_slots;
+		memset(rx->dqo.bufq.desc_ring, 0, size);
+	}
+
+	/* Reset completion queue */
+	if (rx->dqo.complq.desc_ring) {
+		size = sizeof(rx->dqo.complq.desc_ring[0]) *
+			completion_queue_slots;
+		memset(rx->dqo.complq.desc_ring, 0, size);
+	}
+
+	/* Reset q_resources */
+	if (rx->q_resources)
+		memset(rx->q_resources, 0, sizeof(*rx->q_resources));
+
+	/* Reset buf states */
+	if (rx->dqo.buf_states) {
+		for (i = 0; i < rx->dqo.num_buf_states; i++) {
+			struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
+
+			if (bs->page_info.page)
+				gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
+		}
+	}
+
+	gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
+				   completion_queue_slots);
+}
+
 void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
 {
 	int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -220,6 +296,7 @@ void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
 
 	gve_remove_napi(priv, ntfy_idx);
 	gve_rx_remove_from_block(priv, idx);
+	gve_rx_reset_ring_dqo(priv, idx);
 }
 
 static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
@@ -273,10 +350,10 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
 	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
 }
 
-static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx,
+				 const u32 buf_count)
 {
 	struct device *hdev = &priv->pdev->dev;
-	int buf_count = rx->dqo.bufq.mask + 1;
 
 	rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count,
 						   &rx->dqo.hdr_bufs.addr, GFP_KERNEL);
@@ -301,7 +378,6 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 {
 	struct device *hdev = &priv->pdev->dev;
 	size_t size;
-	int i;
 
 	const u32 buffer_queue_slots = cfg->ring_size;
 	const u32 completion_queue_slots = cfg->ring_size;
@@ -311,11 +387,6 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 	memset(rx, 0, sizeof(*rx));
 	rx->gve = priv;
 	rx->q_num = idx;
-	rx->dqo.bufq.mask = buffer_queue_slots - 1;
-	rx->dqo.complq.num_free_slots = completion_queue_slots;
-	rx->dqo.complq.mask = completion_queue_slots - 1;
-	rx->ctx.skb_head = NULL;
-	rx->ctx.skb_tail = NULL;
 
 	rx->dqo.num_buf_states = cfg->raw_addressing ?
 		min_t(s16, S16_MAX, buffer_queue_slots * 4) :
@@ -328,19 +399,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 
 	/* Allocate header buffers for header-split */
 	if (cfg->enable_header_split)
-		if (gve_rx_alloc_hdr_bufs(priv, rx))
+		if (gve_rx_alloc_hdr_bufs(priv, rx, buffer_queue_slots))
 			goto err;
 
-	/* Set up linked list of buffer IDs */
-	for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
-		rx->dqo.buf_states[i].next = i + 1;
-
-	rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
-	rx->dqo.recycled_buf_states.head = -1;
-	rx->dqo.recycled_buf_states.tail = -1;
-	rx->dqo.used_buf_states.head = -1;
-	rx->dqo.used_buf_states.tail = -1;
-
 	/* Allocate RX completion queue */
 	size = sizeof(rx->dqo.complq.desc_ring[0]) *
 		completion_queue_slots;
@@ -368,6 +429,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 	if (!rx->q_resources)
 		goto err;
 
+	gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
+				   completion_queue_slots);
+
 	return 0;
 
 err:
