
Commit df3aafe

Itamar-Gozlan authored and kuba-moo committed
Revert "net/mlx5: DR, Supporting inline WQE when possible"
This reverts commit 95c337c. The revert is required due to the suspicion that it causes some tests to fail, and it will be moved to further investigation.

Fixes: 95c337c ("net/mlx5: DR, Supporting inline WQE when possible")
Signed-off-by: Itamar Gozlan <igozlan@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Link: https://lore.kernel.org/r/20231114215846.5902-2-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
1 parent a6a6a0a commit df3aafe

1 file changed

Lines changed: 13 additions & 102 deletions

drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c

@@ -52,7 +52,6 @@ struct dr_qp_init_attr {
 	u32 cqn;
 	u32 pdn;
 	u32 max_send_wr;
-	u32 max_send_sge;
 	struct mlx5_uars_page *uar;
 	u8 isolate_vl_tc:1;
 };
@@ -247,45 +246,13 @@ static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
 	return err == CQ_POLL_ERR ? err : npolled;
 }
 
-static int dr_qp_get_args_update_send_wqe_size(struct dr_qp_init_attr *attr)
-{
-	return roundup_pow_of_two(sizeof(struct mlx5_wqe_ctrl_seg) +
-				  sizeof(struct mlx5_wqe_flow_update_ctrl_seg) +
-				  sizeof(struct mlx5_wqe_header_modify_argument_update_seg));
-}
-
-/* We calculate for specific RC QP with the required functionality */
-static int dr_qp_calc_rc_send_wqe(struct dr_qp_init_attr *attr)
-{
-	int update_arg_size;
-	int inl_size = 0;
-	int tot_size;
-	int size;
-
-	update_arg_size = dr_qp_get_args_update_send_wqe_size(attr);
-
-	size = sizeof(struct mlx5_wqe_ctrl_seg) +
-	       sizeof(struct mlx5_wqe_raddr_seg);
-	inl_size = size + ALIGN(sizeof(struct mlx5_wqe_inline_seg) +
-				DR_STE_SIZE, 16);
-
-	size += attr->max_send_sge * sizeof(struct mlx5_wqe_data_seg);
-
-	size = max(size, update_arg_size);
-
-	tot_size = max(size, inl_size);
-
-	return ALIGN(tot_size, MLX5_SEND_WQE_BB);
-}
-
 static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
 					 struct dr_qp_init_attr *attr)
 {
 	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
 	u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
 	struct mlx5_wq_param wqp;
 	struct mlx5dr_qp *dr_qp;
-	int wqe_size;
 	int inlen;
 	void *qpc;
 	void *in;
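
For reference, the removed dr_qp_calc_rc_send_wqe() sized the send WQE as the maximum of three candidate layouts: a gather WQE (ctrl + raddr + one data segment per SGE), an inline WQE carrying a full STE, and an argument-update WQE. Below is a minimal standalone sketch of that arithmetic; the segment sizes it plugs in (16-byte ctrl/raddr/data segments, 4-byte inline header, 64-byte basic block and STE) are assumptions for illustration, not values stated in this commit.

/* Standalone sketch of the reverted sizing logic; not driver code.
 * Segment sizes are assumptions for illustration: 16-byte ctrl,
 * raddr and data segments, 4-byte inline header, 64-byte WQE basic
 * block (BB) and 64-byte STE.
 */
#include <stdio.h>

#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define MLX5_SEND_WQE_BB 64
#define DR_STE_SIZE      64

int main(void)
{
	int ctrl = 16, raddr = 16, dseg = 16, inl_hdr = 4;
	int max_send_sge = 1;	/* what init_attr.max_send_sge was set to */

	/* argument-update WQE: roundup_pow_of_two(16 + 16 + 64) */
	int update_arg_size = 128;

	int size = ctrl + raddr;				/* 32 */
	int inl_size = size + ALIGN(inl_hdr + DR_STE_SIZE, 16);	/* 112 */

	size += max_send_sge * dseg;				/* 48 */
	if (size < update_arg_size)
		size = update_arg_size;				/* 128 */

	int tot_size = size > inl_size ? size : inl_size;	/* 128 */

	printf("wqe_size = %d\n", ALIGN(tot_size, MLX5_SEND_WQE_BB));
	return 0;	/* prints wqe_size = 128, i.e. two 64-byte BBs */
}

Under those assumptions the argument-update layout dominates and the QP ends up with 128-byte (two-BB) send WQEs.
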
@@ -365,15 +332,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
 	if (err)
 		goto err_in;
 	dr_qp->uar = attr->uar;
-	wqe_size = dr_qp_calc_rc_send_wqe(attr);
-	dr_qp->max_inline_data = min(wqe_size -
-				     (sizeof(struct mlx5_wqe_ctrl_seg) +
-				      sizeof(struct mlx5_wqe_raddr_seg) +
-				      sizeof(struct mlx5_wqe_inline_seg)),
-				     (2 * MLX5_SEND_WQE_BB -
-				      (sizeof(struct mlx5_wqe_ctrl_seg) +
-				       sizeof(struct mlx5_wqe_raddr_seg) +
-				       sizeof(struct mlx5_wqe_inline_seg))));
 
 	return dr_qp;
 
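Continuing the sketch above, and under the same assumed segment sizes, both arms of the reverted min() evaluate to 92 bytes: 128 - (16 + 16 + 4) on one side and 2 * 64 - (16 + 16 + 4) on the other, so the QP would have advertised 92 bytes of max_inline_data.
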
@@ -437,48 +395,8 @@ dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
 		 MLX5_SEND_WQE_DS;
 }
 
-static int dr_set_data_inl_seg(struct mlx5dr_qp *dr_qp,
-			       struct dr_data_seg *data_seg, void *wqe)
-{
-	int inline_header_size = sizeof(struct mlx5_wqe_ctrl_seg) +
-				sizeof(struct mlx5_wqe_raddr_seg) +
-				sizeof(struct mlx5_wqe_inline_seg);
-	struct mlx5_wqe_inline_seg *seg;
-	int left_space;
-	int inl = 0;
-	void *addr;
-	int len;
-	int idx;
-
-	seg = wqe;
-	wqe += sizeof(*seg);
-	addr = (void *)(unsigned long)(data_seg->addr);
-	len = data_seg->length;
-	inl += len;
-	left_space = MLX5_SEND_WQE_BB - inline_header_size;
-
-	if (likely(len > left_space)) {
-		memcpy(wqe, addr, left_space);
-		len -= left_space;
-		addr += left_space;
-		idx = (dr_qp->sq.pc + 1) & (dr_qp->sq.wqe_cnt - 1);
-		wqe = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
-	}
-
-	memcpy(wqe, addr, len);
-
-	if (likely(inl)) {
-		seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
-		return DIV_ROUND_UP(inl + sizeof(seg->byte_count),
-				    MLX5_SEND_WQE_DS);
-	} else {
-		return 0;
-	}
-}
-
 static void
-dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
-				  struct mlx5_wqe_ctrl_seg *wq_ctrl,
+dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
 				  u64 remote_addr,
 				  u32 rkey,
 				  struct dr_data_seg *data_seg,
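
The removed helper wrote the payload inline directly after the 36-byte ctrl + raddr + inline header (16 + 16 + 4, under the segment sizes assumed above): the first MLX5_SEND_WQE_BB - 36 = 28 bytes land in the current basic block, the remainder spills into the next BB fetched from the cyclic work queue, and the returned size is DIV_ROUND_UP(inl + 4, 16), i.e. 5 DS units for a 64-byte STE.
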
@@ -494,17 +412,15 @@ dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
 	wq_raddr->reserved = 0;
 
 	wq_dseg = (void *)(wq_raddr + 1);
-	/* WQE ctrl segment + WQE remote addr segment */
-	*size = (sizeof(*wq_ctrl) + sizeof(*wq_raddr)) / MLX5_SEND_WQE_DS;
 
-	if (data_seg->send_flags & IB_SEND_INLINE) {
-		*size += dr_set_data_inl_seg(dr_qp, data_seg, wq_dseg);
-	} else {
-		wq_dseg->byte_count = cpu_to_be32(data_seg->length);
-		wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
-		wq_dseg->addr = cpu_to_be64(data_seg->addr);
-		*size += sizeof(*wq_dseg) / MLX5_SEND_WQE_DS; /* WQE data segment */
-	}
+	wq_dseg->byte_count = cpu_to_be32(data_seg->length);
+	wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
+	wq_dseg->addr = cpu_to_be64(data_seg->addr);
+
+	*size = (sizeof(*wq_ctrl) +    /* WQE ctrl segment */
+		 sizeof(*wq_dseg) +    /* WQE data segment */
+		 sizeof(*wq_raddr)) /  /* WQE remote addr segment */
+		MLX5_SEND_WQE_DS;
 }
 
 static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl,
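
With 16-byte ctrl, raddr and data segments (as assumed above) and MLX5_SEND_WQE_DS = 16, the restored computation always yields *size = 3; the removed inline branch instead started from 2 DS units and added however many units dr_set_data_inl_seg() consumed.
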
@@ -535,7 +451,7 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
 	switch (opcode) {
 	case MLX5_OPCODE_RDMA_READ:
 	case MLX5_OPCODE_RDMA_WRITE:
-		dr_rdma_handle_icm_write_segments(dr_qp, wq_ctrl, remote_addr,
+		dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr,
 						  rkey, data_seg, &size);
 		break;
 	case MLX5_OPCODE_FLOW_TBL_ACCESS:
@@ -656,7 +572,7 @@ static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring,
 	if (send_ring->pending_wqe % send_ring->signal_th == 0)
 		send_info->write.send_flags |= IB_SEND_SIGNALED;
 	else
-		send_info->write.send_flags &= ~IB_SEND_SIGNALED;
+		send_info->write.send_flags = 0;
 }
 
 static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
@@ -680,13 +596,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
 	}
 
 	send_ring->pending_wqe++;
-	if (!send_info->write.lkey)
-		send_info->write.send_flags |= IB_SEND_INLINE;
 
 	if (send_ring->pending_wqe % send_ring->signal_th == 0)
 		send_info->write.send_flags |= IB_SEND_SIGNALED;
-	else
-		send_info->write.send_flags &= ~IB_SEND_SIGNALED;
 
 	send_ring->pending_wqe++;
 	send_info->read.length = send_info->write.length;
@@ -696,9 +608,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
 	send_info->read.lkey = send_ring->sync_mr->mkey;
 
 	if (send_ring->pending_wqe % send_ring->signal_th == 0)
-		send_info->read.send_flags |= IB_SEND_SIGNALED;
+		send_info->read.send_flags = IB_SEND_SIGNALED;
 	else
-		send_info->read.send_flags &= ~IB_SEND_SIGNALED;
+		send_info->read.send_flags = 0;
 }
 
 static void dr_fill_data_segs(struct mlx5dr_domain *dmn,
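
Both before and after the revert, only every signal_th-th post requests a completion, which keeps CQ traffic down; the revert simply assigns the flags outright instead of clearing a single bit. A minimal sketch of that cadence follows; the flag value and signal_th = 4 are assumptions for illustration, not the driver's values.

/* Sketch of the "signal every Nth WQE" cadence; not driver code. */
#include <stdio.h>

#define IB_SEND_SIGNALED (1 << 0)	/* assumed value for illustration */

int main(void)
{
	unsigned int pending_wqe = 0;
	unsigned int signal_th = 4;	/* assumed signaling threshold */

	for (int post = 1; post <= 8; post++) {
		unsigned int send_flags;

		pending_wqe++;
		/* restored logic: assign the flags rather than masking a
		 * bit out, so no stale flag can linger across fills */
		if (pending_wqe % signal_th == 0)
			send_flags = IB_SEND_SIGNALED;
		else
			send_flags = 0;

		printf("post %d: %s\n", post,
		       send_flags & IB_SEND_SIGNALED ? "signaled" : "unsignaled");
	}
	return 0;
}
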
@@ -1345,7 +1257,6 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
 	dmn->send_ring->cq->qp = dmn->send_ring->qp;
 
 	dmn->info.max_send_wr = QUEUE_SIZE;
-	init_attr.max_send_sge = 1;
 	dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data,
 					DR_STE_SIZE);
 