Skip to content

Commit 98e891b

Browse files
rpearsonhpe authored and jgunthorpe committed
RDMA/rxe: Remove qp->req.state
The rxe driver has four different QP state variables, qp->attr.qp_state, qp->req.state, qp->comp.state, and qp->resp.state. All of these basically carry the same information. This patch replaces uses of qp->req.state by qp->attr.qp_state and enum rxe_qp_state. This is the third of three patches which will remove all but the qp->attr.qp_state variable. This will bring the driver closer to the IBA description. Link: https://lore.kernel.org/r/20230405042611.6467-3-rpearsonhpe@gmail.com Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
1 parent f55efc2 commit 98e891b

7 files changed

Lines changed: 34 additions & 66 deletions

File tree

drivers/infiniband/sw/rxe/rxe_comp.c

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -491,12 +491,11 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
491491
}
492492
}
493493

494-
if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
494+
if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
495495
/* state_lock used by requester & completer */
496496
spin_lock_bh(&qp->state_lock);
497-
if ((qp->req.state == QP_STATE_DRAIN) &&
498-
(qp->comp.psn == qp->req.psn)) {
499-
qp->req.state = QP_STATE_DRAINED;
497+
if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
498+
qp->attr.sq_draining = 0;
500499
spin_unlock_bh(&qp->state_lock);
501500

502501
if (qp->ibqp.event_handler) {
@@ -723,7 +722,7 @@ int rxe_completer(struct rxe_qp *qp)
723722
* (4) the timeout parameter is set
724723
*/
725724
if ((qp_type(qp) == IB_QPT_RC) &&
726-
(qp->req.state == QP_STATE_READY) &&
725+
(qp_state(qp) >= IB_QPS_RTS) &&
727726
(psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
728727
qp->qp_timeout_jiffies)
729728
mod_timer(&qp->retrans_timer,

drivers/infiniband/sw/rxe/rxe_net.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -413,8 +413,8 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
413413
int is_request = pkt->mask & RXE_REQ_MASK;
414414
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
415415

416-
if ((is_request && (qp->req.state != QP_STATE_READY)) ||
417-
(!is_request && (qp_state(qp) <= IB_QPS_RTR))) {
416+
if ((is_request && (qp_state(qp) < IB_QPS_RTS)) ||
417+
(!is_request && (qp_state(qp) < IB_QPS_RTR))) {
418418
rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
419419
goto drop;
420420
}

drivers/infiniband/sw/rxe/rxe_qp.c

Lines changed: 14 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -231,7 +231,6 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
231231
qp->req.wqe_index = queue_get_producer(qp->sq.queue,
232232
QUEUE_TYPE_FROM_CLIENT);
233233

234-
qp->req.state = QP_STATE_RESET;
235234
qp->req.opcode = -1;
236235
qp->comp.opcode = -1;
237236

@@ -394,12 +393,9 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
394393
goto err1;
395394
}
396395

397-
if (mask & IB_QP_STATE) {
398-
if (cur_state == IB_QPS_SQD) {
399-
if (qp->req.state == QP_STATE_DRAIN &&
400-
new_state != IB_QPS_ERR)
401-
goto err1;
402-
}
396+
if (mask & IB_QP_STATE && cur_state == IB_QPS_SQD) {
397+
if (qp->attr.sq_draining && new_state != IB_QPS_ERR)
398+
goto err1;
403399
}
404400

405401
if (mask & IB_QP_PORT) {
@@ -474,9 +470,6 @@ static void rxe_qp_reset(struct rxe_qp *qp)
474470
rxe_disable_task(&qp->comp.task);
475471
rxe_disable_task(&qp->req.task);
476472

477-
/* move qp to the reset state */
478-
qp->req.state = QP_STATE_RESET;
479-
480473
/* drain work and packet queuesc */
481474
rxe_requester(qp);
482475
rxe_completer(qp);
@@ -512,22 +505,9 @@ static void rxe_qp_reset(struct rxe_qp *qp)
512505
rxe_enable_task(&qp->req.task);
513506
}
514507

515-
/* drain the send queue */
516-
static void rxe_qp_drain(struct rxe_qp *qp)
517-
{
518-
if (qp->sq.queue) {
519-
if (qp->req.state != QP_STATE_DRAINED) {
520-
qp->req.state = QP_STATE_DRAIN;
521-
rxe_sched_task(&qp->comp.task);
522-
rxe_sched_task(&qp->req.task);
523-
}
524-
}
525-
}
526-
527508
/* move the qp to the error state */
528509
void rxe_qp_error(struct rxe_qp *qp)
529510
{
530-
qp->req.state = QP_STATE_ERROR;
531511
qp->attr.qp_state = IB_QPS_ERR;
532512

533513
/* drain work and packet queues */
@@ -540,6 +520,8 @@ void rxe_qp_error(struct rxe_qp *qp)
540520
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
541521
struct ib_udata *udata)
542522
{
523+
enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
524+
attr->cur_qp_state : qp->attr.qp_state;
543525
int err;
544526

545527
if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
@@ -656,7 +638,6 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
656638

657639
case IB_QPS_INIT:
658640
rxe_dbg_qp(qp, "state -> INIT\n");
659-
qp->req.state = QP_STATE_INIT;
660641
break;
661642

662643
case IB_QPS_RTR:
@@ -665,12 +646,15 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
665646

666647
case IB_QPS_RTS:
667648
rxe_dbg_qp(qp, "state -> RTS\n");
668-
qp->req.state = QP_STATE_READY;
669649
break;
670650

671651
case IB_QPS_SQD:
672652
rxe_dbg_qp(qp, "state -> SQD\n");
673-
rxe_qp_drain(qp);
653+
if (cur_state != IB_QPS_SQD) {
654+
qp->attr.sq_draining = 1;
655+
rxe_sched_task(&qp->comp.task);
656+
rxe_sched_task(&qp->req.task);
657+
}
674658
break;
675659

676660
case IB_QPS_SQE:
@@ -708,16 +692,11 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
708692
rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
709693
rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);
710694

711-
if (qp->req.state == QP_STATE_DRAIN) {
712-
attr->sq_draining = 1;
713-
/* applications that get this state
714-
* typically spin on it. yield the
715-
* processor
716-
*/
695+
/* Applications that get this state typically spin on it.
696+
* Yield the processor
697+
*/
698+
if (qp->attr.sq_draining)
717699
cond_resched();
718-
} else {
719-
attr->sq_draining = 0;
720-
}
721700

722701
rxe_dbg_qp(qp, "attr->sq_draining = %d\n", attr->sq_draining);
723702

drivers/infiniband/sw/rxe/rxe_recv.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -39,11 +39,12 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
3939
}
4040

4141
if (pkt->mask & RXE_REQ_MASK) {
42-
if (unlikely(qp_state(qp) <= IB_QPS_RTR))
42+
if (unlikely(qp_state(qp) < IB_QPS_RTR))
4343
return -EINVAL;
44-
} else if (unlikely(qp->req.state < QP_STATE_READY ||
45-
qp->req.state > QP_STATE_DRAINED))
46-
return -EINVAL;
44+
} else {
45+
if (unlikely(qp_state(qp) < IB_QPS_RTS))
46+
return -EINVAL;
47+
}
4748

4849
return 0;
4950
}

drivers/infiniband/sw/rxe/rxe_req.c

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -120,13 +120,13 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
120120
cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
121121
prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
122122

123-
if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
123+
if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
124124
/* check to see if we are drained;
125125
* state_lock used by requester and completer
126126
*/
127127
spin_lock_bh(&qp->state_lock);
128128
do {
129-
if (qp->req.state != QP_STATE_DRAIN) {
129+
if (!qp->attr.sq_draining) {
130130
/* comp just finished */
131131
spin_unlock_bh(&qp->state_lock);
132132
break;
@@ -139,7 +139,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
139139
break;
140140
}
141141

142-
qp->req.state = QP_STATE_DRAINED;
142+
qp->attr.sq_draining = 0;
143143
spin_unlock_bh(&qp->state_lock);
144144

145145
if (qp->ibqp.event_handler) {
@@ -159,8 +159,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
159159

160160
wqe = queue_addr_from_index(q, index);
161161

162-
if (unlikely((qp->req.state == QP_STATE_DRAIN ||
163-
qp->req.state == QP_STATE_DRAINED) &&
162+
if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
164163
(wqe->state != wqe_state_processing)))
165164
return NULL;
166165

@@ -656,7 +655,7 @@ int rxe_requester(struct rxe_qp *qp)
656655
if (unlikely(!qp->valid))
657656
goto exit;
658657

659-
if (unlikely(qp->req.state == QP_STATE_ERROR)) {
658+
if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
660659
wqe = req_next_wqe(qp);
661660
if (wqe)
662661
/*
@@ -667,7 +666,7 @@ int rxe_requester(struct rxe_qp *qp)
667666
goto exit;
668667
}
669668

670-
if (unlikely(qp->req.state == QP_STATE_RESET)) {
669+
if (unlikely(qp_state(qp) == IB_QPS_RESET)) {
671670
qp->req.wqe_index = queue_get_consumer(q,
672671
QUEUE_TYPE_FROM_CLIENT);
673672
qp->req.opcode = -1;
@@ -836,7 +835,7 @@ int rxe_requester(struct rxe_qp *qp)
836835
/* update wqe_index for each wqe completion */
837836
qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
838837
wqe->state = wqe_state_error;
839-
qp->req.state = QP_STATE_ERROR;
838+
qp->attr.qp_state = IB_QPS_ERR;
840839
rxe_sched_task(&qp->comp.task);
841840
exit:
842841
ret = -EAGAIN;

drivers/infiniband/sw/rxe/rxe_verbs.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -881,7 +881,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
881881
if (!err)
882882
rxe_sched_task(&qp->req.task);
883883

884-
if (unlikely(qp->req.state == QP_STATE_ERROR))
884+
if (unlikely(qp_state(qp) == IB_QPS_ERR))
885885
rxe_sched_task(&qp->comp.task);
886886

887887
return err;
@@ -900,7 +900,7 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
900900
goto err_out;
901901
}
902902

903-
if (unlikely(qp->req.state < QP_STATE_READY)) {
903+
if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
904904
*bad_wr = wr;
905905
err = -EINVAL;
906906
rxe_dbg_qp(qp, "qp not ready to send");

drivers/infiniband/sw/rxe/rxe_verbs.h

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -102,17 +102,7 @@ struct rxe_srq {
102102
int error;
103103
};
104104

105-
enum rxe_qp_state {
106-
QP_STATE_RESET,
107-
QP_STATE_INIT,
108-
QP_STATE_READY,
109-
QP_STATE_DRAIN, /* req only */
110-
QP_STATE_DRAINED, /* req only */
111-
QP_STATE_ERROR
112-
};
113-
114105
struct rxe_req_info {
115-
enum rxe_qp_state state;
116106
int wqe_index;
117107
u32 psn;
118108
int opcode;

0 commit comments

Comments (0)