@@ -325,8 +325,10 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
325325 if (err )
326326 goto err2 ;
327327
328+ spin_lock_bh (& qp -> state_lock );
328329 qp -> attr .qp_state = IB_QPS_RESET ;
329330 qp -> valid = 1 ;
331+ spin_unlock_bh (& qp -> state_lock );
330332
331333 return 0 ;
332334
@@ -377,27 +379,9 @@ int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
377379 return 0 ;
378380}
379381
380- /* called by the modify qp verb, this routine checks all the parameters before
381- * making any changes
382- */
383382int rxe_qp_chk_attr (struct rxe_dev * rxe , struct rxe_qp * qp ,
384383 struct ib_qp_attr * attr , int mask )
385384{
386- enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE ) ?
387- attr -> cur_qp_state : qp -> attr .qp_state ;
388- enum ib_qp_state new_state = (mask & IB_QP_STATE ) ?
389- attr -> qp_state : cur_state ;
390-
391- if (!ib_modify_qp_is_ok (cur_state , new_state , qp_type (qp ), mask )) {
392- rxe_dbg_qp (qp , "invalid mask or state\n" );
393- goto err1 ;
394- }
395-
396- if (mask & IB_QP_STATE && cur_state == IB_QPS_SQD ) {
397- if (qp -> attr .sq_draining && new_state != IB_QPS_ERR )
398- goto err1 ;
399- }
400-
401385 if (mask & IB_QP_PORT ) {
402386 if (!rdma_is_port_valid (& rxe -> ib_dev , attr -> port_num )) {
403387 rxe_dbg_qp (qp , "invalid port %d\n" , attr -> port_num );
@@ -508,22 +492,96 @@ static void rxe_qp_reset(struct rxe_qp *qp)
/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	/* qp->attr.qp_state is protected by qp->state_lock; hold it
	 * across the state change and the task kicks.
	 */
	spin_lock_bh(&qp->state_lock);
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_sched_task(&qp->resp.task);
	rxe_sched_task(&qp->comp.task);
	rxe_sched_task(&qp->req.task);
	spin_unlock_bh(&qp->state_lock);
}
504+
/* move the qp to the SQD state: mark the send queue as draining under
 * qp->state_lock and kick the completer and requester tasks so they
 * pick up the new state.
 * NOTE(review): attr and mask are currently unused here; presumably
 * kept so the signature parallels the other modify-qp helpers —
 * confirm before removing.
 */
static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
		       int mask)
{
	spin_lock_bh(&qp->state_lock);
	qp->attr.sq_draining = 1;
	rxe_sched_task(&qp->comp.task);
	rxe_sched_task(&qp->req.task);
	spin_unlock_bh(&qp->state_lock);
}
514+
515+ /* caller should hold qp->state_lock */
516+ static int __qp_chk_state (struct rxe_qp * qp , struct ib_qp_attr * attr ,
517+ int mask )
518+ {
519+ enum ib_qp_state cur_state ;
520+ enum ib_qp_state new_state ;
521+
522+ cur_state = (mask & IB_QP_CUR_STATE ) ?
523+ attr -> cur_qp_state : qp -> attr .qp_state ;
524+ new_state = (mask & IB_QP_STATE ) ?
525+ attr -> qp_state : cur_state ;
526+
527+ if (!ib_modify_qp_is_ok (cur_state , new_state , qp_type (qp ), mask ))
528+ return - EINVAL ;
529+
530+ if (mask & IB_QP_STATE && cur_state == IB_QPS_SQD ) {
531+ if (qp -> attr .sq_draining && new_state != IB_QPS_ERR )
532+ return - EINVAL ;
533+ }
534+
535+ return 0 ;
517536}
518537
/* printable names for the ib_qp_state values, indexed by state;
 * used for the "state -> %s" debug message in rxe_qp_from_attr()
 */
static const char *const qps2str[] = {
	[IB_QPS_RESET]	= "RESET",
	[IB_QPS_INIT]	= "INIT",
	[IB_QPS_RTR]	= "RTR",
	[IB_QPS_RTS]	= "RTS",
	[IB_QPS_SQD]	= "SQD",
	[IB_QPS_SQE]	= "SQE",
	[IB_QPS_ERR]	= "ERR",
};
547+
519548/* called by the modify qp verb */
520549int rxe_qp_from_attr (struct rxe_qp * qp , struct ib_qp_attr * attr , int mask ,
521550 struct ib_udata * udata )
522551{
523- enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE ) ?
524- attr -> cur_qp_state : qp -> attr .qp_state ;
525552 int err ;
526553
554+ if (mask & IB_QP_CUR_STATE )
555+ qp -> attr .cur_qp_state = attr -> qp_state ;
556+
557+ if (mask & IB_QP_STATE ) {
558+ spin_lock_bh (& qp -> state_lock );
559+ err = __qp_chk_state (qp , attr , mask );
560+ if (!err ) {
561+ qp -> attr .qp_state = attr -> qp_state ;
562+ rxe_dbg_qp (qp , "state -> %s\n" ,
563+ qps2str [attr -> qp_state ]);
564+ }
565+ spin_unlock_bh (& qp -> state_lock );
566+
567+ if (err )
568+ return err ;
569+
570+ switch (attr -> qp_state ) {
571+ case IB_QPS_RESET :
572+ rxe_qp_reset (qp );
573+ break ;
574+ case IB_QPS_SQD :
575+ rxe_qp_sqd (qp , attr , mask );
576+ break ;
577+ case IB_QPS_ERR :
578+ rxe_qp_error (qp );
579+ break ;
580+ default :
581+ break ;
582+ }
583+ }
584+
527585 if (mask & IB_QP_MAX_QP_RD_ATOMIC ) {
528586 int max_rd_atomic = attr -> max_rd_atomic ?
529587 roundup_pow_of_two (attr -> max_rd_atomic ) : 0 ;
@@ -545,9 +603,6 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
545603 return err ;
546604 }
547605
548- if (mask & IB_QP_CUR_STATE )
549- qp -> attr .cur_qp_state = attr -> qp_state ;
550-
551606 if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY )
552607 qp -> attr .en_sqd_async_notify = attr -> en_sqd_async_notify ;
553608
@@ -627,48 +682,6 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
627682 if (mask & IB_QP_DEST_QPN )
628683 qp -> attr .dest_qp_num = attr -> dest_qp_num ;
629684
630- if (mask & IB_QP_STATE ) {
631- qp -> attr .qp_state = attr -> qp_state ;
632-
633- switch (attr -> qp_state ) {
634- case IB_QPS_RESET :
635- rxe_dbg_qp (qp , "state -> RESET\n" );
636- rxe_qp_reset (qp );
637- break ;
638-
639- case IB_QPS_INIT :
640- rxe_dbg_qp (qp , "state -> INIT\n" );
641- break ;
642-
643- case IB_QPS_RTR :
644- rxe_dbg_qp (qp , "state -> RTR\n" );
645- break ;
646-
647- case IB_QPS_RTS :
648- rxe_dbg_qp (qp , "state -> RTS\n" );
649- break ;
650-
651- case IB_QPS_SQD :
652- rxe_dbg_qp (qp , "state -> SQD\n" );
653- if (cur_state != IB_QPS_SQD ) {
654- qp -> attr .sq_draining = 1 ;
655- rxe_sched_task (& qp -> comp .task );
656- rxe_sched_task (& qp -> req .task );
657- }
658- break ;
659-
660- case IB_QPS_SQE :
661- rxe_dbg_qp (qp , "state -> SQE !!?\n" );
662- /* Not possible from modify_qp. */
663- break ;
664-
665- case IB_QPS_ERR :
666- rxe_dbg_qp (qp , "state -> ERR\n" );
667- rxe_qp_error (qp );
668- break ;
669- }
670- }
671-
672685 return 0 ;
673686}
674687
@@ -695,10 +708,12 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
695708 /* Applications that get this state typically spin on it.
696709 * Yield the processor
697710 */
698- if (qp -> attr .sq_draining )
711+ spin_lock_bh (& qp -> state_lock );
712+ if (qp -> attr .sq_draining ) {
713+ spin_unlock_bh (& qp -> state_lock );
699714 cond_resched ();
700-
701- rxe_dbg_qp ( qp , "attr->sq_draining = %d\n" , attr -> sq_draining );
715+ }
716+ spin_unlock_bh ( & qp -> state_lock );
702717
703718 return 0 ;
704719}
@@ -722,7 +737,9 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
722737{
723738 struct rxe_qp * qp = container_of (work , typeof (* qp ), cleanup_work .work );
724739
740+ spin_lock_bh (& qp -> state_lock );
725741 qp -> valid = 0 ;
742+ spin_unlock_bh (& qp -> state_lock );
726743 qp -> qp_timeout_jiffies = 0 ;
727744
728745 if (qp_type (qp ) == IB_QPT_RC ) {
0 commit comments