@@ -545,6 +545,9 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
545545 irdma_cqp_qp_destroy_cmd (& iwdev -> rf -> sc_dev , & iwqp -> sc_qp );
546546
547547 irdma_remove_push_mmap_entries (iwqp );
548+
549+ if (iwqp -> sc_qp .qp_uk .qp_id == 1 )
550+ iwdev -> rf -> hwqp1_rsvd = false;
548551 irdma_free_qp_rsrc (iwqp );
549552
550553 return 0 ;
@@ -723,6 +726,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
723726 info -> rq_pa + (ukinfo -> rq_depth * IRDMA_QP_WQE_MIN_SIZE );
724727 ukinfo -> sq_size = ukinfo -> sq_depth >> ukinfo -> sq_shift ;
725728 ukinfo -> rq_size = ukinfo -> rq_depth >> ukinfo -> rq_shift ;
729+ ukinfo -> qp_id = info -> qp_uk_init_info .qp_id ;
726730
727731 iwqp -> max_send_wr = (ukinfo -> sq_depth - IRDMA_SQ_RSVD ) >> ukinfo -> sq_shift ;
728732 iwqp -> max_recv_wr = (ukinfo -> rq_depth - IRDMA_RQ_RSVD ) >> ukinfo -> rq_shift ;
@@ -779,6 +783,8 @@ static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
779783 roce_info = & iwqp -> roce_info ;
780784 ether_addr_copy (roce_info -> mac_addr , iwdev -> netdev -> dev_addr );
781785
786+ if (iwqp -> ibqp .qp_type == IB_QPT_GSI && iwqp -> ibqp .qp_num != 1 )
787+ roce_info -> is_qp1 = true;
782788 roce_info -> rd_en = true;
783789 roce_info -> wr_rdresp_en = true;
784790 roce_info -> bind_en = true;
@@ -868,6 +874,47 @@ static void irdma_flush_worker(struct work_struct *work)
868874 irdma_generate_flush_completions (iwqp );
869875}
870876
/**
 * irdma_setup_gsi_qp_rsrc - pick a hardware QP id for the GSI QP (QP1)
 * @iwqp: QP being created (GSI / IB_QPT_GSI path)
 * @qp_num: out parameter; receives the hardware QP id to use
 *
 * GEN2 and earlier always map the GSI QP onto hardware QP 1. On newer
 * hardware, QP id 1 is handed out first-come-first-served: the first GSI
 * QP claims it (tracked by rf->hwqp1_rsvd under rsrc_lock); any later
 * GSI QP falls back to a normally allocated id from the QP resource pool.
 * The chosen id is then registered with the vport via a virtual-channel
 * request; on failure the id reservation/allocation is rolled back.
 *
 * Return: 0 on success, negative errno from allocation or the vchnl
 * request on failure.
 */
877+ static int irdma_setup_gsi_qp_rsrc (struct irdma_qp * iwqp , u32 * qp_num )
878+ {
879+ struct irdma_device * iwdev = iwqp -> iwdev ;
880+ struct irdma_pci_f * rf = iwdev -> rf ;
881+ unsigned long flags ;
882+ int ret ;
883+
/* Legacy hardware: QP1 is always hardware QP id 1, nothing to reserve. */
884+ if (rf -> rdma_ver <= IRDMA_GEN_2 ) {
885+ * qp_num = 1 ;
886+ return 0 ;
887+ }
888+
/*
 * Claim hardware id 1 if still free; hwqp1_rsvd is the single-owner
 * flag guarded by rsrc_lock. Otherwise drop the lock and allocate a
 * regular id from the QP bitmap (irdma_alloc_rsrc takes its own lock).
 */
889+ spin_lock_irqsave (& rf -> rsrc_lock , flags );
890+ if (!rf -> hwqp1_rsvd ) {
891+ * qp_num = 1 ;
892+ rf -> hwqp1_rsvd = true;
893+ spin_unlock_irqrestore (& rf -> rsrc_lock , flags );
894+ } else {
895+ spin_unlock_irqrestore (& rf -> rsrc_lock , flags );
896+ ret = irdma_alloc_rsrc (rf , rf -> allocated_qps , rf -> max_qp ,
897+ qp_num , & rf -> next_qp );
898+ if (ret )
899+ return ret ;
900+ }
901+
/*
 * Register the chosen QP id for this vport with the PF over the
 * virtual channel (presumably so the PF can steer QP1 traffic —
 * NOTE(review): exact vchnl semantics not visible here, confirm
 * against irdma_vchnl_req_add_vport()).
 */
902+ ret = irdma_vchnl_req_add_vport (& rf -> sc_dev , iwdev -> vport_id , * qp_num ,
903+ (& iwdev -> vsi )-> qos );
904+ if (ret ) {
/* Roll back whichever reservation path was taken above. */
905+ if (* qp_num != 1 ) {
906+ irdma_free_rsrc (rf , rf -> allocated_qps , * qp_num );
907+ } else {
908+ spin_lock_irqsave (& rf -> rsrc_lock , flags );
909+ rf -> hwqp1_rsvd = false;
910+ spin_unlock_irqrestore (& rf -> rsrc_lock , flags );
911+ }
912+ return ret ;
913+ }
914+
915+ return 0 ;
916+ }
917+
871918/**
872919 * irdma_create_qp - create qp
873920 * @ibqp: ptr of qp
@@ -929,16 +976,20 @@ static int irdma_create_qp(struct ib_qp *ibqp,
929976 init_info .host_ctx = (__le64 * )(init_info .q2 + IRDMA_Q2_BUF_SIZE );
930977 init_info .host_ctx_pa = init_info .q2_pa + IRDMA_Q2_BUF_SIZE ;
931978
932- if (init_attr -> qp_type == IB_QPT_GSI )
933- qp_num = 1 ;
934- else
979+ if (init_attr -> qp_type == IB_QPT_GSI ) {
980+ err_code = irdma_setup_gsi_qp_rsrc (iwqp , & qp_num );
981+ if (err_code )
982+ goto error ;
983+ iwqp -> ibqp .qp_num = 1 ;
984+ } else {
935985 err_code = irdma_alloc_rsrc (rf , rf -> allocated_qps , rf -> max_qp ,
936986 & qp_num , & rf -> next_qp );
937- if (err_code )
938- goto error ;
987+ if (err_code )
988+ goto error ;
989+ iwqp -> ibqp .qp_num = qp_num ;
990+ }
939991
940992 iwqp -> iwpd = iwpd ;
941- iwqp -> ibqp .qp_num = qp_num ;
942993 qp = & iwqp -> sc_qp ;
943994 iwqp -> iwscq = to_iwcq (init_attr -> send_cq );
944995 iwqp -> iwrcq = to_iwcq (init_attr -> recv_cq );
@@ -998,10 +1049,17 @@ static int irdma_create_qp(struct ib_qp *ibqp,
9981049 ctx_info -> send_cq_num = iwqp -> iwscq -> sc_cq .cq_uk .cq_id ;
9991050 ctx_info -> rcv_cq_num = iwqp -> iwrcq -> sc_cq .cq_uk .cq_id ;
10001051
1001- if (rdma_protocol_roce (& iwdev -> ibdev , 1 ))
1052+ if (rdma_protocol_roce (& iwdev -> ibdev , 1 )) {
1053+ if (dev -> ws_add (& iwdev -> vsi , 0 )) {
1054+ irdma_cqp_qp_destroy_cmd (& rf -> sc_dev , & iwqp -> sc_qp );
1055+ err_code = - EINVAL ;
1056+ goto error ;
1057+ }
1058+ irdma_qp_add_qos (& iwqp -> sc_qp );
10021059 irdma_roce_fill_and_set_qpctx_info (iwqp , ctx_info );
1003- else
1060+ } else {
10041061 irdma_iw_fill_and_set_qpctx_info (iwqp , ctx_info );
1062+ }
10051063
10061064 err_code = irdma_cqp_create_qp_cmd (iwqp );
10071065 if (err_code )
@@ -1013,16 +1071,6 @@ static int irdma_create_qp(struct ib_qp *ibqp,
10131071 iwqp -> sig_all = init_attr -> sq_sig_type == IB_SIGNAL_ALL_WR ;
10141072 rf -> qp_table [qp_num ] = iwqp ;
10151073
1016- if (rdma_protocol_roce (& iwdev -> ibdev , 1 )) {
1017- if (dev -> ws_add (& iwdev -> vsi , 0 )) {
1018- irdma_cqp_qp_destroy_cmd (& rf -> sc_dev , & iwqp -> sc_qp );
1019- err_code = - EINVAL ;
1020- goto error ;
1021- }
1022-
1023- irdma_qp_add_qos (& iwqp -> sc_qp );
1024- }
1025-
10261074 if (udata ) {
10271075 /* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
10281076 if (udata -> outlen < sizeof (uresp )) {
0 commit comments