@@ -412,7 +412,8 @@ int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
412412 pble_obj_cnt = info -> pd -> dev -> hmc_info -> hmc_obj [IRDMA_HMC_IW_PBLE ].cnt ;
413413
414414 if ((info -> virtual_map && info -> sq_pa >= pble_obj_cnt ) ||
415- (info -> virtual_map && info -> rq_pa >= pble_obj_cnt ))
415+ (!info -> qp_uk_init_info .srq_uk &&
416+ info -> virtual_map && info -> rq_pa >= pble_obj_cnt ))
416417 return - EINVAL ;
417418
418419 qp -> llp_stream_handle = (void * )(-1 );
@@ -446,6 +447,208 @@ int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
446447 return 0 ;
447448}
448449
450+ /**
451+ * irdma_sc_srq_init - init sc_srq structure
452+ * @srq: srq sc struct
453+ * @info: parameters for srq init
454+ */
455+ int irdma_sc_srq_init (struct irdma_sc_srq * srq ,
456+ struct irdma_srq_init_info * info )
457+ {
458+ u32 srq_size_quanta ;
459+ int ret_code ;
460+
461+ ret_code = irdma_uk_srq_init (& srq -> srq_uk , & info -> srq_uk_init_info );
462+ if (ret_code )
463+ return ret_code ;
464+
465+ srq -> dev = info -> pd -> dev ;
466+ srq -> pd = info -> pd ;
467+ srq -> vsi = info -> vsi ;
468+ srq -> srq_pa = info -> srq_pa ;
469+ srq -> first_pm_pbl_idx = info -> first_pm_pbl_idx ;
470+ srq -> pasid = info -> pasid ;
471+ srq -> pasid_valid = info -> pasid_valid ;
472+ srq -> srq_limit = info -> srq_limit ;
473+ srq -> leaf_pbl_size = info -> leaf_pbl_size ;
474+ srq -> virtual_map = info -> virtual_map ;
475+ srq -> tph_en = info -> tph_en ;
476+ srq -> arm_limit_event = info -> arm_limit_event ;
477+ srq -> tph_val = info -> tph_value ;
478+ srq -> shadow_area_pa = info -> shadow_area_pa ;
479+
480+ /* Smallest SRQ size is 256B i.e. 8 quanta */
481+ srq_size_quanta = max ((u32 )IRDMA_SRQ_MIN_QUANTA ,
482+ srq -> srq_uk .srq_size *
483+ srq -> srq_uk .wqe_size_multiplier );
484+ srq -> hw_srq_size = irdma_get_encoded_wqe_size (srq_size_quanta ,
485+ IRDMA_QUEUE_TYPE_SRQ );
486+
487+ return 0 ;
488+ }
489+
490+ /**
491+ * irdma_sc_srq_create - send srq create CQP WQE
492+ * @srq: srq sc struct
493+ * @scratch: u64 saved to be used during cqp completion
494+ * @post_sq: flag for cqp db to ring
495+ */
496+ static int irdma_sc_srq_create (struct irdma_sc_srq * srq , u64 scratch ,
497+ bool post_sq )
498+ {
499+ struct irdma_sc_cqp * cqp ;
500+ __le64 * wqe ;
501+ u64 hdr ;
502+
503+ cqp = srq -> pd -> dev -> cqp ;
504+ if (srq -> srq_uk .srq_id < cqp -> dev -> hw_attrs .min_hw_srq_id ||
505+ srq -> srq_uk .srq_id >
506+ (cqp -> dev -> hmc_info -> hmc_obj [IRDMA_HMC_IW_SRQ ].max_cnt - 1 ))
507+ return - EINVAL ;
508+
509+ wqe = irdma_sc_cqp_get_next_send_wqe (cqp , scratch );
510+ if (!wqe )
511+ return - ENOMEM ;
512+
513+ set_64bit_val (wqe , 0 ,
514+ FIELD_PREP (IRDMA_CQPSQ_SRQ_SRQ_LIMIT , srq -> srq_limit ) |
515+ FIELD_PREP (IRDMA_CQPSQ_SRQ_RQSIZE , srq -> hw_srq_size ) |
516+ FIELD_PREP (IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE , srq -> srq_uk .wqe_size ));
517+ set_64bit_val (wqe , 8 , (uintptr_t )srq );
518+ set_64bit_val (wqe , 16 ,
519+ FIELD_PREP (IRDMA_CQPSQ_SRQ_PD_ID , srq -> pd -> pd_id ));
520+ set_64bit_val (wqe , 32 ,
521+ FIELD_PREP (IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR ,
522+ srq -> srq_pa >>
523+ IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S ));
524+ set_64bit_val (wqe , 40 ,
525+ FIELD_PREP (IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR ,
526+ srq -> shadow_area_pa >>
527+ IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S ));
528+ set_64bit_val (wqe , 48 ,
529+ FIELD_PREP (IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX ,
530+ srq -> first_pm_pbl_idx ));
531+
532+ hdr = srq -> srq_uk .srq_id |
533+ FIELD_PREP (IRDMA_CQPSQ_OPCODE , IRDMA_CQP_OP_CREATE_SRQ ) |
534+ FIELD_PREP (IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE , srq -> leaf_pbl_size ) |
535+ FIELD_PREP (IRDMA_CQPSQ_SRQ_VIRTMAP , srq -> virtual_map ) |
536+ FIELD_PREP (IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT ,
537+ srq -> arm_limit_event ) |
538+ FIELD_PREP (IRDMA_CQPSQ_WQEVALID , cqp -> polarity );
539+
540+ dma_wmb (); /* make sure WQE is written before valid bit is set */
541+
542+ set_64bit_val (wqe , 24 , hdr );
543+
544+ print_hex_dump_debug ("WQE: SRQ_CREATE WQE" , DUMP_PREFIX_OFFSET , 16 , 8 ,
545+ wqe , IRDMA_CQP_WQE_SIZE * 8 , false);
546+ if (post_sq )
547+ irdma_sc_cqp_post_sq (cqp );
548+
549+ return 0 ;
550+ }
551+
/**
 * irdma_sc_srq_modify - send modify_srq CQP WQE
 * @srq: srq sc struct
 * @info: parameters for srq modification
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Only the limit and arm-limit-event values come from @info; the
 * remaining WQE fields are re-sent from the current SRQ state.
 *
 * Return: 0 on success, -EINVAL if the SRQ id is out of range,
 * -ENOMEM if no CQP SQ WQE is available.
 */
static int irdma_sc_srq_modify(struct irdma_sc_srq *srq,
			       struct irdma_modify_srq_info *info, u64 scratch,
			       bool post_sq)
{
	struct irdma_sc_cqp *cqp;
	__le64 *wqe;
	u64 hdr;

	cqp = srq->dev->cqp;
	/* Reject ids below the HW minimum or beyond the HMC object count. */
	if (srq->srq_uk.srq_id < cqp->dev->hw_attrs.min_hw_srq_id ||
	    srq->srq_uk.srq_id >
	    (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].max_cnt - 1))
		return -EINVAL;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOMEM;

	/* New limit from @info; size fields unchanged from create time. */
	set_64bit_val(wqe, 0,
		      FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQ_LIMIT, info->srq_limit) |
		      FIELD_PREP(IRDMA_CQPSQ_SRQ_RQSIZE, srq->hw_srq_size) |
		      FIELD_PREP(IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE, srq->srq_uk.wqe_size));
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQCTX, srq->srq_uk.srq_id));
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMA_CQPSQ_SRQ_PD_ID, srq->pd->pd_id));
	set_64bit_val(wqe, 32,
		      FIELD_PREP(IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR,
				 srq->srq_pa >>
				 IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S));
	set_64bit_val(wqe, 40,
		      FIELD_PREP(IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR,
				 srq->shadow_area_pa >>
				 IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S));
	set_64bit_val(wqe, 48,
		      FIELD_PREP(IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX,
				 srq->first_pm_pbl_idx));

	hdr = srq->srq_uk.srq_id |
	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_SRQ) |
	      FIELD_PREP(IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE, srq->leaf_pbl_size) |
	      FIELD_PREP(IRDMA_CQPSQ_SRQ_VIRTMAP, srq->virtual_map) |
	      FIELD_PREP(IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT,
			 info->arm_limit_event) |
	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	print_hex_dump_debug("WQE: SRQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
615+
/**
 * irdma_sc_srq_destroy - send srq_destroy CQP WQE
 * @srq: srq sc struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Return: 0 on success, -ENOMEM if no CQP SQ WQE is available.
 */
static int irdma_sc_srq_destroy(struct irdma_sc_srq *srq, u64 scratch,
				bool post_sq)
{
	struct irdma_sc_cqp *cqp;
	__le64 *wqe;
	u64 hdr;

	cqp = srq->dev->cqp;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOMEM;

	/* Completion context: same SRQ pointer that was set at create. */
	set_64bit_val(wqe, 8, (uintptr_t)srq);

	hdr = srq->srq_uk.srq_id |
	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_SRQ) |
	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	print_hex_dump_debug("WQE: SRQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
651+
449652/**
450653 * irdma_sc_qp_create - create qp
451654 * @qp: sc qp
@@ -837,6 +1040,7 @@ static void irdma_sc_qp_setctx_roce_gen_3(struct irdma_sc_qp *qp,
8371040 FIELD_PREP (IRDMAQPC_ISQP1 , roce_info -> is_qp1 ) |
8381041 FIELD_PREP (IRDMAQPC_ROCE_TVER , roce_info -> roce_tver ) |
8391042 FIELD_PREP (IRDMAQPC_IPV4 , udp -> ipv4 ) |
1043+ FIELD_PREP (IRDMAQPC_USE_SRQ , !qp -> qp_uk .srq_uk ? 0 : 1 ) |
8401044 FIELD_PREP (IRDMAQPC_INSERTVLANTAG , udp -> insert_vlan_tag );
8411045 set_64bit_val (qp_ctx , 0 , qw0 );
8421046 set_64bit_val (qp_ctx , 8 , qp -> sq_pa );
@@ -921,6 +1125,9 @@ static void irdma_sc_qp_setctx_roce_gen_3(struct irdma_sc_qp *qp,
9211125 FIELD_PREP (IRDMAQPC_LOCAL_IPADDR0 , udp -> local_ipaddr [0 ]));
9221126 set_64bit_val (qp_ctx , 200 ,
9231127 FIELD_PREP (IRDMAQPC_THIGH , roce_info -> t_high ) |
1128+ FIELD_PREP (IRDMAQPC_SRQ_ID ,
1129+ !qp -> qp_uk .srq_uk ?
1130+ 0 : qp -> qp_uk .srq_uk -> srq_id ) |
9241131 FIELD_PREP (IRDMAQPC_TLOW , roce_info -> t_low ));
9251132 set_64bit_val (qp_ctx , 208 , roce_info -> pd_id |
9261133 FIELD_PREP (IRDMAQPC_STAT_INDEX_GEN3 , info -> stats_idx ) |
@@ -2219,6 +2426,14 @@ u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
22192426{
22202427 u8 encoded_size = 0 ;
22212428
2429+ if (queue_type == IRDMA_QUEUE_TYPE_SRQ ) {
2430+ /* Smallest SRQ size is 256B (8 quanta) that gets
2431+ * encoded to 0.
2432+ */
2433+ encoded_size = ilog2 (wqsize ) - 3 ;
2434+
2435+ return encoded_size ;
2436+ }
22222437 /* cqp sq's hw coded value starts from 1 for size of 4
22232438 * while it starts from 0 for qp' wq's.
22242439 */
@@ -4585,7 +4800,7 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
45854800 case IRDMA_AE_SRQ_LIMIT :
45864801 info -> srq = true;
45874802 /* [63:6] from CMPL_CTXT, [5:0] from WQDESCIDX. */
4588- info -> compl_ctx = compl_ctx | info -> wqe_idx ;
4803+ info -> compl_ctx = compl_ctx ;
45894804 ae_src = IRDMA_AE_SOURCE_RSVD ;
45904805 break ;
45914806 case IRDMA_AE_PRIV_OPERATION_DENIED :
@@ -6161,6 +6376,22 @@ static int irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
61616376 & pcmdinfo -> in .u .mc_modify .info ,
61626377 pcmdinfo -> in .u .mc_modify .scratch );
61636378 break ;
6379+ case IRDMA_OP_SRQ_CREATE :
6380+ status = irdma_sc_srq_create (pcmdinfo -> in .u .srq_create .srq ,
6381+ pcmdinfo -> in .u .srq_create .scratch ,
6382+ pcmdinfo -> post_sq );
6383+ break ;
6384+ case IRDMA_OP_SRQ_MODIFY :
6385+ status = irdma_sc_srq_modify (pcmdinfo -> in .u .srq_modify .srq ,
6386+ & pcmdinfo -> in .u .srq_modify .info ,
6387+ pcmdinfo -> in .u .srq_modify .scratch ,
6388+ pcmdinfo -> post_sq );
6389+ break ;
6390+ case IRDMA_OP_SRQ_DESTROY :
6391+ status = irdma_sc_srq_destroy (pcmdinfo -> in .u .srq_destroy .srq ,
6392+ pcmdinfo -> in .u .srq_destroy .scratch ,
6393+ pcmdinfo -> post_sq );
6394+ break ;
61646395 default :
61656396 status = - EOPNOTSUPP ;
61666397 break ;
@@ -6318,6 +6549,7 @@ int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
63186549 dev -> protocol_used = info -> protocol_used ;
63196550 /* Setup the hardware limits, hmc may limit further */
63206551 dev -> hw_attrs .min_hw_qp_id = IRDMA_MIN_IW_QP_ID ;
6552+ dev -> hw_attrs .min_hw_srq_id = IRDMA_MIN_IW_SRQ_ID ;
63216553 dev -> hw_attrs .min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES ;
63226554 if (dev -> hw_attrs .uk_attrs .hw_rev >= IRDMA_GEN_3 )
63236555 dev -> hw_attrs .max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES_GEN_3 ;
0 commit comments