@@ -748,8 +748,6 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
748748 */
749749static inline u32 ufshcd_get_intr_mask (struct ufs_hba * hba )
750750{
751- if (hba -> ufs_version == ufshci_version (1 , 0 ))
752- return INTERRUPT_MASK_ALL_VER_10 ;
753751 if (hba -> ufs_version <= ufshci_version (2 , 0 ))
754752 return INTERRUPT_MASK_ALL_VER_11 ;
755753
@@ -990,30 +988,6 @@ bool ufshcd_is_hba_active(struct ufs_hba *hba)
990988}
991989EXPORT_SYMBOL_GPL (ufshcd_is_hba_active );
992990
993- u32 ufshcd_get_local_unipro_ver (struct ufs_hba * hba )
994- {
995- /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
996- if (hba -> ufs_version <= ufshci_version (1 , 1 ))
997- return UFS_UNIPRO_VER_1_41 ;
998- else
999- return UFS_UNIPRO_VER_1_6 ;
1000- }
1001- EXPORT_SYMBOL (ufshcd_get_local_unipro_ver );
1002-
1003- static bool ufshcd_is_unipro_pa_params_tuning_req (struct ufs_hba * hba )
1004- {
1005- /*
1006- * If both host and device support UniPro ver1.6 or later, PA layer
1007- * parameters tuning happens during link startup itself.
1008- *
1009- * We can manually tune PA layer parameters if either host or device
1010- * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
1011- * logic simple, we will only do manual tuning if local unipro version
1012- * doesn't support ver1.6 or later.
1013- */
1014- return ufshcd_get_local_unipro_ver (hba ) < UFS_UNIPRO_VER_1_6 ;
1015- }
1016-
1017991/**
1018992 * ufshcd_pm_qos_init - initialize PM QoS request
1019993 * @hba: per adapter instance
@@ -2674,14 +2648,7 @@ static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
26742648{
26752649 u32 set = ufshcd_readl (hba , REG_INTERRUPT_ENABLE );
26762650
2677- if (hba -> ufs_version == ufshci_version (1 , 0 )) {
2678- u32 rw ;
2679- rw = set & INTERRUPT_MASK_RW_VER_10 ;
2680- set = rw | ((set ^ intrs ) & intrs );
2681- } else {
2682- set |= intrs ;
2683- }
2684-
2651+ set |= intrs ;
26852652 ufshcd_writel (hba , set , REG_INTERRUPT_ENABLE );
26862653}
26872654
@@ -2694,16 +2661,7 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
26942661{
26952662 u32 set = ufshcd_readl (hba , REG_INTERRUPT_ENABLE );
26962663
2697- if (hba -> ufs_version == ufshci_version (1 , 0 )) {
2698- u32 rw ;
2699- rw = (set & INTERRUPT_MASK_RW_VER_10 ) &
2700- ~(intrs & INTERRUPT_MASK_RW_VER_10 );
2701- set = rw | ((set & intrs ) & ~INTERRUPT_MASK_RW_VER_10 );
2702-
2703- } else {
2704- set &= ~intrs ;
2705- }
2706-
2664+ set &= ~intrs ;
27072665 ufshcd_writel (hba , set , REG_INTERRUPT_ENABLE );
27082666}
27092667
@@ -2715,21 +2673,17 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
27152673 * @upiu_flags: flags required in the header
27162674 * @cmd_dir: requests data direction
27172675 * @ehs_length: Total EHS Length (in 32-bytes units of all Extra Header Segments)
2718- * @legacy_type: UTP_CMD_TYPE_SCSI or UTP_CMD_TYPE_DEV_MANAGE
27192676 */
27202677static void
27212678ufshcd_prepare_req_desc_hdr (struct ufs_hba * hba , struct ufshcd_lrb * lrbp ,
27222679 u8 * upiu_flags , enum dma_data_direction cmd_dir ,
2723- int ehs_length , enum utp_cmd_type legacy_type )
2680+ int ehs_length )
27242681{
27252682 struct utp_transfer_req_desc * req_desc = lrbp -> utr_descriptor_ptr ;
27262683 struct request_desc_header * h = & req_desc -> header ;
27272684 enum utp_data_direction data_direction ;
27282685
2729- if (hba -> ufs_version <= ufshci_version (1 , 1 ))
2730- lrbp -> command_type = legacy_type ;
2731- else
2732- lrbp -> command_type = UTP_CMD_TYPE_UFS_STORAGE ;
2686+ lrbp -> command_type = UTP_CMD_TYPE_UFS_STORAGE ;
27332687
27342688 * h = (typeof (* h )){ };
27352689
@@ -2863,7 +2817,7 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
28632817 u8 upiu_flags ;
28642818 int ret = 0 ;
28652819
2866- ufshcd_prepare_req_desc_hdr (hba , lrbp , & upiu_flags , DMA_NONE , 0 , UTP_CMD_TYPE_DEV_MANAGE );
2820+ ufshcd_prepare_req_desc_hdr (hba , lrbp , & upiu_flags , DMA_NONE , 0 );
28672821
28682822 if (hba -> dev_cmd .type == DEV_CMD_TYPE_QUERY )
28692823 ufshcd_prepare_utp_query_req_upiu (hba , lrbp , upiu_flags );
@@ -2887,8 +2841,7 @@ static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
28872841 unsigned int ioprio_class = IOPRIO_PRIO_CLASS (req_get_ioprio (rq ));
28882842 u8 upiu_flags ;
28892843
2890- ufshcd_prepare_req_desc_hdr (hba , lrbp , & upiu_flags ,
2891- lrbp -> cmd -> sc_data_direction , 0 , UTP_CMD_TYPE_SCSI );
2844+ ufshcd_prepare_req_desc_hdr (hba , lrbp , & upiu_flags , lrbp -> cmd -> sc_data_direction , 0 );
28922845 if (ioprio_class == IOPRIO_CLASS_RT )
28932846 upiu_flags |= UPIU_CMD_FLAGS_CP ;
28942847 ufshcd_prepare_utp_scsi_cmd_upiu (lrbp , upiu_flags );
@@ -5559,15 +5512,12 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
55595512 ufshcd_release_scsi_cmd (hba , lrbp );
55605513 /* Do not touch lrbp after scsi done */
55615514 scsi_done (cmd );
5562- } else if (lrbp -> command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5563- lrbp -> command_type == UTP_CMD_TYPE_UFS_STORAGE ) {
5564- if (hba -> dev_cmd .complete ) {
5565- if (cqe ) {
5566- ocs = le32_to_cpu (cqe -> status ) & MASK_OCS ;
5567- lrbp -> utr_descriptor_ptr -> header .ocs = ocs ;
5568- }
5569- complete (hba -> dev_cmd .complete );
5515+ } else if (hba -> dev_cmd .complete ) {
5516+ if (cqe ) {
5517+ ocs = le32_to_cpu (cqe -> status ) & MASK_OCS ;
5518+ lrbp -> utr_descriptor_ptr -> header .ocs = ocs ;
55705519 }
5520+ complete (hba -> dev_cmd .complete );
55715521 }
55725522}
55735523
@@ -7220,7 +7170,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
72207170
72217171 ufshcd_setup_dev_cmd (hba , lrbp , cmd_type , 0 , tag );
72227172
7223- ufshcd_prepare_req_desc_hdr (hba , lrbp , & upiu_flags , DMA_NONE , 0 , UTP_CMD_TYPE_DEV_MANAGE );
7173+ ufshcd_prepare_req_desc_hdr (hba , lrbp , & upiu_flags , DMA_NONE , 0 );
72247174
72257175 /* update the task tag in the request upiu */
72267176 req_upiu -> header .task_tag = tag ;
@@ -7372,7 +7322,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
73727322
73737323 ufshcd_setup_dev_cmd (hba , lrbp , DEV_CMD_TYPE_RPMB , UFS_UPIU_RPMB_WLUN , tag );
73747324
7375- ufshcd_prepare_req_desc_hdr (hba , lrbp , & upiu_flags , DMA_NONE , ehs , UTP_CMD_TYPE_DEV_MANAGE );
7325+ ufshcd_prepare_req_desc_hdr (hba , lrbp , & upiu_flags , DMA_NONE , ehs );
73767326
73777327 /* update the task tag */
73787328 req_upiu -> header .task_tag = tag ;
@@ -8359,83 +8309,6 @@ static void ufs_put_device_desc(struct ufs_hba *hba)
83598309 dev_info -> model = NULL ;
83608310}
83618311
8362- /**
8363- * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
8364- * @hba: per-adapter instance
8365- *
8366- * PA_TActivate parameter can be tuned manually if UniPro version is less than
8367- * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's
8368- * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
8369- * the hibern8 exit latency.
8370- *
8371- * Return: zero on success, non-zero error value on failure.
8372- */
8373- static int ufshcd_tune_pa_tactivate (struct ufs_hba * hba )
8374- {
8375- int ret = 0 ;
8376- u32 peer_rx_min_activatetime = 0 , tuned_pa_tactivate ;
8377-
8378- ret = ufshcd_dme_peer_get (hba ,
8379- UIC_ARG_MIB_SEL (
8380- RX_MIN_ACTIVATETIME_CAPABILITY ,
8381- UIC_ARG_MPHY_RX_GEN_SEL_INDEX (0 )),
8382- & peer_rx_min_activatetime );
8383- if (ret )
8384- goto out ;
8385-
8386- /* make sure proper unit conversion is applied */
8387- tuned_pa_tactivate =
8388- ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US )
8389- / PA_TACTIVATE_TIME_UNIT_US );
8390- ret = ufshcd_dme_set (hba , UIC_ARG_MIB (PA_TACTIVATE ),
8391- tuned_pa_tactivate );
8392-
8393- out :
8394- return ret ;
8395- }
8396-
8397- /**
8398- * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
8399- * @hba: per-adapter instance
8400- *
8401- * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
8402- * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's
8403- * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
8404- * This optimal value can help reduce the hibern8 exit latency.
8405- *
8406- * Return: zero on success, non-zero error value on failure.
8407- */
8408- static int ufshcd_tune_pa_hibern8time (struct ufs_hba * hba )
8409- {
8410- int ret = 0 ;
8411- u32 local_tx_hibern8_time_cap = 0 , peer_rx_hibern8_time_cap = 0 ;
8412- u32 max_hibern8_time , tuned_pa_hibern8time ;
8413-
8414- ret = ufshcd_dme_get (hba ,
8415- UIC_ARG_MIB_SEL (TX_HIBERN8TIME_CAPABILITY ,
8416- UIC_ARG_MPHY_TX_GEN_SEL_INDEX (0 )),
8417- & local_tx_hibern8_time_cap );
8418- if (ret )
8419- goto out ;
8420-
8421- ret = ufshcd_dme_peer_get (hba ,
8422- UIC_ARG_MIB_SEL (RX_HIBERN8TIME_CAPABILITY ,
8423- UIC_ARG_MPHY_RX_GEN_SEL_INDEX (0 )),
8424- & peer_rx_hibern8_time_cap );
8425- if (ret )
8426- goto out ;
8427-
8428- max_hibern8_time = max (local_tx_hibern8_time_cap ,
8429- peer_rx_hibern8_time_cap );
8430- /* make sure proper unit conversion is applied */
8431- tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US )
8432- / PA_HIBERN8_TIME_UNIT_US );
8433- ret = ufshcd_dme_set (hba , UIC_ARG_MIB (PA_HIBERN8TIME ),
8434- tuned_pa_hibern8time );
8435- out :
8436- return ret ;
8437- }
8438-
84398312/**
84408313 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
84418314 * less than device PA_TACTIVATE time.
@@ -8508,11 +8381,6 @@ static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
85088381
85098382static void ufshcd_tune_unipro_params (struct ufs_hba * hba )
85108383{
8511- if (ufshcd_is_unipro_pa_params_tuning_req (hba )) {
8512- ufshcd_tune_pa_tactivate (hba );
8513- ufshcd_tune_pa_hibern8time (hba );
8514- }
8515-
85168384 ufshcd_vops_apply_dev_quirks (hba );
85178385
85188386 if (hba -> dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE )
0 commit comments