@@ -224,14 +224,19 @@ unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
224224
225225static bool a6xx_gmu_check_idle_level (struct a6xx_gmu * gmu )
226226{
227- u32 val ;
227+ struct a6xx_gpu * a6xx_gpu = container_of (gmu , struct a6xx_gpu , gmu );
228+ struct adreno_gpu * adreno_gpu = & a6xx_gpu -> base ;
228229 int local = gmu -> idle_level ;
230+ u32 val ;
229231
230232 /* SPTP and IFPC both report as IFPC */
231233 if (gmu -> idle_level == GMU_IDLE_STATE_SPTP )
232234 local = GMU_IDLE_STATE_IFPC ;
233235
234- val = gmu_read (gmu , REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE );
236+ if (adreno_is_a8xx (adreno_gpu ))
237+ val = gmu_read (gmu , REG_A8XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE );
238+ else
239+ val = gmu_read (gmu , REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE );
235240
236241 if (val == local ) {
237242 if (gmu -> idle_level != GMU_IDLE_STATE_IFPC ||
@@ -269,7 +274,9 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
269274 /* Set the log wptr index
270275 * note: downstream saves the value in poweroff and restores it here
271276 */
272- if (adreno_is_a7xx (adreno_gpu ))
277+ if (adreno_is_a8xx (adreno_gpu ))
278+ gmu_write (gmu , REG_A8XX_GMU_GENERAL_9 , 0 );
279+ else if (adreno_is_a7xx (adreno_gpu ))
273280 gmu_write (gmu , REG_A7XX_GMU_GENERAL_9 , 0 );
274281 else
275282 gmu_write (gmu , REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP , 0 );
@@ -511,18 +518,25 @@ static void a6xx_gemnoc_workaround(struct a6xx_gmu *gmu)
511518 * in the power down sequence not being fully executed. That in turn can
512519 * prevent CX_GDSC from collapsing. Assert Qactive to avoid this.
513520 */
514- if (adreno_is_a7xx (adreno_gpu ) || (adreno_is_a621 (adreno_gpu ) ||
521+ if (adreno_is_a8xx (adreno_gpu ))
522+ gmu_write (gmu , REG_A8XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF , BIT (0 ));
523+ else if (adreno_is_a7xx (adreno_gpu ) || (adreno_is_a621 (adreno_gpu ) ||
515524 adreno_is_7c3 (adreno_gpu )))
516525 gmu_write (gmu , REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF , BIT (0 ));
517526}
518527
519528/* Let the GMU know that we are about to go into slumber */
520529static int a6xx_gmu_notify_slumber (struct a6xx_gmu * gmu )
521530{
531+ struct a6xx_gpu * a6xx_gpu = container_of (gmu , struct a6xx_gpu , gmu );
532+ struct adreno_gpu * adreno_gpu = & a6xx_gpu -> base ;
522533 int ret ;
523534
524535 /* Disable the power counter so the GMU isn't busy */
525- gmu_write (gmu , REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE , 0 );
536+ if (adreno_is_a8xx (adreno_gpu ))
537+ gmu_write (gmu , REG_A8XX_GMU_CX_GMU_POWER_COUNTER_ENABLE , 0 );
538+ else
539+ gmu_write (gmu , REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE , 0 );
526540
527541 /* Disable SPTP_PC if the CPU is responsible for it */
528542 if (gmu -> idle_level < GMU_IDLE_STATE_SPTP )
@@ -615,12 +629,17 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
615629 struct a6xx_gpu * a6xx_gpu = container_of (gmu , struct a6xx_gpu , gmu );
616630 struct adreno_gpu * adreno_gpu = & a6xx_gpu -> base ;
617631 struct platform_device * pdev = to_platform_device (gmu -> dev );
618- void __iomem * pdcptr = devm_platform_ioremap_resource_byname (pdev , "gmu_pdc" );
619632 u32 seqmem0_drv0_reg = REG_A6XX_RSCC_SEQ_MEM_0_DRV0 ;
620633 void __iomem * seqptr = NULL ;
621634 uint32_t pdc_address_offset ;
635+ void __iomem * pdcptr ;
622636 bool pdc_in_aop = false;
623637
638+ /* On A8x and above, RPMH/PDC configurations are entirely configured in AOP */
639+ if (adreno_is_a8xx (adreno_gpu ))
640+ return ;
641+
642+ pdcptr = devm_platform_ioremap_resource_byname (pdev , "gmu_pdc" );
624643 if (IS_ERR (pdcptr ))
625644 return ;
626645
@@ -749,7 +768,7 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
749768 gmu_write (gmu , REG_A6XX_GMU_DCACHE_CONFIG , 0x1 );
750769
751770 /* A7xx knows better by default! */
752- if (adreno_is_a7xx (adreno_gpu ))
771+ if (adreno_is_a7xx (adreno_gpu ) || adreno_is_a8xx ( adreno_gpu ) )
753772 return ;
754773
755774 gmu_write (gmu , REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL , 0x9c40400 );
@@ -812,7 +831,9 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
812831 u32 itcm_base = 0x00000000 ;
813832 u32 dtcm_base = 0x00040000 ;
814833
815- if (adreno_is_a650_family (adreno_gpu ) || adreno_is_a7xx (adreno_gpu ))
834+ if (adreno_is_a650_family (adreno_gpu ) ||
835+ adreno_is_a7xx (adreno_gpu ) ||
836+ adreno_is_a8xx (adreno_gpu ))
816837 dtcm_base = 0x10004000 ;
817838
818839 if (gmu -> legacy ) {
@@ -876,12 +897,15 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
876897 if (adreno_is_a650_family (adreno_gpu ) || adreno_is_a7xx (adreno_gpu )) {
877898 gmu_write (gmu , REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF , 1 );
878899 gmu_write (gmu , REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF , 1 );
900+ } else if (adreno_is_a8xx (adreno_gpu )) {
901+ gmu_write (gmu , REG_A8XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF , 1 );
902+ gmu_write (gmu , REG_A8XX_GPU_GMU_CX_GMU_CX_FAL_INTF , 1 );
879903 }
880904
881905 /* Turn on TCM (Tightly Coupled Memory) retention */
882906 if (adreno_is_a7xx (adreno_gpu ))
883907 a6xx_llc_write (a6xx_gpu , REG_A7XX_CX_MISC_TCM_RET_CNTL , 1 );
884- else
908+ else if (! adreno_is_a8xx ( adreno_gpu ))
885909 gmu_write (gmu , REG_A6XX_GMU_GENERAL_7 , 1 );
886910
887911 ret = a6xx_rpmh_start (gmu );
@@ -906,7 +930,10 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
906930 gmu_write (gmu , REG_A6XX_GMU_HFI_QTBL_ADDR , gmu -> hfi .iova );
907931 gmu_write (gmu , REG_A6XX_GMU_HFI_QTBL_INFO , 1 );
908932
909- if (adreno_is_a7xx (adreno_gpu )) {
933+ if (adreno_is_a8xx (adreno_gpu )) {
934+ fence_range_upper = 0x32 ;
935+ fence_range_lower = 0x8c0 ;
936+ } else if (adreno_is_a7xx (adreno_gpu )) {
910937 fence_range_upper = 0x32 ;
911938 fence_range_lower = 0x8a0 ;
912939 } else {
@@ -940,7 +967,12 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
940967 chipid |= (adreno_gpu -> chip_id << 8 ) & 0x0f00 ; /* patchid */
941968 }
942969
943- if (adreno_is_a7xx (adreno_gpu )) {
970+ if (adreno_is_a8xx (adreno_gpu )) {
971+ gmu_write (gmu , REG_A8XX_GMU_GENERAL_10 , chipid );
972+ gmu_write (gmu , REG_A8XX_GMU_GENERAL_8 ,
973+ (gmu -> log .iova & GENMASK (31 , 12 )) |
974+ ((gmu -> log .size / SZ_4K - 1 ) & GENMASK (7 , 0 )));
975+ } else if (adreno_is_a7xx (adreno_gpu )) {
944976 gmu_write (gmu , REG_A7XX_GMU_GENERAL_10 , chipid );
945977 gmu_write (gmu , REG_A7XX_GMU_GENERAL_8 ,
946978 (gmu -> log .iova & GENMASK (31 , 12 )) |
@@ -1003,7 +1035,7 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
10031035 u32 val , seqmem_off = 0 ;
10041036
10051037 /* The second spin of A7xx GPUs messed with some register offsets.. */
1006- if (adreno_is_a740_family (adreno_gpu ))
1038+ if (adreno_is_a740_family (adreno_gpu ) || adreno_is_a8xx ( adreno_gpu ) )
10071039 seqmem_off = 4 ;
10081040
10091041 /* Make sure there are no outstanding RPMh votes */
@@ -1016,7 +1048,7 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
10161048 gmu_poll_timeout_rscc (gmu , REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off ,
10171049 val , (val & 1 ), 100 , 1000 );
10181050
1019- if (!adreno_is_a740_family (adreno_gpu ))
1051+ if (!adreno_is_a740_family (adreno_gpu ) && ! adreno_is_a8xx ( adreno_gpu ) )
10201052 return ;
10211053
10221054 gmu_poll_timeout_rscc (gmu , REG_A7XX_RSCC_TCS4_DRV0_STATUS + seqmem_off ,
@@ -1044,7 +1076,10 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
10441076 * Turn off keep alive that might have been enabled by the hang
10451077 * interrupt
10461078 */
1047- gmu_write (& a6xx_gpu -> gmu , REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE , 0 );
1079+ if (adreno_is_a8xx (adreno_gpu ))
1080+ gmu_write (& a6xx_gpu -> gmu , REG_A8XX_GMU_GMU_PWR_COL_KEEPALIVE , 0 );
1081+ else
1082+ gmu_write (& a6xx_gpu -> gmu , REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE , 0 );
10481083
10491084 /* Flush all the queues */
10501085 a6xx_hfi_stop (gmu );
@@ -1148,7 +1183,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
11481183 enable_irq (gmu -> gmu_irq );
11491184
11501185 /* Check to see if we are doing a cold or warm boot */
1151- if (adreno_is_a7xx (adreno_gpu )) {
1186+ if (adreno_is_a7xx (adreno_gpu ) || adreno_is_a8xx ( adreno_gpu ) ) {
11521187 status = a6xx_llc_read (a6xx_gpu , REG_A7XX_CX_MISC_TCM_RET_CNTL ) == 1 ?
11531188 GMU_WARM_BOOT : GMU_COLD_BOOT ;
11541189 } else if (gmu -> legacy ) {
@@ -1477,7 +1512,7 @@ static int a6xx_gmu_rpmh_bw_votes_init(struct adreno_gpu *adreno_gpu,
14771512 vote = clamp (peak , 1 , BCM_TCS_CMD_VOTE_MASK );
14781513
14791514 /* GMUs on A7xx votes on both x & y */
1480- if (adreno_is_a7xx (adreno_gpu ))
1515+ if (adreno_is_a7xx (adreno_gpu ) || adreno_is_a8xx ( adreno_gpu ) )
14811516 data [bcm_index ] = BCM_TCS_CMD (commit , true, vote , vote );
14821517 else
14831518 data [bcm_index ] = BCM_TCS_CMD (commit , true, 0 , vote );
@@ -2070,13 +2105,14 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
20702105 */
20712106 gmu -> dummy .size = SZ_4K ;
20722107 if (adreno_is_a660_family (adreno_gpu ) ||
2073- adreno_is_a7xx (adreno_gpu )) {
2108+ adreno_is_a7xx (adreno_gpu ) ||
2109+ adreno_is_a8xx (adreno_gpu )) {
20742110 ret = a6xx_gmu_memory_alloc (gmu , & gmu -> debug , SZ_4K * 7 ,
20752111 0x60400000 , "debug" );
20762112 if (ret )
20772113 goto err_memory ;
20782114
2079- gmu -> dummy .size = SZ_8K ;
2115+ gmu -> dummy .size = SZ_16K ;
20802116 }
20812117
20822118 /* Allocate memory for the GMU dummy page */
@@ -2087,7 +2123,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
20872123
20882124 /* Note that a650 family also includes a660 family: */
20892125 if (adreno_is_a650_family (adreno_gpu ) ||
2090- adreno_is_a7xx (adreno_gpu )) {
2126+ adreno_is_a7xx (adreno_gpu ) ||
2127+ adreno_is_a8xx (adreno_gpu )) {
20912128 ret = a6xx_gmu_memory_alloc (gmu , & gmu -> icache ,
20922129 SZ_16M - SZ_16K , 0x04000 , "icache" );
20932130 if (ret )
@@ -2151,6 +2188,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
21512188 ret = - ENODEV ;
21522189 goto err_mmio ;
21532190 }
2191+ } else if (adreno_is_a8xx (adreno_gpu )) {
2192+ gmu -> rscc = gmu -> mmio + 0x19000 ;
21542193 } else {
21552194 gmu -> rscc = gmu -> mmio + 0x23000 ;
21562195 }