@@ -1252,7 +1252,7 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
12521252}
12531253
12541254	static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
1255-	 				      __le64 *dst)
1255+	 				      struct arm_smmu_ste *dst)
12561256{
12571257 /*
12581258 * This is hideously complicated, but we only really care about
@@ -1270,7 +1270,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
12701270 * 2. Write everything apart from dword 0, sync, write dword 0, sync
12711271 * 3. Update Config, sync
12721272 */
1273-	 	u64 val = le64_to_cpu(dst[0]);
1273+	 	u64 val = le64_to_cpu(dst->data[0]);
12741274		bool ste_live = false;
12751275		struct arm_smmu_device *smmu = NULL;
12761276		struct arm_smmu_ctx_desc_cfg *cd_table = NULL;
@@ -1328,10 +1328,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
13281328		else
13291329			val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
13301330
1331-	 		dst[0] = cpu_to_le64(val);
1332-	 		dst[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
1331+	 		dst->data[0] = cpu_to_le64(val);
1332+	 		dst->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
13331333							STRTAB_STE_1_SHCFG_INCOMING));
1334-	 		dst[2] = 0; /* Nuke the VMID */
1334+	 		dst->data[2] = 0; /* Nuke the VMID */
13351335 /*
13361336 * The SMMU can perform negative caching, so we must sync
13371337 * the STE regardless of whether the old value was live.
@@ -1346,7 +1346,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
13461346			STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
13471347
13481348		BUG_ON(ste_live);
1349-	 	dst[1] = cpu_to_le64(
1349+	 	dst->data[1] = cpu_to_le64(
13501350			 FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
13511351			 FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
13521352			 FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
@@ -1355,7 +1355,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
13551355
13561356		if (smmu->features & ARM_SMMU_FEAT_STALLS &&
13571357		    !master->stall_enabled)
1358-	 		dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
1358+	 		dst->data[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
13591359
13601360		val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
13611361			FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
@@ -1365,7 +1365,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
13651365
13661366		if (s2_cfg) {
13671367			BUG_ON(ste_live);
1368-	 		dst[2] = cpu_to_le64(
1368+	 		dst->data[2] = cpu_to_le64(
13691369				 FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
13701370				 FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
13711371#ifdef __BIG_ENDIAN
@@ -1374,26 +1374,27 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
13741374			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
13751375			 STRTAB_STE_2_S2R);
13761376
1377-	 		dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
1377+	 		dst->data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
13781378
13791379		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
13801380		}
13811381
13821382		if (master->ats_enabled)
1383-	 		dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
1383+	 		dst->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
13841384						 STRTAB_STE_1_EATS_TRANS));
13851385
13861386		arm_smmu_sync_ste_for_sid(smmu, sid);
13871387		/* See comment in arm_smmu_write_ctx_desc() */
1388-	 	WRITE_ONCE(dst[0], cpu_to_le64(val));
1388+	 	WRITE_ONCE(dst->data[0], cpu_to_le64(val));
13891389		arm_smmu_sync_ste_for_sid(smmu, sid);
13901390
13911391		/* It's likely that we'll want to use the new STE soon */
13921392		if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
13931393			arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
13941394	}
13951395
1396-	 static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force)
1396+	 static void arm_smmu_init_bypass_stes(struct arm_smmu_ste *strtab,
1397+	 				      unsigned int nent, bool force)
13971398	{
13981399		unsigned int i;
13991400		u64 val = STRTAB_STE_0_V;
@@ -1404,11 +1405,11 @@ static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool fo
14041405		val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
14051406
14061407		for (i = 0; i < nent; ++i) {
1407-	 		strtab[0] = cpu_to_le64(val);
1408-	 		strtab[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
1409-	 						   STRTAB_STE_1_SHCFG_INCOMING));
1410-	 		strtab[2] = 0;
1411-	 		strtab += STRTAB_STE_DWORDS;
1408+	 		strtab->data[0] = cpu_to_le64(val);
1409+	 		strtab->data[1] = cpu_to_le64(FIELD_PREP(
1410+	 			STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
1411+	 		strtab->data[2] = 0;
1412+	 		strtab++;
14121413		}
14131414	}
14141415
@@ -2212,26 +2213,23 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
22122213 return 0 ;
22132214}
22142215
2215-	 static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
2216+	 static struct arm_smmu_ste *
2217+	 arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
22162218	{
2217-	 	__le64 *step;
22182219		struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
22192220
22202221		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
2221-	 		struct arm_smmu_strtab_l1_desc *l1_desc;
2222-	 		int idx;
2222+	 		unsigned int idx1, idx2;
22232223
22242224			/* Two-level walk */
2225-	 		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
2226-	 		l1_desc = &cfg->l1_desc[idx];
2227-	 		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
2228-	 		step = &l1_desc->l2ptr[idx];
2225+	 		idx1 = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
2226+	 		idx2 = sid & ((1 << STRTAB_SPLIT) - 1);
2227+	 		return &cfg->l1_desc[idx1].l2ptr[idx2];
22292228		} else {
22302229			/* Simple linear lookup */
2231-	 		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
2230+	 		return (struct arm_smmu_ste *)&cfg
2231+	 			->strtab[sid * STRTAB_STE_DWORDS];
22322232		}
2233-
2234-	 	return step;
22352233	}
22362234
22372235static void arm_smmu_install_ste_for_dev (struct arm_smmu_master * master )
@@ -2241,7 +2239,8 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
22412239
22422240		for (i = 0; i < master->num_streams; ++i) {
22432241			u32 sid = master->streams[i].id;
2244-	 		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
2242+	 		struct arm_smmu_ste *step =
2243+	 			arm_smmu_get_step_for_sid(smmu, sid);
22452244
22462245			/* Bridged PCI devices may end up with duplicated IDs */
22472246			for (j = 0; j < i; j++)
@@ -3772,7 +3771,7 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
37723771 iort_get_rmr_sids (dev_fwnode (smmu -> dev ), & rmr_list );
37733772
37743773		list_for_each_entry(e, &rmr_list, list) {
3775-	 		__le64 *step;
3774+	 		struct arm_smmu_ste *step;
37763775			struct iommu_iort_rmr_data *rmr;
37773776			int ret, i;
37783777
0 commit comments