@@ -1063,6 +1063,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
10631063 bool cd_live ;
10641064 __le64 * cdptr ;
10651065 struct arm_smmu_ctx_desc_cfg * cd_table = & master -> cd_table ;
1066+ struct arm_smmu_device * smmu = master -> smmu ;
10661067
10671068 if (WARN_ON (ssid >= (1 << cd_table -> s1cdmax )))
10681069 return - E2BIG ;
@@ -1077,6 +1078,8 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
10771078 if (!cd ) { /* (5) */
10781079 val = 0 ;
10791080 } else if (cd == & quiet_cd ) { /* (4) */
1081+ if (!(smmu -> features & ARM_SMMU_FEAT_STALL_FORCE ))
1082+ val &= ~(CTXDESC_CD_0_S | CTXDESC_CD_0_R );
10801083 val |= CTXDESC_CD_0_TCR_EPD0 ;
10811084 } else if (cd_live ) { /* (3) */
10821085 val &= ~CTXDESC_CD_0_ASID ;
@@ -1249,7 +1252,7 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
12491252}
12501253
12511254static void arm_smmu_write_strtab_ent (struct arm_smmu_master * master , u32 sid ,
1252- __le64 * dst )
1255+ struct arm_smmu_ste * dst )
12531256{
12541257 /*
12551258 * This is hideously complicated, but we only really care about
@@ -1267,31 +1270,25 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
12671270 * 2. Write everything apart from dword 0, sync, write dword 0, sync
12681271 * 3. Update Config, sync
12691272 */
1270- u64 val = le64_to_cpu (dst [0 ]);
1273+ u64 val = le64_to_cpu (dst -> data [0 ]);
12711274 bool ste_live = false;
1272- struct arm_smmu_device * smmu = NULL ;
1275+ struct arm_smmu_device * smmu = master -> smmu ;
12731276 struct arm_smmu_ctx_desc_cfg * cd_table = NULL ;
12741277 struct arm_smmu_s2_cfg * s2_cfg = NULL ;
1275- struct arm_smmu_domain * smmu_domain = NULL ;
1278+ struct arm_smmu_domain * smmu_domain = master -> domain ;
12761279 struct arm_smmu_cmdq_ent prefetch_cmd = {
12771280 .opcode = CMDQ_OP_PREFETCH_CFG ,
12781281 .prefetch = {
12791282 .sid = sid ,
12801283 },
12811284 };
12821285
1283- if (master ) {
1284- smmu_domain = master -> domain ;
1285- smmu = master -> smmu ;
1286- }
1287-
12881286 if (smmu_domain ) {
12891287 switch (smmu_domain -> stage ) {
12901288 case ARM_SMMU_DOMAIN_S1 :
12911289 cd_table = & master -> cd_table ;
12921290 break ;
12931291 case ARM_SMMU_DOMAIN_S2 :
1294- case ARM_SMMU_DOMAIN_NESTED :
12951292 s2_cfg = & smmu_domain -> s2_cfg ;
12961293 break ;
12971294 default :
@@ -1325,10 +1322,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
13251322 else
13261323 val |= FIELD_PREP (STRTAB_STE_0_CFG , STRTAB_STE_0_CFG_BYPASS );
13271324
1328- dst [0 ] = cpu_to_le64 (val );
1329- dst [1 ] = cpu_to_le64 (FIELD_PREP (STRTAB_STE_1_SHCFG ,
1325+ dst -> data [0 ] = cpu_to_le64 (val );
1326+ dst -> data [1 ] = cpu_to_le64 (FIELD_PREP (STRTAB_STE_1_SHCFG ,
13301327 STRTAB_STE_1_SHCFG_INCOMING ));
1331- dst [2 ] = 0 ; /* Nuke the VMID */
1328+ dst -> data [2 ] = 0 ; /* Nuke the VMID */
13321329 /*
13331330 * The SMMU can perform negative caching, so we must sync
13341331 * the STE regardless of whether the old value was live.
@@ -1343,7 +1340,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
13431340 STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1 ;
13441341
13451342 BUG_ON (ste_live );
1346- dst [1 ] = cpu_to_le64 (
1343+ dst -> data [1 ] = cpu_to_le64 (
13471344 FIELD_PREP (STRTAB_STE_1_S1DSS , STRTAB_STE_1_S1DSS_SSID0 ) |
13481345 FIELD_PREP (STRTAB_STE_1_S1CIR , STRTAB_STE_1_S1C_CACHE_WBRA ) |
13491346 FIELD_PREP (STRTAB_STE_1_S1COR , STRTAB_STE_1_S1C_CACHE_WBRA ) |
@@ -1352,7 +1349,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
13521349
13531350 if (smmu -> features & ARM_SMMU_FEAT_STALLS &&
13541351 !master -> stall_enabled )
1355- dst [1 ] |= cpu_to_le64 (STRTAB_STE_1_S1STALLD );
1352+ dst -> data [1 ] |= cpu_to_le64 (STRTAB_STE_1_S1STALLD );
13561353
13571354 val |= (cd_table -> cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK ) |
13581355 FIELD_PREP (STRTAB_STE_0_CFG , STRTAB_STE_0_CFG_S1_TRANS ) |
@@ -1362,7 +1359,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
13621359
13631360 if (s2_cfg ) {
13641361 BUG_ON (ste_live );
1365- dst [2 ] = cpu_to_le64 (
1362+ dst -> data [2 ] = cpu_to_le64 (
13661363 FIELD_PREP (STRTAB_STE_2_S2VMID , s2_cfg -> vmid ) |
13671364 FIELD_PREP (STRTAB_STE_2_VTCR , s2_cfg -> vtcr ) |
13681365#ifdef __BIG_ENDIAN
@@ -1371,26 +1368,27 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
13711368 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
13721369 STRTAB_STE_2_S2R );
13731370
1374- dst [3 ] = cpu_to_le64 (s2_cfg -> vttbr & STRTAB_STE_3_S2TTB_MASK );
1371+ dst -> data [3 ] = cpu_to_le64 (s2_cfg -> vttbr & STRTAB_STE_3_S2TTB_MASK );
13751372
13761373 val |= FIELD_PREP (STRTAB_STE_0_CFG , STRTAB_STE_0_CFG_S2_TRANS );
13771374 }
13781375
13791376 if (master -> ats_enabled )
1380- dst [1 ] |= cpu_to_le64 (FIELD_PREP (STRTAB_STE_1_EATS ,
1377+ dst -> data [1 ] |= cpu_to_le64 (FIELD_PREP (STRTAB_STE_1_EATS ,
13811378 STRTAB_STE_1_EATS_TRANS ));
13821379
13831380 arm_smmu_sync_ste_for_sid (smmu , sid );
13841381 /* See comment in arm_smmu_write_ctx_desc() */
1385- WRITE_ONCE (dst [0 ], cpu_to_le64 (val ));
1382+ WRITE_ONCE (dst -> data [0 ], cpu_to_le64 (val ));
13861383 arm_smmu_sync_ste_for_sid (smmu , sid );
13871384
13881385 /* It's likely that we'll want to use the new STE soon */
13891386 if (!(smmu -> options & ARM_SMMU_OPT_SKIP_PREFETCH ))
13901387 arm_smmu_cmdq_issue_cmd (smmu , & prefetch_cmd );
13911388}
13921389
1393- static void arm_smmu_init_bypass_stes (__le64 * strtab , unsigned int nent , bool force )
1390+ static void arm_smmu_init_bypass_stes (struct arm_smmu_ste * strtab ,
1391+ unsigned int nent , bool force )
13941392{
13951393 unsigned int i ;
13961394 u64 val = STRTAB_STE_0_V ;
@@ -1401,11 +1399,11 @@ static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool fo
14011399 val |= FIELD_PREP (STRTAB_STE_0_CFG , STRTAB_STE_0_CFG_BYPASS );
14021400
14031401 for (i = 0 ; i < nent ; ++ i ) {
1404- strtab [0 ] = cpu_to_le64 (val );
1405- strtab [1 ] = cpu_to_le64 (FIELD_PREP (STRTAB_STE_1_SHCFG ,
1406- STRTAB_STE_1_SHCFG_INCOMING ));
1407- strtab [2 ] = 0 ;
1408- strtab += STRTAB_STE_DWORDS ;
1402+ strtab -> data [0 ] = cpu_to_le64 (val );
1403+ strtab -> data [1 ] = cpu_to_le64 (FIELD_PREP (
1404+ STRTAB_STE_1_SHCFG , STRTAB_STE_1_SHCFG_INCOMING ));
1405+ strtab -> data [2 ] = 0 ;
1406+ strtab ++ ;
14091407 }
14101408}
14111409
@@ -2171,7 +2169,6 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
21712169 fmt = ARM_64_LPAE_S1 ;
21722170 finalise_stage_fn = arm_smmu_domain_finalise_s1 ;
21732171 break ;
2174- case ARM_SMMU_DOMAIN_NESTED :
21752172 case ARM_SMMU_DOMAIN_S2 :
21762173 ias = smmu -> ias ;
21772174 oas = smmu -> oas ;
@@ -2209,26 +2206,23 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
22092206 return 0 ;
22102207}
22112208
2212- static __le64 * arm_smmu_get_step_for_sid (struct arm_smmu_device * smmu , u32 sid )
2209+ static struct arm_smmu_ste *
2210+ arm_smmu_get_step_for_sid (struct arm_smmu_device * smmu , u32 sid )
22132211{
2214- __le64 * step ;
22152212 struct arm_smmu_strtab_cfg * cfg = & smmu -> strtab_cfg ;
22162213
22172214 if (smmu -> features & ARM_SMMU_FEAT_2_LVL_STRTAB ) {
2218- struct arm_smmu_strtab_l1_desc * l1_desc ;
2219- int idx ;
2215+ unsigned int idx1 , idx2 ;
22202216
22212217 /* Two-level walk */
2222- idx = (sid >> STRTAB_SPLIT ) * STRTAB_L1_DESC_DWORDS ;
2223- l1_desc = & cfg -> l1_desc [idx ];
2224- idx = (sid & ((1 << STRTAB_SPLIT ) - 1 )) * STRTAB_STE_DWORDS ;
2225- step = & l1_desc -> l2ptr [idx ];
2218+ idx1 = (sid >> STRTAB_SPLIT ) * STRTAB_L1_DESC_DWORDS ;
2219+ idx2 = sid & ((1 << STRTAB_SPLIT ) - 1 );
2220+ return & cfg -> l1_desc [idx1 ].l2ptr [idx2 ];
22262221 } else {
22272222 /* Simple linear lookup */
2228- step = & cfg -> strtab [sid * STRTAB_STE_DWORDS ];
2223+ return (struct arm_smmu_ste * )& cfg
2224+ -> strtab [sid * STRTAB_STE_DWORDS ];
22292225 }
2230-
2231- return step ;
22322226}
22332227
22342228static void arm_smmu_install_ste_for_dev (struct arm_smmu_master * master )
@@ -2238,7 +2232,8 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
22382232
22392233 for (i = 0 ; i < master -> num_streams ; ++ i ) {
22402234 u32 sid = master -> streams [i ].id ;
2241- __le64 * step = arm_smmu_get_step_for_sid (smmu , sid );
2235+ struct arm_smmu_ste * step =
2236+ arm_smmu_get_step_for_sid (smmu , sid );
22422237
22432238 /* Bridged PCI devices may end up with duplicated IDs */
22442239 for (j = 0 ; j < i ; j ++ )
@@ -2742,7 +2737,7 @@ static int arm_smmu_enable_nesting(struct iommu_domain *domain)
27422737 if (smmu_domain -> smmu )
27432738 ret = - EPERM ;
27442739 else
2745- smmu_domain -> stage = ARM_SMMU_DOMAIN_NESTED ;
2740+ smmu_domain -> stage = ARM_SMMU_DOMAIN_S2 ;
27462741 mutex_unlock (& smmu_domain -> init_mutex );
27472742
27482743 return ret ;
@@ -3769,7 +3764,7 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
37693764 iort_get_rmr_sids (dev_fwnode (smmu -> dev ), & rmr_list );
37703765
37713766 list_for_each_entry (e , & rmr_list , list ) {
3772- __le64 * step ;
3767+ struct arm_smmu_ste * step ;
37733768 struct iommu_iort_rmr_data * rmr ;
37743769 int ret , i ;
37753770