@@ -82,6 +82,23 @@ static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
8282 pm_runtime_put_autosuspend (smmu -> dev );
8383}
8484
/*
 * Enable runtime-PM autosuspend on the SMMU device, with a short
 * 20 ms delay (pm_runtime_set_autosuspend_delay() takes milliseconds).
 * Shared by the attach paths so they all get identical runpm behaviour.
 */
static void arm_smmu_rpm_use_autosuspend(struct arm_smmu_device *smmu)
{
	/*
	 * Setup an autosuspend delay to avoid bouncing runpm state.
	 * Otherwise, if a driver for a suspended consumer device
	 * unmaps buffers, it will runpm resume/suspend for each one.
	 *
	 * For example, when used by a GPU device, when an application
	 * or game exits, it can trigger unmapping 100s or 1000s of
	 * buffers. With a runpm cycle for each buffer, that adds up
	 * to 5-10sec worth of reprogramming the context bank, while
	 * the system appears to be locked up to the user.
	 */
	pm_runtime_set_autosuspend_delay(smmu->dev, 20);
	pm_runtime_use_autosuspend(smmu->dev);
}
101+
85102static struct arm_smmu_domain * to_smmu_domain (struct iommu_domain * dom )
86103{
87104 return container_of (dom , struct arm_smmu_domain , domain );
@@ -624,12 +641,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
624641 if (smmu_domain -> smmu )
625642 goto out_unlock ;
626643
627- if (domain -> type == IOMMU_DOMAIN_IDENTITY ) {
628- smmu_domain -> stage = ARM_SMMU_DOMAIN_BYPASS ;
629- smmu_domain -> smmu = smmu ;
630- goto out_unlock ;
631- }
632-
633644 /*
634645 * Mapping the requested stage onto what we support is surprisingly
635646 * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -825,7 +836,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
825836 struct arm_smmu_cfg * cfg = & smmu_domain -> cfg ;
826837 int ret , irq ;
827838
828- if (!smmu || domain -> type == IOMMU_DOMAIN_IDENTITY )
839+ if (!smmu )
829840 return ;
830841
831842 ret = arm_smmu_rpm_get (smmu );
@@ -854,7 +865,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
854865{
855866 struct arm_smmu_domain * smmu_domain ;
856867
857- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_IDENTITY ) {
868+ if (type != IOMMU_DOMAIN_UNMANAGED ) {
858869 if (using_legacy_binding || type != IOMMU_DOMAIN_DMA )
859870 return NULL ;
860871 }
@@ -1145,32 +1156,45 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
11451156 }
11461157
11471158 /* Looks ok, so add the device to the domain */
1148- arm_smmu_master_install_s2crs (cfg ,
1149- smmu_domain -> stage ==
1150- ARM_SMMU_DOMAIN_BYPASS ?
1151- S2CR_TYPE_BYPASS :
1152- S2CR_TYPE_TRANS ,
1159+ arm_smmu_master_install_s2crs (cfg , S2CR_TYPE_TRANS ,
11531160 smmu_domain -> cfg .cbndx , fwspec );
1154-
1155- /*
1156- * Setup an autosuspend delay to avoid bouncing runpm state.
1157- * Otherwise, if a driver for a suspended consumer device
1158- * unmaps buffers, it will runpm resume/suspend for each one.
1159- *
1160- * For example, when used by a GPU device, when an application
1161- * or game exits, it can trigger unmapping 100s or 1000s of
1162- * buffers. With a runpm cycle for each buffer, that adds up
1163- * to 5-10sec worth of reprogramming the context bank, while
1164- * the system appears to be locked up to the user.
1165- */
1166- pm_runtime_set_autosuspend_delay (smmu -> dev , 20 );
1167- pm_runtime_use_autosuspend (smmu -> dev );
1168-
1161+ arm_smmu_rpm_use_autosuspend (smmu );
11691162rpm_put :
11701163 arm_smmu_rpm_put (smmu );
11711164 return ret ;
11721165}
11731166
/*
 * attach_dev callback for the static identity domain: program the
 * master's stream-match entries to S2CR_TYPE_BYPASS so its transactions
 * pass through untranslated.  The cbndx argument is 0 here — presumably
 * it is ignored for the bypass type; verify against
 * arm_smmu_master_install_s2crs().
 *
 * Returns 0 on success, -ENODEV when no per-device config exists
 * (NOTE(review): taken to mean the device is not known to this SMMU),
 * or a negative errno from arm_smmu_rpm_get().
 */
static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
					struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_device *smmu;
	int ret;

	if (!cfg)
		return -ENODEV;
	smmu = cfg->smmu;

	/* Power up the SMMU before touching the S2CR registers. */
	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return ret;

	arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_BYPASS, 0, fwspec);
	arm_smmu_rpm_use_autosuspend(smmu);
	arm_smmu_rpm_put(smmu);
	return 0;
}
1188+
/* Domain ops for the identity domain: only attach_dev is needed. */
static const struct iommu_domain_ops arm_smmu_identity_ops = {
	.attach_dev = arm_smmu_attach_dev_identity,
};

/*
 * A single statically-allocated identity domain, exposed through
 * arm_smmu_ops.identity_domain.  It replaces the former per-instance
 * IOMMU_DOMAIN_IDENTITY handling in domain_alloc/init_domain_context.
 */
static struct iommu_domain arm_smmu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &arm_smmu_identity_ops,
};
1197+
11741198static int arm_smmu_map_pages (struct iommu_domain * domain , unsigned long iova ,
11751199 phys_addr_t paddr , size_t pgsize , size_t pgcount ,
11761200 int prot , gfp_t gfp , size_t * mapped )
@@ -1557,6 +1581,7 @@ static int arm_smmu_def_domain_type(struct device *dev)
15571581}
15581582
15591583static struct iommu_ops arm_smmu_ops = {
1584+ .identity_domain = & arm_smmu_identity_domain ,
15601585 .capable = arm_smmu_capable ,
15611586 .domain_alloc = arm_smmu_domain_alloc ,
15621587 .probe_device = arm_smmu_probe_device ,
0 commit comments