@@ -215,8 +215,134 @@ arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
 	return &nested_domain->domain;
 }
 
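+/* Translate a guest-visible vSID into the physical SID its vDEVICE was bound to */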
+static int arm_vsmmu_vsid_to_sid(struct arm_vsmmu *vsmmu, u32 vsid, u32 *sid)
+{
+	struct arm_smmu_master *master;
+	struct device *dev;
+	int ret = 0;
+
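+	/* The vdevs xarray lock keeps the vSID-to-device binding stable here */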
+	xa_lock(&vsmmu->core.vdevs);
+	dev = iommufd_viommu_find_dev(&vsmmu->core, (unsigned long)vsid);
+	if (!dev) {
+		ret = -EIO;
+		goto unlock;
+	}
+	master = dev_iommu_priv_get(dev);
+
+	/* At this moment, iommufd only supports PCI devices that have one SID */
+	if (sid)
+		*sid = master->streams[0].id;
+unlock:
+	xa_unlock(&vsmmu->core.vdevs);
+	return ret;
+}
+
+/* This is iommu_viommu_arm_smmuv3_invalidate re-expressed in u64, for in-place conversion */
+struct arm_vsmmu_invalidation_cmd {
+	union {
+		u64 cmd[2];
+		struct iommu_viommu_arm_smmuv3_invalidate ucmd;
+	};
+};
+
+/*
+ * Convert, in place, the raw invalidation command into an internal format that
+ * can be passed to arm_smmu_cmdq_issue_cmdlist(). Internally commands are
+ * stored in CPU endian.
+ *
+ * Enforce the VMID or SID on the command.
+ */
+static int arm_vsmmu_convert_user_cmd(struct arm_vsmmu *vsmmu,
+				      struct arm_vsmmu_invalidation_cmd *cmd)
+{
+	/* Commands are le64 stored in u64 */
+	cmd->cmd[0] = le64_to_cpu(cmd->ucmd.cmd[0]);
+	cmd->cmd[1] = le64_to_cpu(cmd->ucmd.cmd[1]);
+
+	switch (cmd->cmd[0] & CMDQ_0_OP) {
+	case CMDQ_OP_TLBI_NSNH_ALL:
+		/* Convert to NH_ALL */
+		cmd->cmd[0] = CMDQ_OP_TLBI_NH_ALL |
+			      FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid);
+		cmd->cmd[1] = 0;
+		break;
+	case CMDQ_OP_TLBI_NH_VA:
+	case CMDQ_OP_TLBI_NH_VAA:
+	case CMDQ_OP_TLBI_NH_ALL:
+	case CMDQ_OP_TLBI_NH_ASID:
+		cmd->cmd[0] &= ~CMDQ_TLBI_0_VMID;
+		cmd->cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid);
+		break;
+	case CMDQ_OP_ATC_INV:
+	case CMDQ_OP_CFGI_CD:
+	case CMDQ_OP_CFGI_CD_ALL: {
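+		/* CMDQ_ATC_0_SID and CMDQ_CFGI_0_SID share the same field layout */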
+		u32 sid, vsid = FIELD_GET(CMDQ_CFGI_0_SID, cmd->cmd[0]);
+
+		if (arm_vsmmu_vsid_to_sid(vsmmu, vsid, &sid))
+			return -EIO;
+		cmd->cmd[0] &= ~CMDQ_CFGI_0_SID;
+		cmd->cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, sid);
+		break;
+	}
+	default:
+		return -EIO;
+	}
+	return 0;
+}
+
+static int arm_vsmmu_cache_invalidate(struct iommufd_viommu *viommu,
+				      struct iommu_user_data_array *array)
+{
+	struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
+	struct arm_smmu_device *smmu = vsmmu->smmu;
+	struct arm_vsmmu_invalidation_cmd *last;
+	struct arm_vsmmu_invalidation_cmd *cmds;
+	struct arm_vsmmu_invalidation_cmd *cur;
+	struct arm_vsmmu_invalidation_cmd *end;
+	int ret;
+
+	cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
+	if (!cmds)
+		return -ENOMEM;
+	cur = cmds;
+	end = cmds + array->entry_num;
+
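+	/* The union overlay is only safe if the uAPI struct is exactly two u64s */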
+	static_assert(sizeof(*cmds) == 2 * sizeof(u64));
+	ret = iommu_copy_struct_from_full_user_array(
+		cmds, sizeof(*cmds), array,
+		IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3);
+	if (ret)
+		goto out;
+
+	last = cmds;
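+	/* Convert and issue the commands in batches bounded by CMDQ_BATCH_ENTRIES */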
+	while (cur != end) {
+		ret = arm_vsmmu_convert_user_cmd(vsmmu, cur);
+		if (ret)
+			goto out;
+
+		/* FIXME work in blocks of CMDQ_BATCH_ENTRIES and copy each block? */
+		cur++;
+		if (cur != end && (cur - last) != CMDQ_BATCH_ENTRIES - 1)
+			continue;
+
+		/* FIXME always uses the main cmdq rather than trying to group by type */
+		ret = arm_smmu_cmdq_issue_cmdlist(smmu, &smmu->cmdq, last->cmd,
+						  cur - last, true);
+		if (ret) {
+			cur--;
+			goto out;
+		}
+		last = cur;
+	}
+out:
+	array->entry_num = cur - cmds;
+	kfree(cmds);
+	return ret;
+}
+
 static const struct iommufd_viommu_ops arm_vsmmu_ops = {
 	.alloc_domain_nested = arm_vsmmu_alloc_domain_nested,
+	.cache_invalidate = arm_vsmmu_cache_invalidate,
 };
 
 struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
@@ -239,6 +365,14 @@ struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
 	if (s2_parent->smmu != master->smmu)
 		return ERR_PTR(-EINVAL);
 
+	/*
+	 * FORCE_SYNC is not set with FEAT_NESTING. Some study of the exact HW
+	 * defect is needed to determine if arm_vsmmu_cache_invalidate() needs
+	 * any change to remove this.
+	 */
+	if (WARN_ON(smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	/*
 	 * Must support some way to prevent the VM from bypassing the cache
 	 * because VFIO currently does not do any cache maintenance. canwbs
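
For reference, a minimal userspace sketch of driving this path through the generic IOMMU_HWPT_INVALIDATE ioctl, which also accepts a vIOMMU object ID in hwpt_id. This is not part of the commit: the fd, viommu_id, and raw guest command values are assumptions, and a real VMM would batch many entries per call.

/*
 * Hedged sketch: forward one raw guest CMDQ entry (e.g. a CMD_TLBI_NH_ASID)
 * to the kernel. Assumes "iommufd" is an open /dev/iommu fd and "viommu_id"
 * names an existing vIOMMU object.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int forward_guest_cmd(int iommufd, uint32_t viommu_id,
			     uint64_t cmd0, uint64_t cmd1)
{
	struct iommu_viommu_arm_smmuv3_invalidate entry = {
		.cmd = { cmd0, cmd1 },	/* little-endian, as the guest wrote them */
	};
	struct iommu_hwpt_invalidate inv = {
		.size = sizeof(inv),
		.hwpt_id = viommu_id,	/* a vIOMMU object ID is accepted here */
		.data_uptr = (uint64_t)(uintptr_t)&entry,
		.data_type = IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3,
		.entry_len = sizeof(entry),
		.entry_num = 1,
	};

	if (ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &inv))
		return -1;	/* inv.entry_num reports how many entries were consumed */
	return 0;
}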