@@ -345,6 +345,149 @@ static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain,
345345 return phys ;
346346}
347347
348+ static phys_addr_t qcom_smmu_iova_to_phys_hard (struct arm_smmu_domain * smmu_domain , dma_addr_t iova )
349+ {
350+ struct arm_smmu_device * smmu = smmu_domain -> smmu ;
351+ int idx = smmu_domain -> cfg .cbndx ;
352+ u32 frsynra ;
353+ u16 sid ;
354+
355+ frsynra = arm_smmu_gr1_read (smmu , ARM_SMMU_GR1_CBFRSYNRA (idx ));
356+ sid = FIELD_GET (ARM_SMMU_CBFRSYNRA_SID , frsynra );
357+
358+ return qcom_iova_to_phys (smmu_domain , iova , sid );
359+ }
360+
361+ static phys_addr_t qcom_smmu_verify_fault (struct arm_smmu_domain * smmu_domain , dma_addr_t iova , u32 fsr )
362+ {
363+ struct io_pgtable * iop = io_pgtable_ops_to_pgtable (smmu_domain -> pgtbl_ops );
364+ struct arm_smmu_device * smmu = smmu_domain -> smmu ;
365+ phys_addr_t phys_post_tlbiall ;
366+ phys_addr_t phys ;
367+
368+ phys = qcom_smmu_iova_to_phys_hard (smmu_domain , iova );
369+ io_pgtable_tlb_flush_all (iop );
370+ phys_post_tlbiall = qcom_smmu_iova_to_phys_hard (smmu_domain , iova );
371+
372+ if (phys != phys_post_tlbiall ) {
373+ dev_err (smmu -> dev ,
374+ "ATOS results differed across TLBIALL... (before: %pa after: %pa)\n" ,
375+ & phys , & phys_post_tlbiall );
376+ }
377+
378+ return (phys == 0 ? phys_post_tlbiall : phys );
379+ }
380+
/*
 * Context-fault IRQ handler for Qualcomm SMMUs with TBU debug support.
 *
 * Reads the fault syndrome registers for the domain's context bank,
 * reports the fault to any registered IOMMU fault handler, and — when
 * TBU devices are available — augments the diagnostics with software
 * and hardware (ATOS) page-table walks of the faulting IOVA.
 *
 * @irq: interrupt number (unused here).
 * @dev: opaque pointer, actually the faulting struct arm_smmu_domain.
 *
 * Returns IRQ_HANDLED if the fault was consumed, IRQ_NONE otherwise.
 */
irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
{
	struct arm_smmu_domain *smmu_domain = dev;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	u32 fsr, fsynr, cbfrsynra, resume = 0;
	int idx = smmu_domain->cfg.cbndx;
	phys_addr_t phys_soft;
	unsigned long iova;
	int ret, tmp;

	/* Rate-limit the verbose unhandled-fault diagnostics below. */
	static DEFINE_RATELIMIT_STATE(_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/* Spurious interrupt if no fault bit is set in FSR. */
	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	/* Capture the fault syndrome: cause, faulting address, stream ID. */
	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	/*
	 * No TBUs registered (presumably: list name suggests TBU devices):
	 * fall back to the plain report-and-clear path with no hardware
	 * translation diagnostics.
	 */
	if (list_empty(&tbu_list)) {
		ret = report_iommu_fault(&smmu_domain->domain, NULL, iova,
					 fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);

		/* -ENOSYS means no client handler was registered. */
		if (ret == -ENOSYS)
			dev_err_ratelimited(smmu->dev,
					    "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
					    fsr, iova, fsynr, cbfrsynra, idx);

		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
		return IRQ_HANDLED;
	}

	/* Software page-table walk of the faulting IOVA, for comparison. */
	phys_soft = ops->iova_to_phys(ops, iova);

	tmp = report_iommu_fault(&smmu_domain->domain, NULL, iova,
				 fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
	if (!tmp || tmp == -EBUSY) {
		/*
		 * Client handled the fault (0) or wants it kept pending
		 * (-EBUSY, see the stall comment below).
		 */
		dev_dbg(smmu->dev,
			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, idx);
		dev_dbg(smmu->dev, "soft iova-to-phys=%pa\n", &phys_soft);
		ret = IRQ_HANDLED;
		resume = ARM_SMMU_RESUME_TERMINATE;
	} else {
		/* Unhandled: cross-check with a hardware ATOS translation. */
		phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, iova, fsr);

		if (__ratelimit(&_rs)) {
			dev_err(smmu->dev,
				"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
				fsr, iova, fsynr, cbfrsynra, idx);
			/* Decode the individual FSR fault-status bits. */
			dev_err(smmu->dev,
				"FSR = %08x [%s%s%s%s%s%s%s%s%s], SID=0x%x\n",
				fsr,
				(fsr & 0x02) ? "TF " : "",
				(fsr & 0x04) ? "AFF " : "",
				(fsr & 0x08) ? "PF " : "",
				(fsr & 0x10) ? "EF " : "",
				(fsr & 0x20) ? "TLBMCF " : "",
				(fsr & 0x40) ? "TLBLKF " : "",
				(fsr & 0x80) ? "MHF " : "",
				(fsr & 0x40000000) ? "SS " : "",
				(fsr & 0x80000000) ? "MULTI " : "",
				cbfrsynra);

			dev_err(smmu->dev,
				"soft iova-to-phys=%pa\n", &phys_soft);
			if (!phys_soft)
				dev_err(smmu->dev,
					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
					dev_name(smmu->dev));
			if (phys_atos)
				dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
					&phys_atos);
			else
				dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
		}
		ret = IRQ_NONE;
		resume = ARM_SMMU_RESUME_TERMINATE;
	}

	/*
	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
	 * if stalled. This is required to keep the IOMMU client stalled on
	 * the outstanding fault. This gives the client a chance to take any
	 * debug action and then terminate the stalled transaction.
	 * So, the sequence in case of stall on fault should be:
	 * 1) Do not clear FSR or write to RESUME here
	 * 2) Client takes any debug action
	 * 3) Client terminates the stalled transaction and resumes the IOMMU
	 * 4) Client clears FSR. The FSR should only be cleared after 3) and
	 * not before so that the fault remains outstanding. This ensures
	 * SCTLR.HUPCF has the desired effect if subsequent transactions also
	 * need to be terminated.
	 */
	if (tmp != -EBUSY) {
		/* Clear the faulting FSR */
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);

		/* Retry or terminate any stalled transactions */
		if (fsr & ARM_SMMU_FSR_SS)
			arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, resume);
	}

	return ret;
}
490+
348491static int qcom_tbu_probe (struct platform_device * pdev )
349492{
350493 struct of_phandle_args args = { .args_count = 2 };
0 commit comments