@@ -395,13 +395,32 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
395395 pm_runtime_put_autosuspend (pfdev -> base .dev );
396396}
397397
398+ static void mmu_unmap_range (struct panfrost_mmu * mmu , u64 iova , size_t len )
399+ {
400+ struct io_pgtable_ops * ops = mmu -> pgtbl_ops ;
401+ size_t pgsize , unmapped_len = 0 ;
402+ size_t unmapped_page , pgcount ;
403+
404+ while (unmapped_len < len ) {
405+ pgsize = get_pgsize (iova , len - unmapped_len , & pgcount );
406+
407+ unmapped_page = ops -> unmap_pages (ops , iova , pgsize , pgcount , NULL );
408+ WARN_ON (unmapped_page != pgsize * pgcount );
409+
410+ iova += pgsize * pgcount ;
411+ unmapped_len += pgsize * pgcount ;
412+ }
413+ }
414+
/*
 * mmu_map_sg() - map a scatter-gather table into a Panfrost MMU context.
 *
 * Walks each DMA segment of @sgt and maps it at @iova (with protection
 * @prot) through the io-pgtable map_pages() op.  If map_pages() fails,
 * everything mapped so far (accumulated in total_mapped) is torn down
 * again via mmu_unmap_range() and the error is returned; on success the
 * whole range is flushed and 0 is returned.
 *
 * NOTE(review): this is a diff fragment — the "@@" marker below elides a
 * few lines (the per-segment length setup and the inner while-loop
 * header), so `len` used further down is declared in the elided part.
 */
398415static int mmu_map_sg (struct panfrost_device * pfdev , struct panfrost_mmu * mmu ,
399416 u64 iova , int prot , struct sg_table * sgt )
400417{
401418 unsigned int count ;
402419 struct scatterlist * sgl ;
403420 struct io_pgtable_ops * ops = mmu -> pgtbl_ops ;
421+ size_t total_mapped = 0 ;
404422 u64 start_iova = iova ;
423+ int ret ;
405424
406425 for_each_sgtable_dma_sg (sgt , sgl , count ) {
407426 unsigned long paddr = sg_dma_address (sgl );
@@ -415,10 +434,14 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
415434 size_t pgcount , mapped = 0 ;
416435 size_t pgsize = get_pgsize (iova | paddr , len , & pgcount );
417436
418- ops -> map_pages (ops , iova , paddr , pgsize , pgcount , prot ,
437+ ret = ops -> map_pages (ops , iova , paddr , pgsize , pgcount , prot ,
419438 GFP_KERNEL , & mapped );
439+ if (ret )
440+ goto err_unmap_pages ;
441+
420442 /* Don't get stuck if things have gone wrong */
421443 mapped = max (mapped , pgsize );
/*
 * Account what was mapped so the error path can unwind it.
 * NOTE(review): because of the max() guard above, total_mapped could
 * over-count if map_pages() mapped less than pgsize without returning
 * an error — presumed not to happen; the rollback's WARN_ON would fire.
 */
444+ total_mapped += mapped ;
422445 iova += mapped ;
423446 paddr += mapped ;
424447 len -= mapped ;
@@ -428,6 +451,10 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
428451 panfrost_mmu_flush_range (pfdev , mmu , start_iova , iova - start_iova );
429452
430453 return 0 ;
454+
/* Roll back every page mapped before the failure. */
455+ err_unmap_pages :
456+ mmu_unmap_range (mmu , start_iova , total_mapped );
457+ return ret ;
431458}
432459
/*
 * panfrost_mmu_map() - map a GEM mapping into its MMU context.
 *
 * Returns 0 immediately (with a WARN) if the mapping is already active.
 * The object's sg_table is obtained in lines elided from this fragment;
 * it is then mapped at mmnode.start << PAGE_SHIFT with read/write/cached
 * protection.  On mmu_map_sg() failure the shmem page references are
 * dropped again and the error is propagated; on success the mapping is
 * flagged active.
 *
 * NOTE(review): diff fragment — the "@@" markers below elide the local
 * declarations (obj, shmem, ...) and the pages/sgt acquisition, so those
 * identifiers are defined in the elided part.
 */
433460int panfrost_mmu_map (struct panfrost_gem_mapping * mapping )
@@ -438,6 +465,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
438465 struct panfrost_device * pfdev = to_panfrost_device (obj -> dev );
439466 struct sg_table * sgt ;
440467 int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE ;
468+ int ret ;
441469
442470 if (WARN_ON (mapping -> active ))
443471 return 0 ;
@@ -449,11 +477,18 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
449477 if (WARN_ON (IS_ERR (sgt )))
450478 return PTR_ERR (sgt );
451479
452- mmu_map_sg (pfdev , mapping -> mmu , mapping -> mmnode .start << PAGE_SHIFT ,
453- prot , sgt );
480+ ret = mmu_map_sg (pfdev , mapping -> mmu , mapping -> mmnode .start << PAGE_SHIFT ,
481+ prot , sgt );
482+ if (ret )
483+ goto err_put_pages ;
484+
454485 mapping -> active = true;
455486
456487 return 0 ;
488+
/*
 * NOTE(review): presumably balances a get_pages taken in the elided
 * setup above — verify against the full function.
 */
489+ err_put_pages :
490+ drm_gem_shmem_put_pages_locked (shmem );
491+ return ret ;
457492}
458493
459494void panfrost_mmu_unmap (struct panfrost_gem_mapping * mapping )
@@ -638,8 +673,10 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
638673 if (ret )
639674 goto err_map ;
640675
641- mmu_map_sg (pfdev , bomapping -> mmu , addr ,
642- IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC , sgt );
676+ ret = mmu_map_sg (pfdev , bomapping -> mmu , addr ,
677+ IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC , sgt );
678+ if (ret )
679+ goto err_mmu_map_sg ;
643680
644681 bomapping -> active = true;
645682 bo -> heap_rss_size += SZ_2M ;
@@ -653,6 +690,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
653690
654691 return 0 ;
655692
693+ err_mmu_map_sg :
694+ dma_unmap_sgtable (pfdev -> base .dev , sgt , DMA_BIDIRECTIONAL , 0 );
656695err_map :
657696 sg_free_table (sgt );
658697err_unlock :
0 commit comments