@@ -572,6 +572,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
  * @size: Size of buffer in bytes
  * @dma_handle: Out argument for allocated DMA handle
  * @gfp: Allocation flags
+ * @prot: pgprot_t to use for the remapped mapping
  * @attrs: DMA attributes for this allocation
  *
  * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
@@ -580,14 +581,14 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
  * Return: Mapped virtual address, or NULL on failure.
  */
 static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
+		unsigned long attrs)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
-	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 	struct page **pages;
 	struct sg_table sgt;
@@ -1030,8 +1031,10 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	gfp |= __GFP_ZERO;

 	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
-	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
-		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
+	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
+		return iommu_dma_alloc_remap(dev, size, handle, gfp,
+				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
+	}

 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    !gfpflags_allow_blocking(gfp) && !coherent)
@@ -1052,6 +1055,34 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	return cpu_addr;
 }

+#ifdef CONFIG_DMA_REMAP
+static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
+		dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
+{
+	if (!gfpflags_allow_blocking(gfp)) {
+		struct page *page;
+
+		page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
+		if (!page)
+			return NULL;
+		return page_address(page);
+	}
+
+	return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
+			PAGE_KERNEL, 0);
+}
+
+static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
+		void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
+{
+	__iommu_dma_unmap(dev, handle, size);
+	__iommu_dma_free(dev, size, cpu_addr);
+}
+#else
+#define iommu_dma_alloc_noncoherent	NULL
+#define iommu_dma_free_noncoherent	NULL
+#endif /* CONFIG_DMA_REMAP */
+
 static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs)
@@ -1122,6 +1153,8 @@ static const struct dma_map_ops iommu_dma_ops = {
 	.free			= iommu_dma_free,
 	.alloc_pages		= dma_common_alloc_pages,
 	.free_pages		= dma_common_free_pages,
+	.alloc_noncoherent	= iommu_dma_alloc_noncoherent,
+	.free_noncoherent	= iommu_dma_free_noncoherent,
 	.mmap			= iommu_dma_mmap,
 	.get_sgtable		= iommu_dma_get_sgtable,
 	.map_page		= iommu_dma_map_page,
0 commit comments