Commit 50b149b

rleon authored and mszyprow committed
ARM: dma-mapping: Switch to physical address mapping callbacks
Combine the resource and page mapping routines into one function that handles both flows in the same manner. This conversion allows the .map_resource/.unmap_resource callbacks to be removed completely.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20251015-remove-map-page-v5-4-3bbfe3a25cdf@kernel.org
1 parent: 52c9aa1

1 file changed: arch/arm/mm/dma-mapping.c
23 additions, 77 deletions
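For orientation before the diff: the commit message above folds two driver-visible flows, streaming page-backed buffers and MMIO resources, into one physical-address callback. The sketch below is a minimal, hypothetical driver helper (example_map(), dev, buf and mmio_phys are invented for illustration) that uses only the long-standing generic DMA API entry points; with an ARM IOMMU mapping attached, both calls should now end up in arm_iommu_map_phys(), the resource mapping presumably marked as MMIO by the DMA core rather than dispatched to a separate .map_resource callback.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hypothetical helper: map one streaming buffer and one MMIO region for DMA.
 * After this commit, both mappings are expected to reach the same IOMMU
 * callback (arm_iommu_map_phys()) on ARM; the resource mapping keeps its
 * MMIO protection and skips CPU cache maintenance.
 */
static int example_map(struct device *dev, void *buf, size_t len,
		       phys_addr_t mmio_phys, size_t mmio_len)
{
	dma_addr_t buf_dma, mmio_dma;

	/* Page-backed buffer: cache maintenance is performed as usual. */
	buf_dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buf_dma))
		return -ENOMEM;

	/* MMIO resource: no struct page, no CPU cache sync. */
	mmio_dma = dma_map_resource(dev, mmio_phys, mmio_len,
				    DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, mmio_dma)) {
		dma_unmap_single(dev, buf_dma, len, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	/* ... program the device with buf_dma / mmio_dma ... */

	dma_unmap_resource(dev, mmio_dma, mmio_len, DMA_BIDIRECTIONAL, 0);
	dma_unmap_single(dev, buf_dma, len, DMA_TO_DEVICE);
	return 0;
}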
@@ -732,6 +732,9 @@ static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
 	if (attrs & DMA_ATTR_PRIVILEGED)
 		prot |= IOMMU_PRIV;
 
+	if (attrs & DMA_ATTR_MMIO)
+		prot |= IOMMU_MMIO;
+
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:
 		return prot | IOMMU_READ | IOMMU_WRITE;
@@ -1350,38 +1353,39 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
 }
 
 /**
- * arm_iommu_map_page
+ * arm_iommu_map_phys
  * @dev: valid struct device pointer
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
+ * @phys: physical address that buffer resides in
  * @size: size of buffer to map
  * @dir: DMA transfer direction
+ * @attrs: DMA mapping attributes
  *
  * IOMMU aware version of arm_dma_map_page()
  */
-static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     unsigned long attrs)
+static dma_addr_t arm_iommu_map_phys(struct device *dev, phys_addr_t phys,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+	int len = PAGE_ALIGN(size + offset_in_page(phys));
+	phys_addr_t addr = phys & PAGE_MASK;
 	dma_addr_t dma_addr;
-	int ret, prot, len = PAGE_ALIGN(size + offset);
+	int ret, prot;
 
-	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_device(page_to_phys(page), offset, size, dir);
+	if (!dev->dma_coherent &&
+	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
+		arch_sync_dma_for_device(phys, size, dir);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_MAPPING_ERROR)
 		return dma_addr;
 
 	prot = __dma_info_to_prot(dir, attrs);
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
-			prot, GFP_KERNEL);
+	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
 	if (ret < 0)
 		goto fail;
 
-	return dma_addr + offset;
+	return dma_addr + offset_in_page(phys);
 fail:
 	__free_iova(mapping, dma_addr, len);
 	return DMA_MAPPING_ERROR;
@@ -1393,10 +1397,11 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
  * @handle: DMA address of buffer
  * @size: size of buffer (same as passed to dma_map_page)
  * @dir: DMA transfer direction (same as passed to dma_map_page)
+ * @attrs: DMA mapping attributes
  *
- * IOMMU aware version of arm_dma_unmap_page()
+ * IOMMU aware version of arm_dma_unmap_phys()
  */
-static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+static void arm_iommu_unmap_phys(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
@@ -1407,7 +1412,8 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	if (!iova)
 		return;
 
-	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+	if (!dev->dma_coherent &&
+	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
 		phys_addr_t phys = iommu_iova_to_phys(mapping->domain, iova);
 
 		arch_sync_dma_for_cpu(phys + offset, size, dir);
@@ -1417,63 +1423,6 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	__free_iova(mapping, iova, len);
 }
 
-/**
- * arm_iommu_map_resource - map a device resource for DMA
- * @dev: valid struct device pointer
- * @phys_addr: physical address of resource
- * @size: size of resource to map
- * @dir: DMA transfer direction
- */
-static dma_addr_t arm_iommu_map_resource(struct device *dev,
-		phys_addr_t phys_addr, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-	dma_addr_t dma_addr;
-	int ret, prot;
-	phys_addr_t addr = phys_addr & PAGE_MASK;
-	unsigned int offset = phys_addr & ~PAGE_MASK;
-	size_t len = PAGE_ALIGN(size + offset);
-
-	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == DMA_MAPPING_ERROR)
-		return dma_addr;
-
-	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
-
-	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
-	if (ret < 0)
-		goto fail;
-
-	return dma_addr + offset;
-fail:
-	__free_iova(mapping, dma_addr, len);
-	return DMA_MAPPING_ERROR;
-}
-
-/**
- * arm_iommu_unmap_resource - unmap a device DMA resource
- * @dev: valid struct device pointer
- * @dma_handle: DMA address to resource
- * @size: size of resource to map
- * @dir: DMA transfer direction
- */
-static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-	dma_addr_t iova = dma_handle & PAGE_MASK;
-	unsigned int offset = dma_handle & ~PAGE_MASK;
-	size_t len = PAGE_ALIGN(size + offset);
-
-	if (!iova)
-		return;
-
-	iommu_unmap(mapping->domain, iova, len);
-	__free_iova(mapping, iova, len);
-}
-
 static void arm_iommu_sync_single_for_cpu(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
@@ -1510,18 +1459,15 @@ static const struct dma_map_ops iommu_ops = {
 	.mmap		= arm_iommu_mmap_attrs,
 	.get_sgtable	= arm_iommu_get_sgtable,
 
-	.map_page		= arm_iommu_map_page,
-	.unmap_page		= arm_iommu_unmap_page,
+	.map_phys		= arm_iommu_map_phys,
+	.unmap_phys		= arm_iommu_unmap_phys,
 	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
 	.sync_single_for_device	= arm_iommu_sync_single_for_device,
 
 	.map_sg			= arm_iommu_map_sg,
 	.unmap_sg		= arm_iommu_unmap_sg,
 	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
-
-	.map_resource		= arm_iommu_map_resource,
-	.unmap_resource		= arm_iommu_unmap_resource,
 };
 
 /**
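Design note: as the removed arm_iommu_map_resource()/arm_iommu_unmap_resource() above show, the old resource path differed from the page path only in hard-coding IOMMU_MMIO and never touching the CPU caches. In the combined arm_iommu_map_phys()/arm_iommu_unmap_phys() those behaviours are keyed off the mapping attributes instead: DMA_ATTR_MMIO adds IOMMU_MMIO in __dma_info_to_prot() and suppresses the arch_sync_dma_for_device()/arch_sync_dma_for_cpu() calls, so the MMIO flow keeps its semantics while sharing the IOVA allocation, iommu_map() and tear-down code with ordinary page-backed buffers.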
