Commit 52c9aa1

rleon authored and mszyprow committed
ARM: dma-mapping: Reduce struct page exposure in arch_sync_dma*()
In preparation for changing from the .map_page to the .map_phys DMA callback, convert the arch_sync_dma*() functions to use physical addresses instead of struct page.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20251015-remove-map-page-v5-3-3bbfe3a25cdf@kernel.org
1 parent 45fa6d1 commit 52c9aa1
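
Reading aid (not part of the patch): the conversion works because a single phys_addr_t encodes everything the old (struct page *, offset) pair did, so the callee can always recover the pfn and the intra-page offset. A minimal userspace sketch of that arithmetic, where PAGE_SHIFT and the helper names are stand-ins for the kernel's and the addresses are arbitrary:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

typedef uint64_t phys_addr_t;

/* Stand-ins for __phys_to_pfn() and offset_in_page(). */
static unsigned long phys_to_pfn(phys_addr_t phys)    { return phys >> PAGE_SHIFT; }
static unsigned long offset_in_page(phys_addr_t phys) { return phys & (PAGE_SIZE - 1); }

int main(void)
{
	/* Old interface: a page (here just its physical base) plus an offset. */
	phys_addr_t page_base = 0x80004000;  /* page-aligned, like page_to_phys(page) */
	unsigned long offset = 0x1234;       /* may exceed PAGE_SIZE for sg entries */

	/* New interface: one physical address carries both pieces of state. */
	phys_addr_t phys = page_base + offset;

	/* The callee recovers exactly what the old parameters encoded. */
	assert(phys_to_pfn(phys) == phys_to_pfn(page_base) + offset / PAGE_SIZE);
	assert(offset_in_page(phys) == offset % PAGE_SIZE);

	printf("pfn=%lu offset=%#lx\n", phys_to_pfn(phys), offset_in_page(phys));
	return 0;
}

This recovery is exactly what dma_cache_maint_page() now performs at entry via offset_in_page() and __phys_to_pfn(), replacing the "pfn = page_to_pfn(page) + offset / PAGE_SIZE; offset %= PAGE_SIZE;" dance the old callers relied on.
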

1 file changed: arch/arm/mm/dma-mapping.c

Lines changed: 31 additions & 51 deletions
@@ -624,16 +624,14 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	kfree(buf);
 }

-static void dma_cache_maint_page(struct page *page, unsigned long offset,
-		size_t size, enum dma_data_direction dir,
+static void dma_cache_maint_page(phys_addr_t phys, size_t size,
+		enum dma_data_direction dir,
 		void (*op)(const void *, size_t, int))
 {
-	unsigned long pfn;
+	unsigned long offset = offset_in_page(phys);
+	unsigned long pfn = __phys_to_pfn(phys);
 	size_t left = size;

-	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
-	offset %= PAGE_SIZE;
-
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
 	 * pages. But we still need to process highmem pages individually.
@@ -644,25 +642,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 		size_t len = left;
 		void *vaddr;

-		page = pfn_to_page(pfn);
-
-		if (PageHighMem(page)) {
+		phys = __pfn_to_phys(pfn);
+		if (PhysHighMem(phys)) {
 			if (len + offset > PAGE_SIZE)
 				len = PAGE_SIZE - offset;

 			if (cache_is_vipt_nonaliasing()) {
-				vaddr = kmap_atomic(page);
+				vaddr = kmap_atomic_pfn(pfn);
 				op(vaddr + offset, len, dir);
 				kunmap_atomic(vaddr);
 			} else {
+				struct page *page = phys_to_page(phys);
+
 				vaddr = kmap_high_get(page);
 				if (vaddr) {
 					op(vaddr + offset, len, dir);
 					kunmap_high(page);
 				}
 			}
 		} else {
-			vaddr = page_address(page) + offset;
+			phys += offset;
+			vaddr = phys_to_virt(phys);
 			op(vaddr, len, dir);
 		}
 		offset = 0;
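
Aside (not from the patch): the hunk only shows part of the maintenance loop, so here is a hedged userspace sketch of the same walk shape, with a printf standing in for the op() cache callback. It clips every chunk at a page boundary and resets the intra-page offset after the first chunk, as the "offset = 0;" line above does; note that the real lowmem path can process a whole physically contiguous run in one step, which this sketch deliberately ignores:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

typedef uint64_t phys_addr_t;

/* Stand-in for the op(vaddr, len, dir) cache maintenance callback. */
static void op(unsigned long pfn, unsigned long offset, size_t len)
{
	printf("maint pfn=%lu offset=%#lx len=%zu\n", pfn, offset, len);
}

/* Hypothetical walk mirroring the shape of dma_cache_maint_page(). */
static void cache_maint_walk(phys_addr_t phys, size_t size)
{
	unsigned long offset = phys & (PAGE_SIZE - 1); /* offset_in_page() */
	unsigned long pfn = phys >> PAGE_SHIFT;        /* __phys_to_pfn() */
	size_t left = size;

	do {
		size_t len = left;

		/* Never cross a page boundary in one step; the real code
		 * imposes this limit on the highmem path. */
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		op(pfn, offset, len);

		offset = 0;   /* only the first chunk starts mid-page */
		pfn++;
		left -= len;
	} while (left);
}

int main(void)
{
	/* Three pages' worth of data starting 0x100 bytes into a page. */
	cache_maint_walk(0x80004100, 3 * PAGE_SIZE);
	return 0;
}
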
@@ -676,14 +676,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
  * Note: Drivers should NOT use this function directly.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	phys_addr_t paddr;
-
-	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
+	dma_cache_maint_page(paddr, size, dir, dmac_map_area);

-	paddr = page_to_phys(page) + off;
 	if (dir == DMA_FROM_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
 	} else {
@@ -692,17 +689,15 @@ static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }

-static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	phys_addr_t paddr = page_to_phys(page) + off;
-
 	/* FIXME: non-speculating: not required */
 	/* in any case, don't bother invalidating if DMA to device */
 	if (dir != DMA_TO_DEVICE) {
 		outer_inv_range(paddr, paddr + size);

-		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+		dma_cache_maint_page(paddr, size, dir, dmac_unmap_area);
 	}

 /*
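
Aside (not from the patch): with the old wrappers folded away, the direction logic of the two entry points is now visible in one place. A toy userspace model of just the branches shown in these two hunks; the clean path in arch_sync_dma_for_device()'s else branch lies outside the hunk context and is deliberately omitted:

#include <stdio.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* Stub for outer_inv_range(); the real one operates on the outer (L2) cache. */
static void outer_inv_range(unsigned long start, unsigned long end)
{
	printf("outer invalidate [%#lx, %#lx)\n", start, end);
}

/* Stub for the inner-cache walk done via dma_cache_maint_page(). */
static void cache_maint(const char *what)
{
	printf("inner maintenance: %s\n", what);
}

/* Shape of arch_sync_dma_for_device(): inner map-side maintenance first,
 * then invalidate the outer cache if the device is about to write to the
 * buffer (DMA_FROM_DEVICE). */
static void sync_for_device(unsigned long paddr, size_t size,
			    enum dma_data_direction dir)
{
	cache_maint("dmac_map_area");
	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(paddr, paddr + size);
}

/* Shape of arch_sync_dma_for_cpu(): nothing to invalidate when the transfer
 * only went CPU -> device; otherwise invalidate outer, then inner. */
static void sync_for_cpu(unsigned long paddr, size_t size,
			 enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);
		cache_maint("dmac_unmap_area");
	}
}

int main(void)
{
	sync_for_device(0x80004000UL, 4096, DMA_FROM_DEVICE);
	sync_for_cpu(0x80004000UL, 4096, DMA_FROM_DEVICE);
	return 0;
}
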
@@ -1205,7 +1200,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);

 		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+			arch_sync_dma_for_device(sg_phys(s), s->length, dir);

 		prot = __dma_info_to_prot(dir, attrs);

@@ -1307,8 +1302,7 @@ static void arm_iommu_unmap_sg(struct device *dev,
 			__iommu_remove_mapping(dev, sg_dma_address(s),
 					       sg_dma_len(s));
 		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			__dma_page_dev_to_cpu(sg_page(s), s->offset,
-					      s->length, dir);
+			arch_sync_dma_for_cpu(sg_phys(s), s->length, dir);
 	}
 }

@@ -1330,7 +1324,7 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
 		return;

 	for_each_sg(sg, s, nents, i)
-		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+		arch_sync_dma_for_cpu(sg_phys(s), s->length, dir);

 }

@@ -1352,7 +1346,7 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
 		return;

 	for_each_sg(sg, s, nents, i)
-		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+		arch_sync_dma_for_device(sg_phys(s), s->length, dir);
 }

 /**
@@ -1374,7 +1368,7 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 	int ret, prot, len = PAGE_ALIGN(size + offset);

 	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		__dma_page_cpu_to_dev(page, offset, size, dir);
+		arch_sync_dma_for_device(page_to_phys(page) + offset, size, dir);

 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_MAPPING_ERROR)
@@ -1407,16 +1401,16 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page;
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);

 	if (!iova)
 		return;

 	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-		page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
-		__dma_page_dev_to_cpu(page, offset, size, dir);
+		phys_addr_t phys = iommu_iova_to_phys(mapping->domain, iova);
+
+		arch_sync_dma_for_cpu(phys + offset, size, dir);
 	}

 	iommu_unmap(mapping->domain, iova, len);
@@ -1485,29 +1479,29 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;
+	phys_addr_t phys;

 	if (dev->dma_coherent || !iova)
 		return;

-	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
-	__dma_page_dev_to_cpu(page, offset, size, dir);
+	phys = iommu_iova_to_phys(mapping->domain, iova);
+	arch_sync_dma_for_cpu(phys + offset, size, dir);
 }

 static void arm_iommu_sync_single_for_device(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;
+	phys_addr_t phys;

 	if (dev->dma_coherent || !iova)
 		return;

-	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	phys = iommu_iova_to_phys(mapping->domain, iova);
+	arch_sync_dma_for_device(phys + offset, size, dir);
 }

 static const struct dma_map_ops iommu_ops = {
@@ -1794,20 +1788,6 @@ void arch_teardown_dma_ops(struct device *dev)
 	set_dma_ops(dev, NULL);
 }

-void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
-			      size, dir);
-}
-
-void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
-			      size, dir);
-}
-
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp, unsigned long attrs)
 {
