Skip to content

Commit 96ddf2e

Browse files
rleon authored and mszyprow committed
parisc: Convert DMA map_page to map_phys interface
Perform mechanical conversion from .map_page to .map_phys callback.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20251015-remove-map-page-v5-9-3bbfe3a25cdf@kernel.org
1 parent e4e3fff commit 96ddf2e

3 files changed

Lines changed: 59 additions & 59 deletions

File tree

drivers/parisc/ccio-dma.c

Lines changed: 28 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -517,10 +517,10 @@ static u32 hint_lookup[] = {
517517
* ccio_io_pdir_entry - Initialize an I/O Pdir.
518518
* @pdir_ptr: A pointer into I/O Pdir.
519519
* @sid: The Space Identifier.
520-
* @vba: The virtual address.
520+
* @pba: The physical address.
521521
* @hints: The DMA Hint.
522522
*
523-
* Given a virtual address (vba, arg2) and space id, (sid, arg1),
523+
* Given a physical address (pba, arg2) and space id, (sid, arg1),
524524
* load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
525525
* entry consists of 8 bytes as shown below (MSB == bit 0):
526526
*
@@ -543,7 +543,7 @@ static u32 hint_lookup[] = {
543543
* index are bits 12:19 of the value returned by LCI.
544544
*/
545545
static void
546-
ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
546+
ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba,
547547
unsigned long hints)
548548
{
549549
register unsigned long pa;
@@ -557,7 +557,7 @@ ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
557557
** "hints" parm includes the VALID bit!
558558
** "dep" clobbers the physical address offset bits as well.
559559
*/
560-
pa = lpa(vba);
560+
pa = pba;
561561
asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
562562
((u32 *)pdir_ptr)[1] = (u32) pa;
563563

@@ -582,7 +582,7 @@ ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
582582
** Grab virtual index [0:11]
583583
** Deposit virt_idx bits into I/O PDIR word
584584
*/
585-
asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
585+
asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pba)));
586586
asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
587587
asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
588588

@@ -704,14 +704,14 @@ ccio_dma_supported(struct device *dev, u64 mask)
704704
/**
705705
* ccio_map_single - Map an address range into the IOMMU.
706706
* @dev: The PCI device.
707-
* @addr: The start address of the DMA region.
707+
* @addr: The physical address of the DMA region.
708708
* @size: The length of the DMA region.
709709
* @direction: The direction of the DMA transaction (to/from device).
710710
*
711711
* This function implements the pci_map_single function.
712712
*/
713713
static dma_addr_t
714-
ccio_map_single(struct device *dev, void *addr, size_t size,
714+
ccio_map_single(struct device *dev, phys_addr_t addr, size_t size,
715715
enum dma_data_direction direction)
716716
{
717717
int idx;
@@ -730,7 +730,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
730730
BUG_ON(size <= 0);
731731

732732
/* save offset bits */
733-
offset = ((unsigned long) addr) & ~IOVP_MASK;
733+
offset = offset_in_page(addr);
734734

735735
/* round up to nearest IOVP_SIZE */
736736
size = ALIGN(size + offset, IOVP_SIZE);
@@ -746,15 +746,15 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
746746

747747
pdir_start = &(ioc->pdir_base[idx]);
748748

749-
DBG_RUN("%s() %px -> %#lx size: %zu\n",
750-
__func__, addr, (long)(iovp | offset), size);
749+
DBG_RUN("%s() %pa -> %#lx size: %zu\n",
750+
__func__, &addr, (long)(iovp | offset), size);
751751

752752
/* If not cacheline aligned, force SAFE_DMA on the whole mess */
753-
if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
753+
if ((size % L1_CACHE_BYTES) || (addr % L1_CACHE_BYTES))
754754
hint |= HINT_SAFE_DMA;
755755

756756
while(size > 0) {
757-
ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);
757+
ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, hint);
758758

759759
DBG_RUN(" pdir %p %08x%08x\n",
760760
pdir_start,
@@ -773,25 +773,26 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
773773

774774

775775
static dma_addr_t
776-
ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
777-
size_t size, enum dma_data_direction direction,
778-
unsigned long attrs)
776+
ccio_map_phys(struct device *dev, phys_addr_t phys, size_t size,
777+
enum dma_data_direction direction, unsigned long attrs)
779778
{
780-
return ccio_map_single(dev, page_address(page) + offset, size,
781-
direction);
779+
if (unlikely(attrs & DMA_ATTR_MMIO))
780+
return DMA_MAPPING_ERROR;
781+
782+
return ccio_map_single(dev, phys, size, direction);
782783
}
783784

784785

785786
/**
786-
* ccio_unmap_page - Unmap an address range from the IOMMU.
787+
* ccio_unmap_phys - Unmap an address range from the IOMMU.
787788
* @dev: The PCI device.
788789
* @iova: The start address of the DMA region.
789790
* @size: The length of the DMA region.
790791
* @direction: The direction of the DMA transaction (to/from device).
791792
* @attrs: attributes
792793
*/
793794
static void
794-
ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
795+
ccio_unmap_phys(struct device *dev, dma_addr_t iova, size_t size,
795796
enum dma_data_direction direction, unsigned long attrs)
796797
{
797798
struct ioc *ioc;
@@ -853,7 +854,8 @@ ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
853854

854855
if (ret) {
855856
memset(ret, 0, size);
856-
*dma_handle = ccio_map_single(dev, ret, size, DMA_BIDIRECTIONAL);
857+
*dma_handle = ccio_map_single(dev, virt_to_phys(ret), size,
858+
DMA_BIDIRECTIONAL);
857859
}
858860

859861
return ret;
@@ -873,7 +875,7 @@ static void
873875
ccio_free(struct device *dev, size_t size, void *cpu_addr,
874876
dma_addr_t dma_handle, unsigned long attrs)
875877
{
876-
ccio_unmap_page(dev, dma_handle, size, 0, 0);
878+
ccio_unmap_phys(dev, dma_handle, size, 0, 0);
877879
free_pages((unsigned long)cpu_addr, get_order(size));
878880
}
879881

@@ -920,7 +922,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
920922
/* Fast path single entry scatterlists. */
921923
if (nents == 1) {
922924
sg_dma_address(sglist) = ccio_map_single(dev,
923-
sg_virt(sglist), sglist->length,
925+
sg_phys(sglist), sglist->length,
924926
direction);
925927
sg_dma_len(sglist) = sglist->length;
926928
return 1;
@@ -1004,7 +1006,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
10041006
#ifdef CCIO_COLLECT_STATS
10051007
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
10061008
#endif
1007-
ccio_unmap_page(dev, sg_dma_address(sglist),
1009+
ccio_unmap_phys(dev, sg_dma_address(sglist),
10081010
sg_dma_len(sglist), direction, 0);
10091011
++sglist;
10101012
nents--;
@@ -1017,8 +1019,8 @@ static const struct dma_map_ops ccio_ops = {
10171019
.dma_supported = ccio_dma_supported,
10181020
.alloc = ccio_alloc,
10191021
.free = ccio_free,
1020-
.map_page = ccio_map_page,
1021-
.unmap_page = ccio_unmap_page,
1022+
.map_phys = ccio_map_phys,
1023+
.unmap_phys = ccio_unmap_phys,
10221024
.map_sg = ccio_map_sg,
10231025
.unmap_sg = ccio_unmap_sg,
10241026
.get_sgtable = dma_common_get_sgtable,
@@ -1072,7 +1074,7 @@ static int ccio_proc_info(struct seq_file *m, void *p)
10721074
ioc->msingle_calls, ioc->msingle_pages,
10731075
(int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
10741076

1075-
/* KLUGE - unmap_sg calls unmap_page for each mapped page */
1077+
/* KLUGE - unmap_sg calls unmap_phys for each mapped page */
10761078
min = ioc->usingle_calls - ioc->usg_calls;
10771079
max = ioc->usingle_pages - ioc->usg_pages;
10781080
seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",

drivers/parisc/iommu-helpers.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
static inline unsigned int
1515
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
1616
unsigned long hint,
17-
void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long,
17+
void (*iommu_io_pdir_entry)(__le64 *, space_t, phys_addr_t,
1818
unsigned long))
1919
{
2020
struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
@@ -28,7 +28,7 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
2828
dma_sg--;
2929

3030
while (nents-- > 0) {
31-
unsigned long vaddr;
31+
phys_addr_t paddr;
3232
long size;
3333

3434
DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
@@ -67,7 +67,7 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
6767

6868
BUG_ON(pdirp == NULL);
6969

70-
vaddr = (unsigned long)sg_virt(startsg);
70+
paddr = sg_phys(startsg);
7171
sg_dma_len(dma_sg) += startsg->length;
7272
size = startsg->length + dma_offset;
7373
dma_offset = 0;
@@ -76,8 +76,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
7676
#endif
7777
do {
7878
iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
79-
vaddr, hint);
80-
vaddr += IOVP_SIZE;
79+
paddr, hint);
80+
paddr += IOVP_SIZE;
8181
size -= IOVP_SIZE;
8282
pdirp++;
8383
} while(unlikely(size > 0));

drivers/parisc/sba_iommu.c

Lines changed: 26 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -532,7 +532,7 @@ typedef unsigned long space_t;
532532
* sba_io_pdir_entry - fill in one IO PDIR entry
533533
* @pdir_ptr: pointer to IO PDIR entry
534534
* @sid: process Space ID - currently only support KERNEL_SPACE
535-
* @vba: Virtual CPU address of buffer to map
535+
* @pba: Physical address of buffer to map
536536
* @hint: DMA hint set to use for this mapping
537537
*
538538
* SBA Mapping Routine
@@ -569,20 +569,17 @@ typedef unsigned long space_t;
569569
*/
570570

571571
static void
572-
sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
572+
sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba,
573573
unsigned long hint)
574574
{
575-
u64 pa; /* physical address */
576575
register unsigned ci; /* coherent index */
577576

578-
pa = lpa(vba);
579-
pa &= IOVP_MASK;
577+
asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pba)));
578+
pba &= IOVP_MASK;
579+
pba |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
580580

581-
asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
582-
pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
583-
584-
pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
585-
*pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */
581+
pba |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
582+
*pdir_ptr = cpu_to_le64(pba); /* swap and store into I/O Pdir */
586583

587584
/*
588585
* If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
@@ -707,7 +704,7 @@ static int sba_dma_supported( struct device *dev, u64 mask)
707704
* See Documentation/core-api/dma-api-howto.rst
708705
*/
709706
static dma_addr_t
710-
sba_map_single(struct device *dev, void *addr, size_t size,
707+
sba_map_single(struct device *dev, phys_addr_t addr, size_t size,
711708
enum dma_data_direction direction)
712709
{
713710
struct ioc *ioc;
@@ -722,7 +719,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
722719
return DMA_MAPPING_ERROR;
723720

724721
/* save offset bits */
725-
offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
722+
offset = offset_in_page(addr);
726723

727724
/* round up to nearest IOVP_SIZE */
728725
size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
@@ -739,13 +736,13 @@ sba_map_single(struct device *dev, void *addr, size_t size,
739736
pide = sba_alloc_range(ioc, dev, size);
740737
iovp = (dma_addr_t) pide << IOVP_SHIFT;
741738

742-
DBG_RUN("%s() 0x%p -> 0x%lx\n",
743-
__func__, addr, (long) iovp | offset);
739+
DBG_RUN("%s() 0x%pa -> 0x%lx\n",
740+
__func__, &addr, (long) iovp | offset);
744741

745742
pdir_start = &(ioc->pdir_base[pide]);
746743

747744
while (size > 0) {
748-
sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
745+
sba_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, 0);
749746

750747
DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
751748
pdir_start,
@@ -778,17 +775,18 @@ sba_map_single(struct device *dev, void *addr, size_t size,
778775

779776

780777
static dma_addr_t
781-
sba_map_page(struct device *dev, struct page *page, unsigned long offset,
782-
size_t size, enum dma_data_direction direction,
783-
unsigned long attrs)
778+
sba_map_phys(struct device *dev, phys_addr_t phys, size_t size,
779+
enum dma_data_direction direction, unsigned long attrs)
784780
{
785-
return sba_map_single(dev, page_address(page) + offset, size,
786-
direction);
781+
if (unlikely(attrs & DMA_ATTR_MMIO))
782+
return DMA_MAPPING_ERROR;
783+
784+
return sba_map_single(dev, phys, size, direction);
787785
}
788786

789787

790788
/**
791-
* sba_unmap_page - unmap one IOVA and free resources
789+
* sba_unmap_phys - unmap one IOVA and free resources
792790
* @dev: instance of PCI owned by the driver that's asking.
793791
* @iova: IOVA of driver buffer previously mapped.
794792
* @size: number of bytes mapped in driver buffer.
@@ -798,7 +796,7 @@ sba_map_page(struct device *dev, struct page *page, unsigned long offset,
798796
* See Documentation/core-api/dma-api-howto.rst
799797
*/
800798
static void
801-
sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
799+
sba_unmap_phys(struct device *dev, dma_addr_t iova, size_t size,
802800
enum dma_data_direction direction, unsigned long attrs)
803801
{
804802
struct ioc *ioc;
@@ -893,7 +891,7 @@ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle
893891

894892
if (ret) {
895893
memset(ret, 0, size);
896-
*dma_handle = sba_map_single(hwdev, ret, size, 0);
894+
*dma_handle = sba_map_single(hwdev, virt_to_phys(ret), size, 0);
897895
}
898896

899897
return ret;
@@ -914,7 +912,7 @@ static void
914912
sba_free(struct device *hwdev, size_t size, void *vaddr,
915913
dma_addr_t dma_handle, unsigned long attrs)
916914
{
917-
sba_unmap_page(hwdev, dma_handle, size, 0, 0);
915+
sba_unmap_phys(hwdev, dma_handle, size, 0, 0);
918916
free_pages((unsigned long) vaddr, get_order(size));
919917
}
920918

@@ -962,7 +960,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
962960

963961
/* Fast path single entry scatterlists. */
964962
if (nents == 1) {
965-
sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
963+
sg_dma_address(sglist) = sba_map_single(dev, sg_phys(sglist),
966964
sglist->length, direction);
967965
sg_dma_len(sglist) = sglist->length;
968966
return 1;
@@ -1061,7 +1059,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
10611059

10621060
while (nents && sg_dma_len(sglist)) {
10631061

1064-
sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
1062+
sba_unmap_phys(dev, sg_dma_address(sglist), sg_dma_len(sglist),
10651063
direction, 0);
10661064
#ifdef SBA_COLLECT_STATS
10671065
ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
@@ -1085,8 +1083,8 @@ static const struct dma_map_ops sba_ops = {
10851083
.dma_supported = sba_dma_supported,
10861084
.alloc = sba_alloc,
10871085
.free = sba_free,
1088-
.map_page = sba_map_page,
1089-
.unmap_page = sba_unmap_page,
1086+
.map_phys = sba_map_phys,
1087+
.unmap_phys = sba_unmap_phys,
10901088
.map_sg = sba_map_sg,
10911089
.unmap_sg = sba_unmap_sg,
10921090
.get_sgtable = dma_common_get_sgtable,

0 commit comments

Comments (0)