Skip to content

Commit 38c0d0e

Browse files
rleon and mszyprow
authored and committed
sparc: Use physical address DMA mapping
Convert sparc architecture DMA code to use .map_phys callback. Signed-off-by: Leon Romanovsky <leonro@nvidia.com> Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com> Link: https://lore.kernel.org/r/20251015-remove-map-page-v5-11-3bbfe3a25cdf@kernel.org
1 parent a10d648 commit 38c0d0e

4 files changed

Lines changed: 82 additions & 63 deletions

File tree

arch/sparc/kernel/iommu.c

Lines changed: 19 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -260,26 +260,35 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
260260
free_pages((unsigned long)cpu, order);
261261
}
262262

263-
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
264-
unsigned long offset, size_t sz,
265-
enum dma_data_direction direction,
263+
static dma_addr_t dma_4u_map_phys(struct device *dev, phys_addr_t phys,
264+
size_t sz, enum dma_data_direction direction,
266265
unsigned long attrs)
267266
{
268267
struct iommu *iommu;
269268
struct strbuf *strbuf;
270269
iopte_t *base;
271270
unsigned long flags, npages, oaddr;
272-
unsigned long i, base_paddr, ctx;
271+
unsigned long i, ctx;
273272
u32 bus_addr, ret;
274273
unsigned long iopte_protection;
275274

275+
if (unlikely(attrs & DMA_ATTR_MMIO))
276+
/*
277+
* This check is included because older versions of the code
278+
* lacked MMIO path support, and my ability to test this path
279+
* is limited. However, from a software technical standpoint,
280+
* there is no restriction, as the following code operates
281+
* solely on physical addresses.
282+
*/
283+
goto bad_no_ctx;
284+
276285
iommu = dev->archdata.iommu;
277286
strbuf = dev->archdata.stc;
278287

279288
if (unlikely(direction == DMA_NONE))
280289
goto bad_no_ctx;
281290

282-
oaddr = (unsigned long)(page_address(page) + offset);
291+
oaddr = (unsigned long)(phys_to_virt(phys));
283292
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
284293
npages >>= IO_PAGE_SHIFT;
285294

@@ -296,16 +305,15 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
296305
bus_addr = (iommu->tbl.table_map_base +
297306
((base - iommu->page_table) << IO_PAGE_SHIFT));
298307
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
299-
base_paddr = __pa(oaddr & IO_PAGE_MASK);
300308
if (strbuf->strbuf_enabled)
301309
iopte_protection = IOPTE_STREAMING(ctx);
302310
else
303311
iopte_protection = IOPTE_CONSISTENT(ctx);
304312
if (direction != DMA_TO_DEVICE)
305313
iopte_protection |= IOPTE_WRITE;
306314

307-
for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
308-
iopte_val(*base) = iopte_protection | base_paddr;
315+
for (i = 0; i < npages; i++, base++, phys += IO_PAGE_SIZE)
316+
iopte_val(*base) = iopte_protection | phys;
309317

310318
return ret;
311319

@@ -383,7 +391,7 @@ static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
383391
vaddr, ctx, npages);
384392
}
385393

386-
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
394+
static void dma_4u_unmap_phys(struct device *dev, dma_addr_t bus_addr,
387395
size_t sz, enum dma_data_direction direction,
388396
unsigned long attrs)
389397
{
@@ -753,8 +761,8 @@ static int dma_4u_supported(struct device *dev, u64 device_mask)
753761
static const struct dma_map_ops sun4u_dma_ops = {
754762
.alloc = dma_4u_alloc_coherent,
755763
.free = dma_4u_free_coherent,
756-
.map_page = dma_4u_map_page,
757-
.unmap_page = dma_4u_unmap_page,
764+
.map_phys = dma_4u_map_phys,
765+
.unmap_phys = dma_4u_unmap_phys,
758766
.map_sg = dma_4u_map_sg,
759767
.unmap_sg = dma_4u_unmap_sg,
760768
.sync_single_for_cpu = dma_4u_sync_single_for_cpu,

arch/sparc/kernel/pci_sun4v.c

Lines changed: 19 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -352,28 +352,36 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
352352
free_pages((unsigned long)cpu, order);
353353
}
354354

355-
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
356-
unsigned long offset, size_t sz,
357-
enum dma_data_direction direction,
355+
static dma_addr_t dma_4v_map_phys(struct device *dev, phys_addr_t phys,
356+
size_t sz, enum dma_data_direction direction,
358357
unsigned long attrs)
359358
{
360359
struct iommu *iommu;
361360
struct atu *atu;
362361
struct iommu_map_table *tbl;
363362
u64 mask;
364363
unsigned long flags, npages, oaddr;
365-
unsigned long i, base_paddr;
366-
unsigned long prot;
364+
unsigned long i, prot;
367365
dma_addr_t bus_addr, ret;
368366
long entry;
369367

368+
if (unlikely(attrs & DMA_ATTR_MMIO))
369+
/*
370+
* This check is included because older versions of the code
371+
* lacked MMIO path support, and my ability to test this path
372+
* is limited. However, from a software technical standpoint,
373+
* there is no restriction, as the following code operates
374+
* solely on physical addresses.
375+
*/
376+
goto bad;
377+
370378
iommu = dev->archdata.iommu;
371379
atu = iommu->atu;
372380

373381
if (unlikely(direction == DMA_NONE))
374382
goto bad;
375383

376-
oaddr = (unsigned long)(page_address(page) + offset);
384+
oaddr = (unsigned long)(phys_to_virt(phys));
377385
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
378386
npages >>= IO_PAGE_SHIFT;
379387

@@ -391,7 +399,6 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
391399

392400
bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
393401
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
394-
base_paddr = __pa(oaddr & IO_PAGE_MASK);
395402
prot = HV_PCI_MAP_ATTR_READ;
396403
if (direction != DMA_TO_DEVICE)
397404
prot |= HV_PCI_MAP_ATTR_WRITE;
@@ -403,8 +410,8 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
403410

404411
iommu_batch_start(dev, prot, entry);
405412

406-
for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
407-
long err = iommu_batch_add(base_paddr, mask);
413+
for (i = 0; i < npages; i++, phys += IO_PAGE_SIZE) {
414+
long err = iommu_batch_add(phys, mask);
408415
if (unlikely(err < 0L))
409416
goto iommu_map_fail;
410417
}
@@ -426,7 +433,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
426433
return DMA_MAPPING_ERROR;
427434
}
428435

429-
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
436+
static void dma_4v_unmap_phys(struct device *dev, dma_addr_t bus_addr,
430437
size_t sz, enum dma_data_direction direction,
431438
unsigned long attrs)
432439
{
@@ -686,8 +693,8 @@ static int dma_4v_supported(struct device *dev, u64 device_mask)
686693
static const struct dma_map_ops sun4v_dma_ops = {
687694
.alloc = dma_4v_alloc_coherent,
688695
.free = dma_4v_free_coherent,
689-
.map_page = dma_4v_map_page,
690-
.unmap_page = dma_4v_unmap_page,
696+
.map_phys = dma_4v_map_phys,
697+
.unmap_phys = dma_4v_unmap_phys,
691698
.map_sg = dma_4v_map_sg,
692699
.unmap_sg = dma_4v_unmap_sg,
693700
.dma_supported = dma_4v_supported,

arch/sparc/mm/io-unit.c

Lines changed: 20 additions & 18 deletions
Original file line number | Diff line number | Diff line change
@@ -94,13 +94,14 @@ static int __init iounit_init(void)
9494
subsys_initcall(iounit_init);
9595

9696
/* One has to hold iounit->lock to call this */
97-
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
97+
static dma_addr_t iounit_get_area(struct iounit_struct *iounit,
98+
phys_addr_t phys, int size)
9899
{
99100
int i, j, k, npages;
100101
unsigned long rotor, scan, limit;
101102
iopte_t iopte;
102103

103-
npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
104+
npages = (offset_in_page(phys) + size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
104105

105106
/* A tiny bit of magic ingredience :) */
106107
switch (npages) {
@@ -109,7 +110,7 @@ static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long
109110
default: i = 0x0213; break;
110111
}
111112

112-
IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
113+
IOD(("%s(%pa,%d[%d])=", __func__, &phys, size, npages));
113114

114115
next: j = (i & 15);
115116
rotor = iounit->rotor[j - 1];
@@ -124,38 +125,38 @@ nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
124125
}
125126
i >>= 4;
126127
if (!(i & 15))
127-
panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
128+
panic("iounit_get_area: Couldn't find free iopte slots for (%pa,%d)\n",
129+
&phys, size);
128130
goto next;
129131
}
130132
for (k = 1, scan++; k < npages; k++)
131133
if (test_bit(scan++, iounit->bmap))
132134
goto nexti;
133135
iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
134136
scan -= npages;
135-
iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
136-
vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
137+
iopte = MKIOPTE(phys & PAGE_MASK);
138+
phys = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + offset_in_page(phys);
137139
for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
138140
set_bit(scan, iounit->bmap);
139141
sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
140142
}
141-
IOD(("%08lx\n", vaddr));
142-
return vaddr;
143+
IOD(("%pa\n", &phys));
144+
return phys;
143145
}
144146

145-
static dma_addr_t iounit_map_page(struct device *dev, struct page *page,
146-
unsigned long offset, size_t len, enum dma_data_direction dir,
147-
unsigned long attrs)
147+
static dma_addr_t iounit_map_phys(struct device *dev, phys_addr_t phys,
148+
size_t len, enum dma_data_direction dir, unsigned long attrs)
148149
{
149-
void *vaddr = page_address(page) + offset;
150150
struct iounit_struct *iounit = dev->archdata.iommu;
151-
unsigned long ret, flags;
151+
unsigned long flags;
152+
dma_addr_t ret;
152153

153154
/* XXX So what is maxphys for us and how do drivers know it? */
154155
if (!len || len > 256 * 1024)
155156
return DMA_MAPPING_ERROR;
156157

157158
spin_lock_irqsave(&iounit->lock, flags);
158-
ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
159+
ret = iounit_get_area(iounit, phys, len);
159160
spin_unlock_irqrestore(&iounit->lock, flags);
160161
return ret;
161162
}
@@ -171,14 +172,15 @@ static int iounit_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
171172
/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
172173
spin_lock_irqsave(&iounit->lock, flags);
173174
for_each_sg(sgl, sg, nents, i) {
174-
sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
175+
sg->dma_address =
176+
iounit_get_area(iounit, sg_phys(sg), sg->length);
175177
sg->dma_length = sg->length;
176178
}
177179
spin_unlock_irqrestore(&iounit->lock, flags);
178180
return nents;
179181
}
180182

181-
static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len,
183+
static void iounit_unmap_phys(struct device *dev, dma_addr_t vaddr, size_t len,
182184
enum dma_data_direction dir, unsigned long attrs)
183185
{
184186
struct iounit_struct *iounit = dev->archdata.iommu;
@@ -279,8 +281,8 @@ static const struct dma_map_ops iounit_dma_ops = {
279281
.alloc = iounit_alloc,
280282
.free = iounit_free,
281283
#endif
282-
.map_page = iounit_map_page,
283-
.unmap_page = iounit_unmap_page,
284+
.map_phys = iounit_map_phys,
285+
.unmap_phys = iounit_unmap_phys,
284286
.map_sg = iounit_map_sg,
285287
.unmap_sg = iounit_unmap_sg,
286288
};

arch/sparc/mm/iommu.c

Lines changed: 24 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -181,18 +181,20 @@ static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
181181
}
182182
}
183183

184-
static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
185-
unsigned long offset, size_t len, bool per_page_flush)
184+
static dma_addr_t __sbus_iommu_map_phys(struct device *dev, phys_addr_t paddr,
185+
size_t len, bool per_page_flush, unsigned long attrs)
186186
{
187187
struct iommu_struct *iommu = dev->archdata.iommu;
188-
phys_addr_t paddr = page_to_phys(page) + offset;
189-
unsigned long off = paddr & ~PAGE_MASK;
188+
unsigned long off = offset_in_page(paddr);
190189
unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
191190
unsigned long pfn = __phys_to_pfn(paddr);
192191
unsigned int busa, busa0;
193192
iopte_t *iopte, *iopte0;
194193
int ioptex, i;
195194

195+
if (unlikely(attrs & DMA_ATTR_MMIO))
196+
return DMA_MAPPING_ERROR;
197+
196198
/* XXX So what is maxphys for us and how do drivers know it? */
197199
if (!len || len > 256 * 1024)
198200
return DMA_MAPPING_ERROR;
@@ -202,10 +204,10 @@ static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
202204
* XXX Is this a good assumption?
203205
* XXX What if someone else unmaps it here and races us?
204206
*/
205-
if (per_page_flush && !PageHighMem(page)) {
207+
if (per_page_flush && !PhysHighMem(paddr)) {
206208
unsigned long vaddr, p;
207209

208-
vaddr = (unsigned long)page_address(page) + offset;
210+
vaddr = (unsigned long)phys_to_virt(paddr);
209211
for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
210212
flush_page_for_dma(p);
211213
}
@@ -231,19 +233,19 @@ static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
231233
return busa0 + off;
232234
}
233235

234-
static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
235-
struct page *page, unsigned long offset, size_t len,
236-
enum dma_data_direction dir, unsigned long attrs)
236+
static dma_addr_t sbus_iommu_map_phys_gflush(struct device *dev,
237+
phys_addr_t phys, size_t len, enum dma_data_direction dir,
238+
unsigned long attrs)
237239
{
238240
flush_page_for_dma(0);
239-
return __sbus_iommu_map_page(dev, page, offset, len, false);
241+
return __sbus_iommu_map_phys(dev, phys, len, false, attrs);
240242
}
241243

242-
static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
243-
struct page *page, unsigned long offset, size_t len,
244-
enum dma_data_direction dir, unsigned long attrs)
244+
static dma_addr_t sbus_iommu_map_phys_pflush(struct device *dev,
245+
phys_addr_t phys, size_t len, enum dma_data_direction dir,
246+
unsigned long attrs)
245247
{
246-
return __sbus_iommu_map_page(dev, page, offset, len, true);
248+
return __sbus_iommu_map_phys(dev, phys, len, true, attrs);
247249
}
248250

249251
static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
@@ -254,8 +256,8 @@ static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
254256
int j;
255257

256258
for_each_sg(sgl, sg, nents, j) {
257-
sg->dma_address =__sbus_iommu_map_page(dev, sg_page(sg),
258-
sg->offset, sg->length, per_page_flush);
259+
sg->dma_address = __sbus_iommu_map_phys(dev, sg_phys(sg),
260+
sg->length, per_page_flush, attrs);
259261
if (sg->dma_address == DMA_MAPPING_ERROR)
260262
return -EIO;
261263
sg->dma_length = sg->length;
@@ -277,7 +279,7 @@ static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
277279
return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
278280
}
279281

280-
static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
282+
static void sbus_iommu_unmap_phys(struct device *dev, dma_addr_t dma_addr,
281283
size_t len, enum dma_data_direction dir, unsigned long attrs)
282284
{
283285
struct iommu_struct *iommu = dev->archdata.iommu;
@@ -303,7 +305,7 @@ static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
303305
int i;
304306

305307
for_each_sg(sgl, sg, nents, i) {
306-
sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
308+
sbus_iommu_unmap_phys(dev, sg->dma_address, sg->length, dir,
307309
attrs);
308310
sg->dma_address = 0x21212121;
309311
}
@@ -426,8 +428,8 @@ static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
426428
.alloc = sbus_iommu_alloc,
427429
.free = sbus_iommu_free,
428430
#endif
429-
.map_page = sbus_iommu_map_page_gflush,
430-
.unmap_page = sbus_iommu_unmap_page,
431+
.map_phys = sbus_iommu_map_phys_gflush,
432+
.unmap_phys = sbus_iommu_unmap_phys,
431433
.map_sg = sbus_iommu_map_sg_gflush,
432434
.unmap_sg = sbus_iommu_unmap_sg,
433435
};
@@ -437,8 +439,8 @@ static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
437439
.alloc = sbus_iommu_alloc,
438440
.free = sbus_iommu_free,
439441
#endif
440-
.map_page = sbus_iommu_map_page_pflush,
441-
.unmap_page = sbus_iommu_unmap_page,
442+
.map_phys = sbus_iommu_map_phys_pflush,
443+
.unmap_phys = sbus_iommu_unmap_phys,
442444
.map_sg = sbus_iommu_map_sg_pflush,
443445
.unmap_sg = sbus_iommu_unmap_sg,
444446
};

0 commit comments

Comments
 (0)