Skip to content

Commit a7405aa

Browse files
committed
Merge tag 'dma-mapping-6.19-2025-12-05' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux
Pull dma-mapping updates from Marek Szyprowski: - More DMA mapping API refactoring to use physical addresses as the primary interface instead of page+offset parameters. This time the dma_map_ops callbacks are converted to physical addresses, which in turn also results in some simplification of architecture-specific code (Leon Romanovsky and Jason Gunthorpe) - Clarify that dma_map_benchmark is not a kernel self-test, but a standalone tool (Qinxin Xia) * tag 'dma-mapping-6.19-2025-12-05' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux: dma-mapping: remove unused map_page callback xen: swiotlb: Convert mapping routine to rely on physical address x86: Use physical address for DMA mapping sparc: Use physical address DMA mapping powerpc: Convert to physical address DMA mapping parisc: Convert DMA map_page to map_phys interface MIPS/jazzdma: Provide physical address directly alpha: Convert mapping routine to rely on physical address dma-mapping: remove unused mapping resource callbacks xen: swiotlb: Switch to physical address mapping callbacks ARM: dma-mapping: Switch to physical address mapping callbacks ARM: dma-mapping: Reduce struct page exposure in arch_sync_dma*() dma-mapping: convert dummy ops to physical address mapping dma-mapping: prepare dma_map_ops to conversion to physical address tools/dma: move dma_map_benchmark from selftests to tools/dma
2 parents f468cf5 + 131971f commit a7405aa

31 files changed

Lines changed: 435 additions & 450 deletions

File tree

arch/alpha/kernel/pci_iommu.c

Lines changed: 21 additions & 27 deletions
Original file line number | Diff line number | Diff line change
@@ -224,28 +224,26 @@ static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
224224
until either pci_unmap_single or pci_dma_sync_single is performed. */
225225

226226
static dma_addr_t
227-
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
227+
pci_map_single_1(struct pci_dev *pdev, phys_addr_t paddr, size_t size,
228228
int dac_allowed)
229229
{
230230
struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
231231
dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
232+
unsigned long offset = offset_in_page(paddr);
232233
struct pci_iommu_arena *arena;
233234
long npages, dma_ofs, i;
234-
unsigned long paddr;
235235
dma_addr_t ret;
236236
unsigned int align = 0;
237237
struct device *dev = pdev ? &pdev->dev : NULL;
238238

239-
paddr = __pa(cpu_addr);
240-
241239
#if !DEBUG_NODIRECT
242240
/* First check to see if we can use the direct map window. */
243241
if (paddr + size + __direct_map_base - 1 <= max_dma
244242
&& paddr + size <= __direct_map_size) {
245243
ret = paddr + __direct_map_base;
246244

247-
DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n",
248-
cpu_addr, size, ret, __builtin_return_address(0));
245+
DBGA2("pci_map_single: [%pa,%zx] -> direct %llx from %ps\n",
246+
&paddr, size, ret, __builtin_return_address(0));
249247

250248
return ret;
251249
}
@@ -255,8 +253,8 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
255253
if (dac_allowed) {
256254
ret = paddr + alpha_mv.pci_dac_offset;
257255

258-
DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n",
259-
cpu_addr, size, ret, __builtin_return_address(0));
256+
DBGA2("pci_map_single: [%pa,%zx] -> DAC %llx from %ps\n",
257+
&paddr, size, ret, __builtin_return_address(0));
260258

261259
return ret;
262260
}
@@ -290,10 +288,10 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
290288
arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
291289

292290
ret = arena->dma_base + dma_ofs * PAGE_SIZE;
293-
ret += (unsigned long)cpu_addr & ~PAGE_MASK;
291+
ret += offset;
294292

295-
DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n",
296-
cpu_addr, size, npages, ret, __builtin_return_address(0));
293+
DBGA2("pci_map_single: [%pa,%zx] np %ld -> sg %llx from %ps\n",
294+
&paddr, size, npages, ret, __builtin_return_address(0));
297295

298296
return ret;
299297
}
@@ -322,19 +320,18 @@ static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
322320
return NULL;
323321
}
324322

325-
static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
326-
unsigned long offset, size_t size,
327-
enum dma_data_direction dir,
323+
static dma_addr_t alpha_pci_map_phys(struct device *dev, phys_addr_t phys,
324+
size_t size, enum dma_data_direction dir,
328325
unsigned long attrs)
329326
{
330327
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
331328
int dac_allowed;
332329

333-
BUG_ON(dir == DMA_NONE);
330+
if (unlikely(attrs & DMA_ATTR_MMIO))
331+
return DMA_MAPPING_ERROR;
334332

335-
dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
336-
return pci_map_single_1(pdev, (char *)page_address(page) + offset,
337-
size, dac_allowed);
333+
dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
334+
return pci_map_single_1(pdev, phys, size, dac_allowed);
338335
}
339336

340337
/* Unmap a single streaming mode DMA translation. The DMA_ADDR and
@@ -343,7 +340,7 @@ static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
343340
the cpu to the buffer are guaranteed to see whatever the device
344341
wrote there. */
345342

346-
static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
343+
static void alpha_pci_unmap_phys(struct device *dev, dma_addr_t dma_addr,
347344
size_t size, enum dma_data_direction dir,
348345
unsigned long attrs)
349346
{
@@ -353,8 +350,6 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
353350
struct pci_iommu_arena *arena;
354351
long dma_ofs, npages;
355352

356-
BUG_ON(dir == DMA_NONE);
357-
358353
if (dma_addr >= __direct_map_base
359354
&& dma_addr < __direct_map_base + __direct_map_size) {
360355
/* Nothing to do. */
@@ -429,7 +424,7 @@ static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
429424
}
430425
memset(cpu_addr, 0, size);
431426

432-
*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
427+
*dma_addrp = pci_map_single_1(pdev, virt_to_phys(cpu_addr), size, 0);
433428
if (*dma_addrp == DMA_MAPPING_ERROR) {
434429
free_pages((unsigned long)cpu_addr, order);
435430
if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
@@ -643,9 +638,8 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
643638
/* Fast path single entry scatterlists. */
644639
if (nents == 1) {
645640
sg->dma_length = sg->length;
646-
sg->dma_address
647-
= pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
648-
sg->length, dac_allowed);
641+
sg->dma_address = pci_map_single_1(pdev, sg_phys(sg),
642+
sg->length, dac_allowed);
649643
if (sg->dma_address == DMA_MAPPING_ERROR)
650644
return -EIO;
651645
return 1;
@@ -917,8 +911,8 @@ iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
917911
const struct dma_map_ops alpha_pci_ops = {
918912
.alloc = alpha_pci_alloc_coherent,
919913
.free = alpha_pci_free_coherent,
920-
.map_page = alpha_pci_map_page,
921-
.unmap_page = alpha_pci_unmap_page,
914+
.map_phys = alpha_pci_map_phys,
915+
.unmap_phys = alpha_pci_unmap_phys,
922916
.map_sg = alpha_pci_map_sg,
923917
.unmap_sg = alpha_pci_unmap_sg,
924918
.dma_supported = alpha_pci_supported,

0 commit comments

Comments
 (0)