
Commit af85de5

rleon authored and mszyprow committed
xen: swiotlb: Switch to physical address mapping callbacks
Combine the resource and page mapping routines into one function and remove
the .map_resource/.unmap_resource callbacks completely.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20251015-remove-map-page-v5-5-3bbfe3a25cdf@kernel.org
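
The point of the consolidation is that the DMA core can reach Xen's swiotlb
through a single physical-address callback for both page-backed memory and
MMIO resources. A minimal sketch of the presumed core-side dispatch, assuming
the ->map_phys() callback and DMA_ATTR_MMIO flag this series introduces (the
core-side code is not part of this diff, so names and logic here are an
assumption based on the commit message):

/*
 * Hedged sketch, not kernel/dma/mapping.c verbatim: dma_map_resource()
 * is assumed to funnel into the same ->map_phys() callback as ordinary
 * page mappings, with DMA_ATTR_MMIO marking the range as MMIO.
 */
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys,
                            size_t size, enum dma_data_direction dir,
                            unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        /* No struct page backs an MMIO range; flag it so one callback
         * can serve both cases. */
        return ops->map_phys(dev, phys, size, dir, attrs | DMA_ATTR_MMIO);
}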
1 parent 50b149b

1 file changed: drivers/xen/swiotlb-xen.c
29 additions, 34 deletions
@@ -200,17 +200,32 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
  * physical address to use is returned.
  *
  * Once the device is given the dma address, the device owns this memory until
- * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
+ * either xen_swiotlb_unmap_phys or xen_swiotlb_dma_sync_single is performed.
  */
-static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir,
+static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
+		size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
 {
-	phys_addr_t map, phys = page_to_phys(page) + offset;
-	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);
+	dma_addr_t dev_addr;
+	phys_addr_t map;
 
 	BUG_ON(dir == DMA_NONE);
+
+	if (attrs & DMA_ATTR_MMIO) {
+		if (unlikely(!dma_capable(dev, phys, size, false))) {
+			dev_err_once(
+				dev,
+				"DMA addr %pa+%zu overflow (mask %llx, bus limit %llx).\n",
+				&phys, size, *dev->dma_mask,
+				dev->bus_dma_limit);
+			WARN_ON_ONCE(1);
+			return DMA_MAPPING_ERROR;
+		}
+		return phys;
+	}
+
+	dev_addr = xen_phys_to_dma(dev, phys);
+
 	/*
 	 * If the address happens to be in the device's DMA window,
 	 * we can safely return the device addr and not worry about bounce
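
For DMA_ATTR_MMIO the new function performs no Xen address translation and no
bounce buffering: it only checks that the device can address the range and
returns the physical address unchanged as the DMA address. The dma_capable()
check it relies on boils down to a mask comparison; simplified from
include/linux/dma-direct.h (the in-tree version also handles a low-memory
corner case for RAM mappings):

static inline bool dma_capable(struct device *dev, dma_addr_t addr,
                               size_t size, bool is_ram)
{
        dma_addr_t end = addr + size - 1;

        /* The range must fit under both the device's DMA mask and any
         * bus-imposed limit; a zero bus_dma_limit means no limit. */
        return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}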
@@ -257,13 +272,13 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 
 /*
  * Unmap a single streaming mode DMA translation. The dma_addr and size must
- * match what was provided for in a previous xen_swiotlb_map_page call. All
+ * match what was provided for in a previous xen_swiotlb_map_phys call. All
  * other usages are undefined.
  *
  * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+static void xen_swiotlb_unmap_phys(struct device *hwdev, dma_addr_t dev_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
@@ -325,7 +340,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
 
 /*
  * Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
+ * concerning calls here are the same as for swiotlb_unmap_phys() above.
  */
 static void
 xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
@@ -337,7 +352,7 @@ xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i)
-		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
+		xen_swiotlb_unmap_phys(hwdev, sg->dma_address, sg_dma_len(sg),
 				dir, attrs);
 
 }
@@ -352,8 +367,8 @@ xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
-				sg->offset, sg->length, dir, attrs);
+		sg->dma_address = xen_swiotlb_map_phys(dev, sg_phys(sg),
+				sg->length, dir, attrs);
 		if (sg->dma_address == DMA_MAPPING_ERROR)
 			goto out_unmap;
 		sg_dma_len(sg) = sg->length;
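
Dropping the sg_page()/offset arithmetic in the scatterlist path is safe
because sg_phys() computes exactly the physical address the old code assembled
by hand; per include/linux/scatterlist.h it is essentially:

static inline dma_addr_t sg_phys(struct scatterlist *sg)
{
        /* Physical address of the buffer this scatterlist entry describes. */
        return page_to_phys(sg_page(sg)) + sg->offset;
}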
@@ -392,25 +407,6 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 	}
 }
 
-static dma_addr_t xen_swiotlb_direct_map_resource(struct device *dev,
-						  phys_addr_t paddr,
-						  size_t size,
-						  enum dma_data_direction dir,
-						  unsigned long attrs)
-{
-	dma_addr_t dma_addr = paddr;
-
-	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
-		dev_err_once(dev,
-			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
-			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
-		WARN_ON_ONCE(1);
-		return DMA_MAPPING_ERROR;
-	}
-
-	return dma_addr;
-}
-
 /*
  * Return whether the given device DMA address mask can be supported
  * properly. For example, if your device can only drive the low 24-bits
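
Nothing is lost by deleting xen_swiotlb_direct_map_resource(): its
dma_capable() overflow check survives in the DMA_ATTR_MMIO branch of
xen_swiotlb_map_phys() above. Only the format specifier changes from %pad to
%pa, because the value printed is now a phys_addr_t rather than a dma_addr_t.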
@@ -437,13 +433,12 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
 	.map_sg = xen_swiotlb_map_sg,
 	.unmap_sg = xen_swiotlb_unmap_sg,
-	.map_page = xen_swiotlb_map_page,
-	.unmap_page = xen_swiotlb_unmap_page,
+	.map_phys = xen_swiotlb_map_phys,
+	.unmap_phys = xen_swiotlb_unmap_phys,
 	.dma_supported = xen_swiotlb_dma_supported,
 	.mmap = dma_common_mmap,
 	.get_sgtable = dma_common_get_sgtable,
 	.alloc_pages_op = dma_common_alloc_pages,
 	.free_pages = dma_common_free_pages,
 	.max_mapping_size = swiotlb_max_mapping_size,
-	.map_resource = xen_swiotlb_direct_map_resource,
 };
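
Driver-visible behavior is unchanged: callers keep using the high-level DMA
API, and only the ops-table plumbing moves from .map_page/.map_resource to
.map_phys. A hypothetical caller, where mydev and bar_phys are placeholders
rather than anything from this patch:

        /* Map a 4 KiB MMIO window for DMA; hypothetical example. */
        dma_addr_t handle = dma_map_resource(mydev, bar_phys, SZ_4K,
                                             DMA_TO_DEVICE, 0);
        if (dma_mapping_error(mydev, handle))
                return -EIO;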
