
Commit 4a3bb42

Merge tag 'dma-mapping-5.15' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - fix debugfs initialization order (Anthony Iliopoulos)

 - use memory_intersects() directly (Kefeng Wang)

 - allow to return specific errors from ->map_sg (Logan Gunthorpe,
   Martin Oliveira)

 - turn the dma_map_sg return value into an unsigned int (me)

 - provide a common global coherent pool implementation (me)

* tag 'dma-mapping-5.15' of git://git.infradead.org/users/hch/dma-mapping: (31 commits)
  hexagon: use the generic global coherent pool
  dma-mapping: make the global coherent pool conditional
  dma-mapping: add a dma_init_global_coherent helper
  dma-mapping: simplify dma_init_coherent_memory
  dma-mapping: allow using the global coherent pool for !ARM
  ARM/nommu: use the generic dma-direct code for non-coherent devices
  dma-direct: add support for dma_coherent_default_memory
  dma-mapping: return an unsigned int from dma_map_sg{,_attrs}
  dma-mapping: disallow .map_sg operations from returning zero on error
  dma-mapping: return error code from dma_dummy_map_sg()
  x86/amd_gart: don't set failed sg dma_address to DMA_MAPPING_ERROR
  x86/amd_gart: return error code from gart_map_sg()
  xen: swiotlb: return error code from xen_swiotlb_map_sg()
  parisc: return error code from .map_sg() ops
  sparc/iommu: don't set failed sg dma_address to DMA_MAPPING_ERROR
  sparc/iommu: return error codes from .map_sg() ops
  s390/pci: don't set failed sg dma_address to DMA_MAPPING_ERROR
  s390/pci: return error code from s390_dma_map_sg()
  powerpc/iommu: don't set failed sg dma_address to DMA_MAPPING_ERROR
  powerpc/iommu: return error code from .map_sg() ops
  ...
2 parents: eceae1e + c1dec34

30 files changed

Lines changed: 310 additions & 443 deletions
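
The driver-visible part of the series above is the scatterlist error convention: the ->map_sg() ops now return negative error codes instead of zero, dma_map_sg() keeps returning the number of mapped entries (as an unsigned int, 0 on failure), and dma_map_sgtable() forwards the errno to the caller. A minimal sketch of the resulting caller-side pattern; my_dev_submit() and the surrounding driver are hypothetical:

#include <linux/dma-mapping.h>

/* Hypothetical driver helper: map an sg_table and hand it to hardware. */
static int my_dev_submit(struct device *dev, struct sg_table *sgt)
{
	int ret;

	/*
	 * dma_map_sgtable() now propagates the specific error
	 * (e.g. -EIO, -ENOMEM) returned by the ->map_sg() op,
	 * instead of collapsing every failure to zero entries.
	 */
	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		return ret;

	/* ... program the device with sgt->sgl here ... */

	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	return 0;
}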

File tree

arch/alpha/kernel/pci_iommu.c

Lines changed: 7 additions & 3 deletions
@@ -649,7 +649,9 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 		sg->dma_address
 		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
 				     sg->length, dac_allowed);
-		return sg->dma_address != DMA_MAPPING_ERROR;
+		if (sg->dma_address == DMA_MAPPING_ERROR)
+			return -EIO;
+		return 1;
 	}
 
 	start = sg;
@@ -685,8 +687,10 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 	if (out < end)
 		out->dma_length = 0;
 
-	if (out - start == 0)
+	if (out - start == 0) {
 		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
+		return -ENOMEM;
+	}
 	DBGA("pci_map_sg: %ld entries\n", out - start);
 
 	return out - start;
@@ -699,7 +703,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 	   entries.  Unmap them now.  */
 	if (out > start)
 		pci_unmap_sg(pdev, start, out - start, dir);
-	return 0;
+	return -ENOMEM;
 }
 
 /* Unmap a set of streaming mode DMA translations.  Again, cpu read

arch/arm/Kconfig

Lines changed: 3 additions & 2 deletions
@@ -18,8 +18,8 @@ config ARM
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
 	select ARCH_HAS_STRICT_MODULE_RWX if MMU
-	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB
-	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB || !MMU
+	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU
 	select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
@@ -44,6 +44,7 @@ config ARM
 	select CPU_PM if SUSPEND || CPU_IDLE
 	select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select DMA_DECLARE_COHERENT
+	select DMA_GLOBAL_POOL if !MMU
 	select DMA_OPS
 	select DMA_REMAP if MMU
 	select EDAC_SUPPORT

arch/arm/mm/dma-mapping-nommu.c

Lines changed: 6 additions & 167 deletions
@@ -5,78 +5,16 @@
  *  Copyright (C) 2000-2004 Russell King
  */
 
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
 #include <linux/dma-map-ops.h>
-#include <linux/scatterlist.h>
-
 #include <asm/cachetype.h>
 #include <asm/cacheflush.h>
 #include <asm/outercache.h>
 #include <asm/cp15.h>
 
 #include "dma.h"
 
-/*
- * The generic direct mapping code is used if
- *   - MMU/MPU is off
- *   - cpu is v7m w/o cache support
- *   - device is coherent
- * otherwise arm_nommu_dma_ops is used.
- *
- * arm_nommu_dma_ops rely on consistent DMA memory (please, refer to
- * [1] on how to declare such memory).
- *
- * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
- */
-
-static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
-				 dma_addr_t *dma_handle, gfp_t gfp,
-				 unsigned long attrs)
-
-{
-	void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
-
-	/*
-	 * dma_alloc_from_global_coherent() may fail because:
-	 *
-	 * - no consistent DMA region has been defined, so we can't
-	 *   continue.
-	 * - there is no space left in consistent DMA region, so we
-	 *   only can fallback to generic allocator if we are
-	 *   advertised that consistency is not required.
-	 */
-
-	WARN_ON_ONCE(ret == NULL);
-	return ret;
-}
-
-static void arm_nommu_dma_free(struct device *dev, size_t size,
-			       void *cpu_addr, dma_addr_t dma_addr,
-			       unsigned long attrs)
-{
-	int ret = dma_release_from_global_coherent(get_order(size), cpu_addr);
-
-	WARN_ON_ONCE(ret == 0);
-}
-
-static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			      unsigned long attrs)
-{
-	int ret;
-
-	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
-		return ret;
-	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
-	return -ENXIO;
-}
-
-
-static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
-				  enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	dmac_map_area(__va(paddr), size, dir);
 
@@ -86,111 +24,15 @@ static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
 		outer_clean_range(paddr, paddr + size);
 }
 
-static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
-				  enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	if (dir != DMA_TO_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
 		dmac_unmap_area(__va(paddr), size, dir);
 	}
 }
 
-static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
-					 unsigned long offset, size_t size,
-					 enum dma_data_direction dir,
-					 unsigned long attrs)
-{
-	dma_addr_t handle = page_to_phys(page) + offset;
-
-	__dma_page_cpu_to_dev(handle, size, dir);
-
-	return handle;
-}
-
-static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
-				     size_t size, enum dma_data_direction dir,
-				     unsigned long attrs)
-{
-	__dma_page_dev_to_cpu(handle, size, dir);
-}
-
-
-static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
-				int nents, enum dma_data_direction dir,
-				unsigned long attrs)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sgl, sg, nents, i) {
-		sg_dma_address(sg) = sg_phys(sg);
-		sg_dma_len(sg) = sg->length;
-		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
-	}
-
-	return nents;
-}
-
-static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-				   int nents, enum dma_data_direction dir,
-				   unsigned long attrs)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-static void arm_nommu_dma_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(handle, size, dir);
-}
-
-static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(handle, size, dir);
-}
-
-static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
-					     int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
-					  int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-const struct dma_map_ops arm_nommu_dma_ops = {
-	.alloc			= arm_nommu_dma_alloc,
-	.free			= arm_nommu_dma_free,
-	.alloc_pages		= dma_direct_alloc_pages,
-	.free_pages		= dma_direct_free_pages,
-	.mmap			= arm_nommu_dma_mmap,
-	.map_page		= arm_nommu_dma_map_page,
-	.unmap_page		= arm_nommu_dma_unmap_page,
-	.map_sg			= arm_nommu_dma_map_sg,
-	.unmap_sg		= arm_nommu_dma_unmap_sg,
-	.sync_single_for_device	= arm_nommu_dma_sync_single_for_device,
-	.sync_single_for_cpu	= arm_nommu_dma_sync_single_for_cpu,
-	.sync_sg_for_device	= arm_nommu_dma_sync_sg_for_device,
-	.sync_sg_for_cpu	= arm_nommu_dma_sync_sg_for_cpu,
-};
-EXPORT_SYMBOL(arm_nommu_dma_ops);
-
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
@@ -201,14 +43,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		 * enough to check if MPU is in use or not since in absense of
 		 * MPU system memory map is used.
 		 */
-		dev->archdata.dma_coherent = (cacheid) ? coherent : true;
+		dev->dma_coherent = cacheid ? coherent : true;
 	} else {
 		/*
 		 * Assume coherent DMA in case MMU/MPU has not been set up.
 		 */
-		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
+		dev->dma_coherent = (get_cr() & CR_M) ? coherent : true;
 	}
-
-	if (!dev->archdata.dma_coherent)
-		set_dma_ops(dev, &arm_nommu_dma_ops);
 }
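
With arm_nommu_dma_ops gone, non-coherent nommu ARM devices go through the common dma-direct code, which calls the two arch_sync_dma_*() hooks kept above around each transfer. A simplified paraphrase of the dma-direct mapping path (modelled on kernel/dma/direct.h, with the swiotlb and addressing checks elided), not the verbatim kernel code:

#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

/* Sketch only: shows where the arch_sync_dma_*() hook is invoked. */
static inline dma_addr_t dma_direct_map_page_sketch(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;

	/* Cache maintenance is only needed for non-coherent devices. */
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return phys_to_dma(dev, phys);
}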

arch/arm/mm/dma-mapping.c

Lines changed: 16 additions & 10 deletions
@@ -980,23 +980,25 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
-	int i, j;
+	int i, j, ret;
 
 	for_each_sg(sg, s, nents, i) {
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
 		s->dma_length = s->length;
 #endif
 		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
 						s->length, dir, attrs);
-		if (dma_mapping_error(dev, s->dma_address))
+		if (dma_mapping_error(dev, s->dma_address)) {
+			ret = -EIO;
 			goto bad_mapping;
+		}
 	}
 	return nents;
 
 bad_mapping:
 	for_each_sg(sg, s, i, j)
 		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
-	return 0;
+	return ret;
 }
 
 /**
@@ -1622,20 +1624,21 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		bool is_coherent)
 {
 	struct scatterlist *s = sg, *dma = sg, *start = sg;
-	int i, count = 0;
+	int i, count = 0, ret;
 	unsigned int offset = s->offset;
 	unsigned int size = s->offset + s->length;
 	unsigned int max = dma_get_max_seg_size(dev);
 
 	for (i = 1; i < nents; i++) {
 		s = sg_next(s);
 
-		s->dma_address = DMA_MAPPING_ERROR;
 		s->dma_length = 0;
 
 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
-			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
-			    dir, attrs, is_coherent) < 0)
+			ret = __map_sg_chunk(dev, start, size,
+					     &dma->dma_address, dir, attrs,
+					     is_coherent);
+			if (ret < 0)
 				goto bad_mapping;
 
 			dma->dma_address += offset;
@@ -1648,8 +1651,9 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		}
 		size += s->length;
 	}
-	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
-		is_coherent) < 0)
+	ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
+			     is_coherent);
+	if (ret < 0)
 		goto bad_mapping;
 
 	dma->dma_address += offset;
@@ -1660,7 +1664,9 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 bad_mapping:
 	for_each_sg(sg, s, count, i)
 		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
-	return 0;
+	if (ret == -ENOMEM)
+		return ret;
+	return -EINVAL;
 }
 
 /**

arch/hexagon/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -7,6 +7,7 @@ config HEXAGON
 	select ARCH_32BIT_OFF_T
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_NO_PREEMPT
+	select DMA_GLOBAL_POOL
 	# Other pending projects/to-do items.
 	# select HAVE_REGS_AND_STACK_ACCESS_API
 	# select HAVE_HW_BREAKPOINT if PERF_EVENTS
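
DMA_GLOBAL_POOL is the Kconfig symbol for the new common global coherent pool; the series also adds a dma_init_global_coherent() helper that an architecture calls once at boot to donate its reserved uncached region to that pool. A sketch of such a boot-time registration, using a made-up base address and size rather than hexagon's actual values:

#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/sizes.h>

static int __init my_arch_dma_init(void)
{
	/* 0x40000000 and SZ_16M are placeholder values for this sketch. */
	return dma_init_global_coherent(0x40000000, SZ_16M);
}
core_initcall(my_arch_dma_init);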
