Skip to content

Commit 40ac971

Browse files
Roman Skakun authored and Christoph Hellwig committed
dma-mapping: handle vmalloc addresses in dma_common_{mmap,get_sgtable}
xen-swiotlb can use vmalloc-backed addresses for DMA coherent allocations and uses the common helpers. Properly handle them to unbreak Xen on ARM platforms.

Fixes: 1b65c4e ("swiotlb-xen: use xen_alloc/free_coherent_pages")
Signed-off-by: Roman Skakun <roman_skakun@epam.com>
Reviewed-by: Andrii Anisov <andrii_anisov@epam.com>
[hch: split the patch, renamed the helpers]
Signed-off-by: Christoph Hellwig <hch@lst.de>
1 parent d936eb2 commit 40ac971

1 file changed

Lines changed: 10 additions & 2 deletions

File tree

kernel/dma/ops_helpers.c

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,14 +5,21 @@
55
*/
66
#include <linux/dma-map-ops.h>
77

8+
/*
 * Look up the struct page backing a DMA coherent buffer's CPU address.
 * Coherent buffers may be vmalloc-backed (e.g. xen-swiotlb on ARM, per
 * this commit), in which case virt_to_page() is invalid and
 * vmalloc_to_page() must be used instead.
 */
static struct page *dma_common_vaddr_to_page(void *cpu_addr)
{
	if (!is_vmalloc_addr(cpu_addr))
		return virt_to_page(cpu_addr);
	return vmalloc_to_page(cpu_addr);
}
14+
815
/*
916
* Create scatter-list for the already allocated DMA buffer.
1017
*/
1118
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
1219
void *cpu_addr, dma_addr_t dma_addr, size_t size,
1320
unsigned long attrs)
1421
{
15-
struct page *page = virt_to_page(cpu_addr);
22+
struct page *page = dma_common_vaddr_to_page(cpu_addr);
1623
int ret;
1724

1825
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -32,6 +39,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
3239
unsigned long user_count = vma_pages(vma);
3340
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
3441
unsigned long off = vma->vm_pgoff;
42+
struct page *page = dma_common_vaddr_to_page(cpu_addr);
3543
int ret = -ENXIO;
3644

3745
vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
@@ -43,7 +51,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
4351
return -ENXIO;
4452

4553
return remap_pfn_range(vma, vma->vm_start,
46-
page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
54+
page_to_pfn(page) + vma->vm_pgoff,
4755
user_count << PAGE_SHIFT, vma->vm_page_prot);
4856
#else
4957
return -ENXIO;

0 commit comments

Comments (0)