Skip to content

Commit 56e9a0d

Browse files
nicolinc authored and jgunthorpe committed
iommufd: Add mmap interface
For vIOMMU passing through HW resources to user space (VMs), allowing a VM to control the passed through HW directly by accessing hardware registers, add an mmap infrastructure to map the physical MMIO pages to user space. Maintain a maple tree per ictx as a translation table managing mmappable regions, from an allocated for-user mmap offset to an iommufd_mmap struct, where it stores the real physical address range for io_remap_pfn_range(). Keep track of the lifecycle of the mmappable region by taking refcount of its owner, so as to enforce user space to unmap the region first before it can destroy its owner object. To allow an IOMMU driver to add and delete mmappable regions onto/from the maple tree, add iommufd_viommu_alloc/destroy_mmap helpers. Link: https://patch.msgid.link/r/9a888a326b12aa5fe940083eae1156304e210fe0.1752126748.git.nicolinc@nvidia.com Reviewed-by: Kevin Tian <kevin.tian@intel.com> Reviewed-by: Pranjal Shrivastava <praan@google.com> Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> Signed-off-by: Nicolin Chen <nicolinc@nvidia.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
1 parent 2089691 commit 56e9a0d

4 files changed

Lines changed: 171 additions & 0 deletions

File tree

drivers/iommu/iommufd/driver.c

Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,58 @@ void _iommufd_object_undepend(struct iommufd_object *obj_dependent,
3131
}
3232
EXPORT_SYMBOL_NS_GPL(_iommufd_object_undepend, "IOMMUFD");
3333

34+
/*
35+
* Allocate an @offset to return to user space to use for an mmap() syscall
36+
*
37+
* Driver should use a per-structure helper in include/linux/iommufd.h
38+
*/
39+
int _iommufd_alloc_mmap(struct iommufd_ctx *ictx, struct iommufd_object *owner,
40+
phys_addr_t mmio_addr, size_t length,
41+
unsigned long *offset)
42+
{
43+
struct iommufd_mmap *immap;
44+
unsigned long startp;
45+
int rc;
46+
47+
if (!PAGE_ALIGNED(mmio_addr))
48+
return -EINVAL;
49+
if (!length || !PAGE_ALIGNED(length))
50+
return -EINVAL;
51+
52+
immap = kzalloc(sizeof(*immap), GFP_KERNEL);
53+
if (!immap)
54+
return -ENOMEM;
55+
immap->owner = owner;
56+
immap->length = length;
57+
immap->mmio_addr = mmio_addr;
58+
59+
/* Skip the first page to ease caller identifying the returned offset */
60+
rc = mtree_alloc_range(&ictx->mt_mmap, &startp, immap, immap->length,
61+
PAGE_SIZE, ULONG_MAX, GFP_KERNEL);
62+
if (rc < 0) {
63+
kfree(immap);
64+
return rc;
65+
}
66+
67+
/* mmap() syscall will right-shift the offset in vma->vm_pgoff too */
68+
immap->vm_pgoff = startp >> PAGE_SHIFT;
69+
*offset = startp;
70+
return 0;
71+
}
72+
EXPORT_SYMBOL_NS_GPL(_iommufd_alloc_mmap, "IOMMUFD");
73+
74+
/* Driver should use a per-structure helper in include/linux/iommufd.h */
75+
void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
76+
struct iommufd_object *owner, unsigned long offset)
77+
{
78+
struct iommufd_mmap *immap;
79+
80+
immap = mtree_erase(&ictx->mt_mmap, offset);
81+
WARN_ON_ONCE(!immap || immap->owner != owner);
82+
kfree(immap);
83+
}
84+
EXPORT_SYMBOL_NS_GPL(_iommufd_destroy_mmap, "IOMMUFD");
85+
3486
/* Caller should xa_lock(&viommu->vdevs) to protect the return value */
3587
struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
3688
unsigned long vdev_id)

drivers/iommu/iommufd/iommufd_private.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
#include <linux/iommu.h>
88
#include <linux/iommufd.h>
99
#include <linux/iova_bitmap.h>
10+
#include <linux/maple_tree.h>
1011
#include <linux/rwsem.h>
1112
#include <linux/uaccess.h>
1213
#include <linux/xarray.h>
@@ -44,6 +45,7 @@ struct iommufd_ctx {
4445
struct xarray groups;
4546
wait_queue_head_t destroy_wait;
4647
struct rw_semaphore ioas_creation_lock;
48+
struct maple_tree mt_mmap;
4749

4850
struct mutex sw_msi_lock;
4951
struct list_head sw_msi_list;
@@ -55,6 +57,18 @@ struct iommufd_ctx {
5557
struct iommufd_ioas *vfio_ioas;
5658
};
5759

60+
/* Entry for iommufd_ctx::mt_mmap */
struct iommufd_mmap {
	/* Object whose users refcount pins this region while it is mapped */
	struct iommufd_object *owner;

	/* Page-shifted start position in mt_mmap to validate vma->vm_pgoff */
	unsigned long vm_pgoff;

	/* Physical range for io_remap_pfn_range() */
	phys_addr_t mmio_addr;
	size_t length;
};
71+
5872
/*
5973
* The IOVA to PFN map. The map automatically copies the PFNs into multiple
6074
* domains and permits sharing of PFNs between io_pagetable instances. This

drivers/iommu/iommufd/main.c

Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -275,6 +275,7 @@ static int iommufd_fops_open(struct inode *inode, struct file *filp)
275275
xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT);
276276
xa_init(&ictx->groups);
277277
ictx->file = filp;
278+
mt_init_flags(&ictx->mt_mmap, MT_FLAGS_ALLOC_RANGE);
278279
init_waitqueue_head(&ictx->destroy_wait);
279280
mutex_init(&ictx->sw_msi_lock);
280281
INIT_LIST_HEAD(&ictx->sw_msi_list);
@@ -479,11 +480,73 @@ static long iommufd_fops_ioctl(struct file *filp, unsigned int cmd,
479480
return ret;
480481
}
481482

483+
static void iommufd_fops_vma_open(struct vm_area_struct *vma)
484+
{
485+
struct iommufd_mmap *immap = vma->vm_private_data;
486+
487+
refcount_inc(&immap->owner->users);
488+
}
489+
490+
static void iommufd_fops_vma_close(struct vm_area_struct *vma)
491+
{
492+
struct iommufd_mmap *immap = vma->vm_private_data;
493+
494+
refcount_dec(&immap->owner->users);
495+
}
496+
497+
/*
 * The open/close callbacks adjust the owner's users refcount so the backing
 * object cannot be destroyed while any vma (including ones duplicated from
 * the original mapping) still references the region.
 */
static const struct vm_operations_struct iommufd_vma_ops = {
	.open = iommufd_fops_vma_open,
	.close = iommufd_fops_vma_close,
};
501+
502+
/* The vm_pgoff must be pre-allocated from mt_mmap, and given to user space */
static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct iommufd_ctx *ictx = filp->private_data;
	size_t length = vma->vm_end - vma->vm_start;
	struct iommufd_mmap *immap;
	int rc;

	if (!PAGE_ALIGNED(length))
		return -EINVAL;
	/* MMIO mappings must be shared and must never be executable */
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (vma->vm_flags & VM_EXEC)
		return -EPERM;

	/* vma->vm_pgoff carries a page-shifted start position to an immap */
	immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT);
	if (!immap)
		return -ENXIO;
	/*
	 * mtree_load() returns the immap for any contained mmio_addr, so only
	 * allow the exact immap thing to be mapped
	 */
	if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length)
		return -ENXIO;

	/*
	 * NOTE(review): vm_pgoff is reset to 0 before remapping so the vma no
	 * longer reflects the mt_mmap cookie — confirm nothing later relies on
	 * the original vm_pgoff (e.g. unmap_mapping_range-based zapping).
	 */
	vma->vm_pgoff = 0;
	vma->vm_private_data = immap;
	vma->vm_ops = &iommufd_vma_ops;
	/* Device MMIO must not be cached by the CPU */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	rc = io_remap_pfn_range(vma, vma->vm_start,
				immap->mmio_addr >> PAGE_SHIFT, length,
				vma->vm_page_prot);
	if (rc)
		return rc;

	/* vm_ops.open won't be called for mmap itself. */
	refcount_inc(&immap->owner->users);
	return rc;
}
543+
482544
static const struct file_operations iommufd_fops = {
	.owner = THIS_MODULE,
	.open = iommufd_fops_open,
	.release = iommufd_fops_release,
	.unlocked_ioctl = iommufd_fops_ioctl,
	/* Maps MMIO regions pre-registered in ictx->mt_mmap to user space */
	.mmap = iommufd_fops_mmap,
};
488551

489552
/**

include/linux/iommufd.h

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -255,6 +255,11 @@ int _iommufd_object_depend(struct iommufd_object *obj_dependent,
255255
struct iommufd_object *obj_depended);
256256
void _iommufd_object_undepend(struct iommufd_object *obj_dependent,
257257
struct iommufd_object *obj_depended);
258+
int _iommufd_alloc_mmap(struct iommufd_ctx *ictx, struct iommufd_object *owner,
259+
phys_addr_t mmio_addr, size_t length,
260+
unsigned long *offset);
261+
void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
262+
struct iommufd_object *owner, unsigned long offset);
258263
struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
259264
unsigned long vdev_id);
260265
int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
@@ -275,6 +280,20 @@ _iommufd_object_undepend(struct iommufd_object *obj_dependent,
275280
{
276281
}
277282

283+
/*
 * Stub used when the out-of-line implementation is not built in — NOTE(review):
 * the #if condition is outside this view; confirm the config gate.
 */
static inline int _iommufd_alloc_mmap(struct iommufd_ctx *ictx,
				      struct iommufd_object *owner,
				      phys_addr_t mmio_addr, size_t length,
				      unsigned long *offset)
{
	return -EOPNOTSUPP;
}
290+
291+
/* No-op counterpart of the _iommufd_alloc_mmap() stub */
static inline void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
					 struct iommufd_object *owner,
					 unsigned long offset)
{
}
296+
278297
static inline struct device *
279298
iommufd_viommu_find_dev(struct iommufd_viommu *viommu, unsigned long vdev_id)
280299
{
@@ -342,4 +361,27 @@ static inline int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
342361
_iommufd_object_undepend(&dependent->member.obj, \
343362
&depended->member.obj); \
344363
})
364+
365+
/*
 * Helpers for IOMMU driver to alloc/destroy an mmapable area for a structure.
 *
 * To support an mmappable MMIO region, kernel driver must first register it to
 * iommufd core to allocate an @offset, during a driver-structure initialization
 * (e.g. viommu_init op). Then, it should report to user space this @offset and
 * the @length of the MMIO region for mmap syscall.
 *
 * Returns 0 on success or a negative errno (-EINVAL for a misaligned or empty
 * region, -ENOMEM on allocation failure).
 */
static inline int iommufd_viommu_alloc_mmap(struct iommufd_viommu *viommu,
					    phys_addr_t mmio_addr,
					    size_t length,
					    unsigned long *offset)
{
	/* The viommu object is registered as the owner of the region */
	return _iommufd_alloc_mmap(viommu->ictx, &viommu->obj, mmio_addr,
				   length, offset);
}
381+
382+
/* Undo iommufd_viommu_alloc_mmap() for the region registered at @offset */
static inline void iommufd_viommu_destroy_mmap(struct iommufd_viommu *viommu,
					       unsigned long offset)
{
	_iommufd_destroy_mmap(viommu->ictx, &viommu->obj, offset);
}
}
345387
#endif

0 commit comments

Comments
 (0)