// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
 */

#include <linux/dma-buf-mapping.h>
#include <linux/pci-p2pdma.h>
#include <linux/dma-resv.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"
#include "uverbs.h"

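/*
 * Only importers that support PCI peer-to-peer DMA (peer2peer) may attach;
 * anything else is rejected up front.
 */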
static int uverbs_dmabuf_attach(struct dma_buf *dmabuf,
				struct dma_buf_attachment *attachment)
{
	if (!attachment->peer2peer)
		return -EOPNOTSUPP;

	return 0;
}

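/*
 * Map the exported physical range for an importer. Runs with the dma_buf
 * reservation lock held and fails once the memory has been revoked. Each
 * successful mapping takes a reference so the revoke path can wait for all
 * of them to be unmapped.
 */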
static struct sg_table *
uverbs_dmabuf_map(struct dma_buf_attachment *attachment,
		  enum dma_data_direction dir)
{
	struct ib_uverbs_dmabuf_file *priv = attachment->dmabuf->priv;
	struct sg_table *ret;

	dma_resv_assert_held(priv->dmabuf->resv);

	if (priv->revoked)
		return ERR_PTR(-ENODEV);

	ret = dma_buf_phys_vec_to_sgt(attachment, priv->provider,
				      &priv->phys_vec, 1, priv->phys_vec.len,
				      dir);
	if (IS_ERR(ret))
		return ret;

	kref_get(&priv->kref);
	return ret;
}

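/*
 * Tear down a mapping created by uverbs_dmabuf_map() and drop the reference
 * it took; the final put lets a pending revoke complete.
 */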
static void uverbs_dmabuf_unmap(struct dma_buf_attachment *attachment,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	struct ib_uverbs_dmabuf_file *priv = attachment->dmabuf->priv;

	dma_resv_assert_held(priv->dmabuf->resv);
	dma_buf_free_sgt(attachment, sgt, dir);
	kref_put(&priv->kref, ib_uverbs_dmabuf_done);
}

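/*
 * Pinning is not supported: the exporting driver must be able to revoke the
 * memory at any time, so importers are forced onto the dynamic (move_notify)
 * path.
 */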
static int uverbs_dmabuf_pin(struct dma_buf_attachment *attach)
{
	return -EOPNOTSUPP;
}

static void uverbs_dmabuf_unpin(struct dma_buf_attachment *attach)
{
}

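/*
 * Called when the last reference to the dmabuf file goes away.
 */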
static void uverbs_dmabuf_release(struct dma_buf *dmabuf)
{
	struct ib_uverbs_dmabuf_file *priv = dmabuf->priv;

	/*
	 * This can only happen if the fput came from alloc_abort_fd_uobject()
	 */
	if (!priv->uobj.context)
		return;

	uverbs_uobject_release(&priv->uobj);
}

static const struct dma_buf_ops uverbs_dmabuf_ops = {
	.attach = uverbs_dmabuf_attach,
	.map_dma_buf = uverbs_dmabuf_map,
	.unmap_dma_buf = uverbs_dmabuf_unmap,
	.pin = uverbs_dmabuf_pin,
	.unpin = uverbs_dmabuf_unpin,
	.release = uverbs_dmabuf_release,
};

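/*
 * DMABUF_ALLOC: translate the supplied page offset into the driver's mmap
 * entry, fetch the physical range behind it and export that range as a
 * revocable dmabuf bound to the new fd uobject.
 */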
static int UVERBS_HANDLER(UVERBS_METHOD_DMABUF_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj =
		uverbs_attr_get(attrs, UVERBS_ATTR_ALLOC_DMABUF_HANDLE)
			->obj_attr.uobject;
	struct ib_uverbs_dmabuf_file *uverbs_dmabuf =
		container_of(uobj, struct ib_uverbs_dmabuf_file, uobj);
	struct ib_device *ib_dev = attrs->context->device;
	struct rdma_user_mmap_entry *mmap_entry;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	off_t pg_off;
	int ret;

	ret = uverbs_get_const(&pg_off, attrs, UVERBS_ATTR_ALLOC_DMABUF_PGOFF);
	if (ret)
		return ret;

	mmap_entry = ib_dev->ops.pgoff_to_mmap_entry(attrs->context, pg_off);
	if (!mmap_entry)
		return -EINVAL;

	ret = ib_dev->ops.mmap_get_pfns(mmap_entry, &uverbs_dmabuf->phys_vec,
					&uverbs_dmabuf->provider);
	if (ret)
		goto err;

	exp_info.ops = &uverbs_dmabuf_ops;
	exp_info.size = uverbs_dmabuf->phys_vec.len;
	exp_info.flags = O_CLOEXEC;
	exp_info.priv = uverbs_dmabuf;

	uverbs_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(uverbs_dmabuf->dmabuf)) {
		ret = PTR_ERR(uverbs_dmabuf->dmabuf);
		goto err;
	}

	kref_init(&uverbs_dmabuf->kref);
	init_completion(&uverbs_dmabuf->comp);
	INIT_LIST_HEAD(&uverbs_dmabuf->dmabufs_elm);
	mutex_lock(&mmap_entry->dmabufs_lock);
	if (mmap_entry->driver_removed)
		ret = -EIO;
	else
		list_add_tail(&uverbs_dmabuf->dmabufs_elm, &mmap_entry->dmabufs);
	mutex_unlock(&mmap_entry->dmabufs_lock);
	if (ret)
		goto err_revoked;

	uobj->object = uverbs_dmabuf->dmabuf->file;
	uverbs_dmabuf->mmap_entry = mmap_entry;
	uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_ALLOC_DMABUF_HANDLE);
	return 0;

err_revoked:
	dma_buf_put(uverbs_dmabuf->dmabuf);
err:
	rdma_user_mmap_entry_put(mmap_entry);
	return ret;
}

DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_DMABUF_ALLOC,
	UVERBS_ATTR_FD(UVERBS_ATTR_ALLOC_DMABUF_HANDLE,
		       UVERBS_OBJECT_DMABUF,
		       UVERBS_ACCESS_NEW,
		       UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DMABUF_PGOFF,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY));

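/*
 * Destroying the uobject revokes the dmabuf: mark it revoked under the
 * reservation lock, tell importers via move_notify, wait for their fences
 * and then for every outstanding mapping to be unmapped before dropping the
 * mmap entry reference.
 */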
static void uverbs_dmabuf_fd_destroy_uobj(struct ib_uobject *uobj,
					  enum rdma_remove_reason why)
{
	struct ib_uverbs_dmabuf_file *uverbs_dmabuf =
		container_of(uobj, struct ib_uverbs_dmabuf_file, uobj);
	bool wait_for_comp = false;

	mutex_lock(&uverbs_dmabuf->mmap_entry->dmabufs_lock);
	dma_resv_lock(uverbs_dmabuf->dmabuf->resv, NULL);
	if (!uverbs_dmabuf->revoked) {
		uverbs_dmabuf->revoked = true;
		list_del(&uverbs_dmabuf->dmabufs_elm);
		dma_buf_move_notify(uverbs_dmabuf->dmabuf);
		dma_resv_wait_timeout(uverbs_dmabuf->dmabuf->resv,
				      DMA_RESV_USAGE_BOOKKEEP, false,
				      MAX_SCHEDULE_TIMEOUT);
		wait_for_comp = true;
	}
	dma_resv_unlock(uverbs_dmabuf->dmabuf->resv);
	if (wait_for_comp) {
		kref_put(&uverbs_dmabuf->kref, ib_uverbs_dmabuf_done);
		/* Wait until all DMA unmaps have completed. */
		wait_for_completion(&uverbs_dmabuf->comp);
	}
	mutex_unlock(&uverbs_dmabuf->mmap_entry->dmabufs_lock);

	/* Matches the get done as part of pgoff_to_mmap_entry() */
	rdma_user_mmap_entry_put(uverbs_dmabuf->mmap_entry);
}

DECLARE_UVERBS_NAMED_OBJECT(
	UVERBS_OBJECT_DMABUF,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_dmabuf_file),
			     uverbs_dmabuf_fd_destroy_uobj,
			     NULL, NULL, O_RDONLY),
	&UVERBS_METHOD(UVERBS_METHOD_DMABUF_ALLOC));

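/*
 * The DMABUF object is only exposed on devices that implement both
 * mmap_get_pfns() and pgoff_to_mmap_entry().
 */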
const struct uapi_definition uverbs_def_obj_dmabuf[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DMABUF),
	UAPI_DEF_OBJ_NEEDS_FN(mmap_get_pfns),
	UAPI_DEF_OBJ_NEEDS_FN(pgoff_to_mmap_entry),
	{}
};