// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
 */
#include "iommufd_private.h"

| 6 | +void iommufd_viommu_destroy(struct iommufd_object *obj) |
| 7 | +{ |
| 8 | + struct iommufd_viommu *viommu = |
| 9 | + container_of(obj, struct iommufd_viommu, obj); |
| 10 | + |
| 11 | + if (viommu->ops && viommu->ops->destroy) |
| 12 | + viommu->ops->destroy(viommu); |
| 13 | + refcount_dec(&viommu->hwpt->common.obj.users); |
| 14 | +} |
| 15 | + |
/*
 * Handle IOMMU_VIOMMU_ALLOC: create a driver-provided vIOMMU object on top of
 * a nesting-parent paging HWPT for the IOMMU instance backing @cmd->dev_id.
 *
 * On success, writes the new object ID to cmd->out_viommu_id and returns 0;
 * otherwise returns a negative errno. The new vIOMMU holds a users reference
 * on the parent HWPT until iommufd_viommu_destroy() drops it.
 */
int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
	struct iommu_viommu_alloc *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_viommu *viommu;
	struct iommufd_device *idev;
	const struct iommu_ops *ops;
	int rc;

	/* No flags are defined yet, and a driver-specific type is mandatory */
	if (cmd->flags || cmd->type == IOMMU_VIOMMU_TYPE_DEFAULT)
		return -EOPNOTSUPP;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	/* The device's IOMMU driver must support vIOMMU allocation */
	ops = dev_iommu_ops(idev->dev);
	if (!ops->viommu_alloc) {
		rc = -EOPNOTSUPP;
		goto out_put_idev;
	}

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging)) {
		rc = PTR_ERR(hwpt_paging);
		goto out_put_idev;
	}

	/* Only a nesting-parent paging HWPT can back a vIOMMU */
	if (!hwpt_paging->nest_parent) {
		rc = -EINVAL;
		goto out_put_hwpt;
	}

	viommu = ops->viommu_alloc(idev->dev, hwpt_paging->common.domain,
				   ucmd->ictx, cmd->type);
	if (IS_ERR(viommu)) {
		rc = PTR_ERR(viommu);
		goto out_put_hwpt;
	}

	viommu->type = cmd->type;
	viommu->ictx = ucmd->ictx;
	viommu->hwpt = hwpt_paging;
	/* Long-term hold on the parent HWPT; released in iommufd_viommu_destroy() */
	refcount_inc(&viommu->hwpt->common.obj.users);
	/*
	 * It is the most likely case that a physical IOMMU is unpluggable. A
	 * pluggable IOMMU instance (if exists) is responsible for refcounting
	 * on its own.
	 */
	viommu->iommu_dev = __iommu_get_iommu_dev(idev->dev);

	cmd->out_viommu_id = viommu->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_abort;
	/* Publish the object; from here it is visible to other ioctls */
	iommufd_object_finalize(ucmd->ictx, &viommu->obj);
	/* Success path still drops the temporary hwpt/idev lookup references */
	goto out_put_hwpt;

out_abort:
	/* Undo the allocation; this also runs the destroy path */
	iommufd_object_abort_and_destroy(ucmd->ictx, &viommu->obj);
out_put_hwpt:
	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}
0 commit comments