Skip to content

Commit 2089691

Browse files
nicolinc authored and jgunthorpe committed
iommufd/selftest: Add coverage for IOMMUFD_CMD_HW_QUEUE_ALLOC
Some simple tests for IOMMUFD_CMD_HW_QUEUE_ALLOC infrastructure covering the new iommufd_hw_queue_depend/undepend() helpers. Link: https://patch.msgid.link/r/e8a194d187d7ef445f43e4a3c04fb39472050afd.1752126748.git.nicolinc@nvidia.com Signed-off-by: Nicolin Chen <nicolinc@nvidia.com> Reviewed-by: Pranjal Shrivastava <praan@google.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
1 parent 0b37d89 commit 2089691

5 files changed

Lines changed: 196 additions & 0 deletions

File tree

drivers/iommu/iommufd/iommufd_test.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -265,4 +265,7 @@ struct iommu_viommu_event_selftest {
265265
__u32 virt_id;
266266
};
267267

268+
#define IOMMU_HW_QUEUE_TYPE_SELFTEST 0xdeadbeef
269+
#define IOMMU_TEST_HW_QUEUE_MAX 2
270+
268271
#endif

drivers/iommu/iommufd/selftest.c

Lines changed: 97 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -150,13 +150,28 @@ to_mock_nested(struct iommu_domain *domain)
150150
struct mock_viommu {
151151
struct iommufd_viommu core;
152152
struct mock_iommu_domain *s2_parent;
153+
struct mock_hw_queue *hw_queue[IOMMU_TEST_HW_QUEUE_MAX];
154+
struct mutex queue_mutex;
153155
};
154156

155157
static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
156158
{
157159
return container_of(viommu, struct mock_viommu, core);
158160
}
159161

162+
struct mock_hw_queue {
163+
struct iommufd_hw_queue core;
164+
struct mock_viommu *mock_viommu;
165+
struct mock_hw_queue *prev;
166+
u16 index;
167+
};
168+
169+
static inline struct mock_hw_queue *
170+
to_mock_hw_queue(struct iommufd_hw_queue *hw_queue)
171+
{
172+
return container_of(hw_queue, struct mock_hw_queue, core);
173+
}
174+
160175
enum selftest_obj_type {
161176
TYPE_IDEV,
162177
};
@@ -670,9 +685,11 @@ static void mock_viommu_destroy(struct iommufd_viommu *viommu)
670685
{
671686
struct mock_iommu_device *mock_iommu = container_of(
672687
viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
688+
struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
673689

674690
if (refcount_dec_and_test(&mock_iommu->users))
675691
complete(&mock_iommu->complete);
692+
mutex_destroy(&mock_viommu->queue_mutex);
676693

677694
/* iommufd core frees mock_viommu and viommu */
678695
}
@@ -764,10 +781,86 @@ static int mock_viommu_cache_invalidate(struct iommufd_viommu *viommu,
764781
return rc;
765782
}
766783

784+
static size_t mock_viommu_get_hw_queue_size(struct iommufd_viommu *viommu,
785+
enum iommu_hw_queue_type queue_type)
786+
{
787+
if (queue_type != IOMMU_HW_QUEUE_TYPE_SELFTEST)
788+
return 0;
789+
return HW_QUEUE_STRUCT_SIZE(struct mock_hw_queue, core);
790+
}
791+
792+
static void mock_hw_queue_destroy(struct iommufd_hw_queue *hw_queue)
793+
{
794+
struct mock_hw_queue *mock_hw_queue = to_mock_hw_queue(hw_queue);
795+
struct mock_viommu *mock_viommu = mock_hw_queue->mock_viommu;
796+
797+
mutex_lock(&mock_viommu->queue_mutex);
798+
mock_viommu->hw_queue[mock_hw_queue->index] = NULL;
799+
if (mock_hw_queue->prev)
800+
iommufd_hw_queue_undepend(mock_hw_queue, mock_hw_queue->prev,
801+
core);
802+
mutex_unlock(&mock_viommu->queue_mutex);
803+
}
804+
805+
/* Test iommufd_hw_queue_depend/undepend() */
806+
static int mock_hw_queue_init_phys(struct iommufd_hw_queue *hw_queue, u32 index,
807+
phys_addr_t base_addr_pa)
808+
{
809+
struct mock_viommu *mock_viommu = to_mock_viommu(hw_queue->viommu);
810+
struct mock_hw_queue *mock_hw_queue = to_mock_hw_queue(hw_queue);
811+
struct mock_hw_queue *prev = NULL;
812+
int rc = 0;
813+
814+
if (index >= IOMMU_TEST_HW_QUEUE_MAX)
815+
return -EINVAL;
816+
817+
mutex_lock(&mock_viommu->queue_mutex);
818+
819+
if (mock_viommu->hw_queue[index]) {
820+
rc = -EEXIST;
821+
goto unlock;
822+
}
823+
824+
if (index) {
825+
prev = mock_viommu->hw_queue[index - 1];
826+
if (!prev) {
827+
rc = -EIO;
828+
goto unlock;
829+
}
830+
}
831+
832+
/*
833+
* Test to catch a kernel bug if the core converted the physical address
834+
* incorrectly. Let mock_domain_iova_to_phys() WARN_ON if it fails.
835+
*/
836+
if (base_addr_pa != iommu_iova_to_phys(&mock_viommu->s2_parent->domain,
837+
hw_queue->base_addr)) {
838+
rc = -EFAULT;
839+
goto unlock;
840+
}
841+
842+
if (prev) {
843+
rc = iommufd_hw_queue_depend(mock_hw_queue, prev, core);
844+
if (rc)
845+
goto unlock;
846+
}
847+
848+
mock_hw_queue->prev = prev;
849+
mock_hw_queue->mock_viommu = mock_viommu;
850+
mock_viommu->hw_queue[index] = mock_hw_queue;
851+
852+
hw_queue->destroy = &mock_hw_queue_destroy;
853+
unlock:
854+
mutex_unlock(&mock_viommu->queue_mutex);
855+
return rc;
856+
}
857+
767858
static struct iommufd_viommu_ops mock_viommu_ops = {
768859
.destroy = mock_viommu_destroy,
769860
.alloc_domain_nested = mock_viommu_alloc_domain_nested,
770861
.cache_invalidate = mock_viommu_cache_invalidate,
862+
.get_hw_queue_size = mock_viommu_get_hw_queue_size,
863+
.hw_queue_init_phys = mock_hw_queue_init_phys,
771864
};
772865

773866
static size_t mock_get_viommu_size(struct device *dev,
@@ -784,6 +877,7 @@ static int mock_viommu_init(struct iommufd_viommu *viommu,
784877
{
785878
struct mock_iommu_device *mock_iommu = container_of(
786879
viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
880+
struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
787881
struct iommu_viommu_selftest data;
788882
int rc;
789883

@@ -801,6 +895,9 @@ static int mock_viommu_init(struct iommufd_viommu *viommu,
801895
}
802896

803897
refcount_inc(&mock_iommu->users);
898+
mutex_init(&mock_viommu->queue_mutex);
899+
mock_viommu->s2_parent = to_mock_domain(parent_domain);
900+
804901
viommu->ops = &mock_viommu_ops;
805902
return 0;
806903
}

tools/testing/selftests/iommu/iommufd.c

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3032,6 +3032,65 @@ TEST_F(iommufd_viommu, vdevice_cache)
30323032
}
30333033
}
30343034

3035+
TEST_F(iommufd_viommu, hw_queue)
3036+
{
3037+
__u64 iova = MOCK_APERTURE_START, iova2;
3038+
uint32_t viommu_id = self->viommu_id;
3039+
uint32_t hw_queue_id[2];
3040+
3041+
if (!viommu_id)
3042+
SKIP(return, "Skipping test for variant no_viommu");
3043+
3044+
/* Fail IOMMU_HW_QUEUE_TYPE_DEFAULT */
3045+
test_err_hw_queue_alloc(EOPNOTSUPP, viommu_id,
3046+
IOMMU_HW_QUEUE_TYPE_DEFAULT, 0, iova, PAGE_SIZE,
3047+
&hw_queue_id[0]);
3048+
/* Fail queue addr and length */
3049+
test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3050+
0, iova, 0, &hw_queue_id[0]);
3051+
test_err_hw_queue_alloc(EOVERFLOW, viommu_id,
3052+
IOMMU_HW_QUEUE_TYPE_SELFTEST, 0, ~(uint64_t)0,
3053+
PAGE_SIZE, &hw_queue_id[0]);
3054+
/* Fail missing iova */
3055+
test_err_hw_queue_alloc(ENOENT, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3056+
0, iova, PAGE_SIZE, &hw_queue_id[0]);
3057+
3058+
/* Map iova */
3059+
test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
3060+
test_ioctl_ioas_map(buffer + PAGE_SIZE, PAGE_SIZE, &iova2);
3061+
3062+
/* Fail index=1 and =MAX; must start from index=0 */
3063+
test_err_hw_queue_alloc(EIO, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
3064+
iova, PAGE_SIZE, &hw_queue_id[0]);
3065+
test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3066+
IOMMU_TEST_HW_QUEUE_MAX, iova, PAGE_SIZE,
3067+
&hw_queue_id[0]);
3068+
3069+
/* Allocate index=0, declare ownership of the iova */
3070+
test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 0,
3071+
iova, PAGE_SIZE, &hw_queue_id[0]);
3072+
/* Fail duplicated index */
3073+
test_err_hw_queue_alloc(EEXIST, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
3074+
0, iova, PAGE_SIZE, &hw_queue_id[0]);
3075+
/* Fail unmap, due to iova ownership */
3076+
test_err_ioctl_ioas_unmap(EBUSY, iova, PAGE_SIZE);
3077+
/* The 2nd page is not pinned, so it can be unmmap */
3078+
test_ioctl_ioas_unmap(iova2, PAGE_SIZE);
3079+
3080+
/* Allocate index=1, with an unaligned case */
3081+
test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
3082+
iova + PAGE_SIZE / 2, PAGE_SIZE / 2,
3083+
&hw_queue_id[1]);
3084+
/* Fail to destroy, due to dependency */
3085+
EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hw_queue_id[0]));
3086+
3087+
/* Destroy in descending order */
3088+
test_ioctl_destroy(hw_queue_id[1]);
3089+
test_ioctl_destroy(hw_queue_id[0]);
3090+
/* Now it can unmap the first page */
3091+
test_ioctl_ioas_unmap(iova, PAGE_SIZE);
3092+
}
3093+
30353094
FIXTURE(iommufd_device_pasid)
30363095
{
30373096
int fd;

tools/testing/selftests/iommu/iommufd_fail_nth.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -634,6 +634,7 @@ TEST_FAIL_NTH(basic_fail_nth, device)
634634
uint32_t idev_id;
635635
uint32_t hwpt_id;
636636
uint32_t viommu_id;
637+
uint32_t hw_queue_id;
637638
uint32_t vdev_id;
638639
__u64 iova;
639640

@@ -696,6 +697,11 @@ TEST_FAIL_NTH(basic_fail_nth, device)
696697
if (_test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, 0, &vdev_id))
697698
return -1;
698699

700+
if (_test_cmd_hw_queue_alloc(self->fd, viommu_id,
701+
IOMMU_HW_QUEUE_TYPE_SELFTEST, 0, iova,
702+
PAGE_SIZE, &hw_queue_id))
703+
return -1;
704+
699705
if (_test_ioctl_fault_alloc(self->fd, &fault_id, &fault_fd))
700706
return -1;
701707
close(fault_fd);

tools/testing/selftests/iommu/iommufd_utils.h

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -956,6 +956,37 @@ static int _test_cmd_vdevice_alloc(int fd, __u32 viommu_id, __u32 idev_id,
956956
_test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, \
957957
virt_id, vdev_id))
958958

959+
static int _test_cmd_hw_queue_alloc(int fd, __u32 viommu_id, __u32 type,
960+
__u32 idx, __u64 base_addr, __u64 length,
961+
__u32 *hw_queue_id)
962+
{
963+
struct iommu_hw_queue_alloc cmd = {
964+
.size = sizeof(cmd),
965+
.viommu_id = viommu_id,
966+
.type = type,
967+
.index = idx,
968+
.nesting_parent_iova = base_addr,
969+
.length = length,
970+
};
971+
int ret;
972+
973+
ret = ioctl(fd, IOMMU_HW_QUEUE_ALLOC, &cmd);
974+
if (ret)
975+
return ret;
976+
if (hw_queue_id)
977+
*hw_queue_id = cmd.out_hw_queue_id;
978+
return 0;
979+
}
980+
981+
#define test_cmd_hw_queue_alloc(viommu_id, type, idx, base_addr, len, out_qid) \
982+
ASSERT_EQ(0, _test_cmd_hw_queue_alloc(self->fd, viommu_id, type, idx, \
983+
base_addr, len, out_qid))
984+
#define test_err_hw_queue_alloc(_errno, viommu_id, type, idx, base_addr, len, \
985+
out_qid) \
986+
EXPECT_ERRNO(_errno, \
987+
_test_cmd_hw_queue_alloc(self->fd, viommu_id, type, idx, \
988+
base_addr, len, out_qid))
989+
959990
static int _test_cmd_veventq_alloc(int fd, __u32 viommu_id, __u32 type,
960991
__u32 *veventq_id, __u32 *veventq_fd)
961992
{

0 commit comments

Comments
 (0)