Skip to content

Commit db892a9

Browse files
committed
accel/ivpu: Improve debug and warning messages
Add IOCTL debug bit for logging user provided parameter validation errors. Refactor several warning and error messages to better reflect fault reason. User generated faults should not flood kernel messages with warnings or errors, so change those to ivpu_dbg(). Add additional debug logs for parameter validation in IOCTLs. Check size provided by user in metric streamer start and return -EINVAL together with a debug message print. Reviewed-by: Jeff Hugo <jeff.hugo@oss.qualcomm.com> Signed-off-by: Karol Wachowski <karol.wachowski@linux.intel.com> Link: https://patch.msgid.link/20251104132418.970784-1-karol.wachowski@linux.intel.com
1 parent e568dc3 commit db892a9

6 files changed

Lines changed: 120 additions & 58 deletions

File tree

drivers/accel/ivpu/ivpu_drv.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -79,6 +79,7 @@
7979
#define IVPU_DBG_KREF BIT(11)
8080
#define IVPU_DBG_RPM BIT(12)
8181
#define IVPU_DBG_MMU_MAP BIT(13)
82+
#define IVPU_DBG_IOCTL BIT(14)
8283

8384
#define ivpu_err(vdev, fmt, ...) \
8485
drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)

drivers/accel/ivpu/ivpu_gem.c

Lines changed: 15 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -128,8 +128,6 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
128128
bo->ctx_id = ctx->id;
129129
bo->vpu_addr = bo->mm_node.start;
130130
ivpu_dbg_bo(vdev, bo, "vaddr");
131-
} else {
132-
ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
133131
}
134132

135133
ivpu_bo_unlock(bo);
@@ -289,8 +287,8 @@ static int ivpu_gem_bo_open(struct drm_gem_object *obj, struct drm_file *file)
289287
struct ivpu_addr_range *range;
290288

291289
if (bo->ctx) {
292-
ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
293-
file_priv->ctx.id, bo->ctx->id);
290+
ivpu_dbg(vdev, IOCTL, "Can't add BO %pe to ctx %u: already in ctx %u\n",
291+
bo, file_priv->ctx.id, bo->ctx->id);
294292
return -EALREADY;
295293
}
296294

@@ -357,15 +355,19 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
357355
struct ivpu_bo *bo;
358356
int ret;
359357

360-
if (args->flags & ~DRM_IVPU_BO_FLAGS)
358+
if (args->flags & ~DRM_IVPU_BO_FLAGS) {
359+
ivpu_dbg(vdev, IOCTL, "Invalid BO flags 0x%x\n", args->flags);
361360
return -EINVAL;
361+
}
362362

363-
if (size == 0)
363+
if (size == 0) {
364+
ivpu_dbg(vdev, IOCTL, "Invalid BO size %llu\n", args->size);
364365
return -EINVAL;
366+
}
365367

366368
bo = ivpu_bo_alloc(vdev, size, args->flags);
367369
if (IS_ERR(bo)) {
368-
ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
370+
ivpu_dbg(vdev, IOCTL, "Failed to allocate BO: %pe ctx %u size %llu flags 0x%x\n",
369371
bo, file_priv->ctx.id, args->size, args->flags);
370372
return PTR_ERR(bo);
371373
}
@@ -374,7 +376,7 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
374376

375377
ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
376378
if (ret) {
377-
ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)",
379+
ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n",
378380
bo, file_priv->ctx.id, args->size, args->flags);
379381
} else {
380382
args->vpu_addr = bo->vpu_addr;
@@ -403,14 +405,17 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
403405

404406
bo = ivpu_bo_alloc(vdev, size, flags);
405407
if (IS_ERR(bo)) {
406-
ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
408+
ivpu_err(vdev, "Failed to allocate BO: %pe vpu_addr 0x%llx size %llu flags 0x%x\n",
407409
bo, range->start, size, flags);
408410
return NULL;
409411
}
410412

411413
ret = ivpu_bo_alloc_vpu_addr(bo, ctx, range);
412-
if (ret)
414+
if (ret) {
415+
ivpu_err(vdev, "Failed to allocate NPU address for BO: %pe ctx %u size %llu: %d\n",
416+
bo, ctx->id, size, ret);
413417
goto err_put;
418+
}
414419

415420
ret = ivpu_bo_bind(bo);
416421
if (ret)

drivers/accel/ivpu/ivpu_gem_userptr.c

Lines changed: 20 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -84,12 +84,12 @@ ivpu_create_userptr_dmabuf(struct ivpu_device *vdev, void __user *user_ptr,
8484
pinned = pin_user_pages_fast((unsigned long)user_ptr, nr_pages, gup_flags, pages);
8585
if (pinned < 0) {
8686
ret = pinned;
87-
ivpu_warn(vdev, "Failed to pin user pages: %d\n", ret);
87+
ivpu_dbg(vdev, IOCTL, "Failed to pin user pages: %d\n", ret);
8888
goto free_pages_array;
8989
}
9090

9191
if (pinned != nr_pages) {
92-
ivpu_warn(vdev, "Pinned %d pages, expected %lu\n", pinned, nr_pages);
92+
ivpu_dbg(vdev, IOCTL, "Pinned %d pages, expected %lu\n", pinned, nr_pages);
9393
ret = -EFAULT;
9494
goto unpin_pages;
9595
}
@@ -102,7 +102,7 @@ ivpu_create_userptr_dmabuf(struct ivpu_device *vdev, void __user *user_ptr,
102102

103103
ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0, size, GFP_KERNEL);
104104
if (ret) {
105-
ivpu_warn(vdev, "Failed to create sg table: %d\n", ret);
105+
ivpu_dbg(vdev, IOCTL, "Failed to create sg table: %d\n", ret);
106106
goto free_sgt;
107107
}
108108

@@ -116,7 +116,7 @@ ivpu_create_userptr_dmabuf(struct ivpu_device *vdev, void __user *user_ptr,
116116
dma_buf = dma_buf_export(&exp_info);
117117
if (IS_ERR(dma_buf)) {
118118
ret = PTR_ERR(dma_buf);
119-
ivpu_warn(vdev, "Failed to export userptr dma-buf: %d\n", ret);
119+
ivpu_dbg(vdev, IOCTL, "Failed to export userptr dma-buf: %d\n", ret);
120120
goto free_sg_table;
121121
}
122122

@@ -170,25 +170,36 @@ int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data, struct
170170
struct ivpu_bo *bo;
171171
int ret;
172172

173-
if (args->flags & ~(DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_DMA_MEM | DRM_IVPU_BO_READ_ONLY))
173+
if (args->flags & ~(DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_DMA_MEM | DRM_IVPU_BO_READ_ONLY)) {
174+
ivpu_dbg(vdev, IOCTL, "Invalid BO flags: 0x%x\n", args->flags);
174175
return -EINVAL;
176+
}
175177

176-
if (!args->user_ptr || !args->size)
178+
if (!args->user_ptr || !args->size) {
179+
ivpu_dbg(vdev, IOCTL, "Userptr or size are zero: ptr %llx size %llu\n",
180+
args->user_ptr, args->size);
177181
return -EINVAL;
182+
}
178183

179-
if (!PAGE_ALIGNED(args->user_ptr) || !PAGE_ALIGNED(args->size))
184+
if (!PAGE_ALIGNED(args->user_ptr) || !PAGE_ALIGNED(args->size)) {
185+
ivpu_dbg(vdev, IOCTL, "Userptr or size not page aligned: ptr %llx size %llu\n",
186+
args->user_ptr, args->size);
180187
return -EINVAL;
188+
}
181189

182-
if (!access_ok(user_ptr, args->size))
190+
if (!access_ok(user_ptr, args->size)) {
191+
ivpu_dbg(vdev, IOCTL, "Userptr is not accessible: ptr %llx size %llu\n",
192+
args->user_ptr, args->size);
183193
return -EFAULT;
194+
}
184195

185196
bo = ivpu_bo_create_from_userptr(vdev, user_ptr, args->size, args->flags);
186197
if (IS_ERR(bo))
187198
return PTR_ERR(bo);
188199

189200
ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
190201
if (ret) {
191-
ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)",
202+
ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n",
192203
bo, file_priv->ctx.id, args->size, args->flags);
193204
} else {
194205
ivpu_dbg(vdev, BO, "Created userptr BO: handle=%u vpu_addr=0x%llx size=%llu flags=0x%x\n",

drivers/accel/ivpu/ivpu_job.c

Lines changed: 65 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -348,7 +348,7 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32
348348

349349
cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id);
350350
if (!cmdq) {
351-
ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id);
351+
ivpu_dbg(vdev, IOCTL, "Failed to find command queue with ID: %u\n", cmdq_id);
352352
return NULL;
353353
}
354354

@@ -534,7 +534,7 @@ ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
534534
job->bo_count = bo_count;
535535
job->done_fence = ivpu_fence_create(vdev);
536536
if (!job->done_fence) {
537-
ivpu_warn_ratelimited(vdev, "Failed to create a fence\n");
537+
ivpu_err(vdev, "Failed to create a fence\n");
538538
goto err_free_job;
539539
}
540540

@@ -687,7 +687,6 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
687687
else
688688
cmdq = ivpu_cmdq_acquire(file_priv, cmdq_id);
689689
if (!cmdq) {
690-
ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d\n", file_priv->ctx.id);
691690
ret = -EINVAL;
692691
goto err_unlock;
693692
}
@@ -771,8 +770,11 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
771770
for (i = 0; i < buf_count; i++) {
772771
struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]);
773772

774-
if (!obj)
773+
if (!obj) {
774+
ivpu_dbg(vdev, IOCTL, "Failed to lookup GEM object with handle %u\n",
775+
buf_handles[i]);
775776
return -ENOENT;
777+
}
776778

777779
job->bos[i] = to_ivpu_bo(obj);
778780

@@ -783,12 +785,13 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
783785

784786
bo = job->bos[CMD_BUF_IDX];
785787
if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) {
786-
ivpu_warn(vdev, "Buffer is already in use\n");
788+
ivpu_dbg(vdev, IOCTL, "Buffer is already in use by another job\n");
787789
return -EBUSY;
788790
}
789791

790792
if (commands_offset >= ivpu_bo_size(bo)) {
791-
ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset);
793+
ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u for buffer size %zu\n",
794+
commands_offset, ivpu_bo_size(bo));
792795
return -EINVAL;
793796
}
794797

@@ -798,11 +801,11 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
798801
struct ivpu_bo *preempt_bo = job->bos[preempt_buffer_index];
799802

800803
if (ivpu_bo_size(preempt_bo) < ivpu_fw_preempt_buf_size(vdev)) {
801-
ivpu_warn(vdev, "Preemption buffer is too small\n");
804+
ivpu_dbg(vdev, IOCTL, "Preemption buffer is too small\n");
802805
return -EINVAL;
803806
}
804807
if (ivpu_bo_is_mappable(preempt_bo)) {
805-
ivpu_warn(vdev, "Preemption buffer cannot be mappable\n");
808+
ivpu_dbg(vdev, IOCTL, "Preemption buffer cannot be mappable\n");
806809
return -EINVAL;
807810
}
808811
job->primary_preempt_buf = preempt_bo;
@@ -811,14 +814,14 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
811814
ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
812815
&acquire_ctx);
813816
if (ret) {
814-
ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
817+
ivpu_warn_ratelimited(vdev, "Failed to lock reservations: %d\n", ret);
815818
return ret;
816819
}
817820

818821
for (i = 0; i < buf_count; i++) {
819822
ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1);
820823
if (ret) {
821-
ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
824+
ivpu_warn_ratelimited(vdev, "Failed to reserve fences: %d\n", ret);
822825
goto unlock_reservations;
823826
}
824827
}
@@ -865,17 +868,14 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv,
865868

866869
job = ivpu_job_create(file_priv, engine, buffer_count);
867870
if (!job) {
868-
ivpu_err(vdev, "Failed to create job\n");
869871
ret = -ENOMEM;
870872
goto err_exit_dev;
871873
}
872874

873875
ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset,
874876
preempt_buffer_index);
875-
if (ret) {
876-
ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
877+
if (ret)
877878
goto err_destroy_job;
878-
}
879879

880880
down_read(&vdev->pm->reset_lock);
881881
ret = ivpu_job_submit(job, priority, cmdq_id);
@@ -901,26 +901,39 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv,
901901
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
902902
{
903903
struct ivpu_file_priv *file_priv = file->driver_priv;
904+
struct ivpu_device *vdev = file_priv->vdev;
904905
struct drm_ivpu_submit *args = data;
905906
u8 priority;
906907

907-
if (args->engine != DRM_IVPU_ENGINE_COMPUTE)
908+
if (args->engine != DRM_IVPU_ENGINE_COMPUTE) {
909+
ivpu_dbg(vdev, IOCTL, "Invalid engine %d\n", args->engine);
908910
return -EINVAL;
911+
}
909912

910-
if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
913+
if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) {
914+
ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", args->priority);
911915
return -EINVAL;
916+
}
912917

913-
if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
918+
if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) {
919+
ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count);
914920
return -EINVAL;
921+
}
915922

916-
if (!IS_ALIGNED(args->commands_offset, 8))
923+
if (!IS_ALIGNED(args->commands_offset, 8)) {
924+
ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset);
917925
return -EINVAL;
926+
}
918927

919-
if (!file_priv->ctx.id)
928+
if (!file_priv->ctx.id) {
929+
ivpu_dbg(vdev, IOCTL, "Context not initialized\n");
920930
return -EINVAL;
931+
}
921932

922-
if (file_priv->has_mmu_faults)
933+
if (file_priv->has_mmu_faults) {
934+
ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id);
923935
return -EBADFD;
936+
}
924937

925938
priority = ivpu_job_to_jsm_priority(args->priority);
926939

@@ -931,28 +944,44 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
931944
int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
932945
{
933946
struct ivpu_file_priv *file_priv = file->driver_priv;
947+
struct ivpu_device *vdev = file_priv->vdev;
934948
struct drm_ivpu_cmdq_submit *args = data;
935949

936-
if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
950+
if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) {
951+
ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n");
937952
return -ENODEV;
953+
}
938954

939-
if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID)
955+
if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID) {
956+
ivpu_dbg(vdev, IOCTL, "Invalid command queue ID %u\n", args->cmdq_id);
940957
return -EINVAL;
958+
}
941959

942-
if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
960+
if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) {
961+
ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count);
943962
return -EINVAL;
963+
}
944964

945-
if (args->preempt_buffer_index >= args->buffer_count)
965+
if (args->preempt_buffer_index >= args->buffer_count) {
966+
ivpu_dbg(vdev, IOCTL, "Invalid preemption buffer index %u\n",
967+
args->preempt_buffer_index);
946968
return -EINVAL;
969+
}
947970

948-
if (!IS_ALIGNED(args->commands_offset, 8))
971+
if (!IS_ALIGNED(args->commands_offset, 8)) {
972+
ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset);
949973
return -EINVAL;
974+
}
950975

951-
if (!file_priv->ctx.id)
976+
if (!file_priv->ctx.id) {
977+
ivpu_dbg(vdev, IOCTL, "Context not initialized\n");
952978
return -EINVAL;
979+
}
953980

954-
if (file_priv->has_mmu_faults)
981+
if (file_priv->has_mmu_faults) {
982+
ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id);
955983
return -EBADFD;
984+
}
956985

957986
return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE,
958987
(void __user *)args->buffers_ptr, args->commands_offset,
@@ -967,11 +996,15 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
967996
struct ivpu_cmdq *cmdq;
968997
int ret;
969998

970-
if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
999+
if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) {
1000+
ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n");
9711001
return -ENODEV;
1002+
}
9721003

973-
if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
1004+
if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) {
1005+
ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", args->priority);
9741006
return -EINVAL;
1007+
}
9751008

9761009
ret = ivpu_rpm_get(vdev);
9771010
if (ret < 0)
@@ -999,8 +1032,10 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file
9991032
u32 cmdq_id = 0;
10001033
int ret;
10011034

1002-
if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
1035+
if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) {
1036+
ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n");
10031037
return -ENODEV;
1038+
}
10041039

10051040
ret = ivpu_rpm_get(vdev);
10061041
if (ret < 0)

drivers/accel/ivpu/ivpu_mmu_context.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -529,7 +529,8 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
529529

530530
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
531531
if (ret)
532-
ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
532+
ivpu_warn_ratelimited(vdev, "Failed to invalidate TLB for ctx %u: %d\n",
533+
ctx->id, ret);
533534
}
534535

535536
int

0 commit comments

Comments
 (0)