@@ -348,7 +348,7 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32
348348
349349 cmdq = xa_load (& file_priv -> cmdq_xa , cmdq_id );
350350 if (!cmdq ) {
351- ivpu_warn_ratelimited (vdev , "Failed to find command queue with ID: %u\n" , cmdq_id );
351+ ivpu_dbg (vdev , IOCTL , "Failed to find command queue with ID: %u\n" , cmdq_id );
352352 return NULL ;
353353 }
354354
@@ -534,7 +534,7 @@ ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
534534 job -> bo_count = bo_count ;
535535 job -> done_fence = ivpu_fence_create (vdev );
536536 if (!job -> done_fence ) {
537- ivpu_warn_ratelimited (vdev , "Failed to create a fence\n" );
537+ ivpu_err (vdev , "Failed to create a fence\n" );
538538 goto err_free_job ;
539539 }
540540
@@ -687,7 +687,6 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
687687 else
688688 cmdq = ivpu_cmdq_acquire (file_priv , cmdq_id );
689689 if (!cmdq ) {
690- ivpu_warn_ratelimited (vdev , "Failed to get job queue, ctx %d\n" , file_priv -> ctx .id );
691690 ret = - EINVAL ;
692691 goto err_unlock ;
693692 }
@@ -771,8 +770,11 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
771770 for (i = 0 ; i < buf_count ; i ++ ) {
772771 struct drm_gem_object * obj = drm_gem_object_lookup (file , buf_handles [i ]);
773772
774- if (!obj )
773+ if (!obj ) {
774+ ivpu_dbg (vdev , IOCTL , "Failed to lookup GEM object with handle %u\n" ,
775+ buf_handles [i ]);
775776 return - ENOENT ;
777+ }
776778
777779 job -> bos [i ] = to_ivpu_bo (obj );
778780
@@ -783,12 +785,13 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
783785
784786 bo = job -> bos [CMD_BUF_IDX ];
785787 if (!dma_resv_test_signaled (bo -> base .base .resv , DMA_RESV_USAGE_READ )) {
786- ivpu_warn (vdev , "Buffer is already in use\n" );
788+ ivpu_dbg (vdev , IOCTL , "Buffer is already in use by another job\n" );
787789 return - EBUSY ;
788790 }
789791
790792 if (commands_offset >= ivpu_bo_size (bo )) {
791- ivpu_warn (vdev , "Invalid command buffer offset %u\n" , commands_offset );
793+ ivpu_dbg (vdev , IOCTL , "Invalid commands offset %u for buffer size %zu\n" ,
794+ commands_offset , ivpu_bo_size (bo ));
792795 return - EINVAL ;
793796 }
794797
@@ -798,11 +801,11 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
798801 struct ivpu_bo * preempt_bo = job -> bos [preempt_buffer_index ];
799802
800803 if (ivpu_bo_size (preempt_bo ) < ivpu_fw_preempt_buf_size (vdev )) {
801- ivpu_warn (vdev , "Preemption buffer is too small\n" );
804+ ivpu_dbg (vdev , IOCTL , "Preemption buffer is too small\n" );
802805 return - EINVAL ;
803806 }
804807 if (ivpu_bo_is_mappable (preempt_bo )) {
805- ivpu_warn (vdev , "Preemption buffer cannot be mappable\n" );
808+ ivpu_dbg (vdev , IOCTL , "Preemption buffer cannot be mappable\n" );
806809 return - EINVAL ;
807810 }
808811 job -> primary_preempt_buf = preempt_bo ;
@@ -811,14 +814,14 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
811814 ret = drm_gem_lock_reservations ((struct drm_gem_object * * )job -> bos , buf_count ,
812815 & acquire_ctx );
813816 if (ret ) {
814- ivpu_warn (vdev , "Failed to lock reservations: %d\n" , ret );
817+ ivpu_warn_ratelimited (vdev , "Failed to lock reservations: %d\n" , ret );
815818 return ret ;
816819 }
817820
818821 for (i = 0 ; i < buf_count ; i ++ ) {
819822 ret = dma_resv_reserve_fences (job -> bos [i ]-> base .base .resv , 1 );
820823 if (ret ) {
821- ivpu_warn (vdev , "Failed to reserve fences: %d\n" , ret );
824+ ivpu_warn_ratelimited (vdev , "Failed to reserve fences: %d\n" , ret );
822825 goto unlock_reservations ;
823826 }
824827 }
@@ -865,17 +868,14 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv,
865868
866869 job = ivpu_job_create (file_priv , engine , buffer_count );
867870 if (!job ) {
868- ivpu_err (vdev , "Failed to create job\n" );
869871 ret = - ENOMEM ;
870872 goto err_exit_dev ;
871873 }
872874
873875 ret = ivpu_job_prepare_bos_for_submit (file , job , buf_handles , buffer_count , cmds_offset ,
874876 preempt_buffer_index );
875- if (ret ) {
876- ivpu_err (vdev , "Failed to prepare job: %d\n" , ret );
877+ if (ret )
877878 goto err_destroy_job ;
878- }
879879
880880 down_read (& vdev -> pm -> reset_lock );
881881 ret = ivpu_job_submit (job , priority , cmdq_id );
@@ -901,26 +901,39 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv,
901901int ivpu_submit_ioctl (struct drm_device * dev , void * data , struct drm_file * file )
902902{
903903 struct ivpu_file_priv * file_priv = file -> driver_priv ;
904+ struct ivpu_device * vdev = file_priv -> vdev ;
904905 struct drm_ivpu_submit * args = data ;
905906 u8 priority ;
906907
907- if (args -> engine != DRM_IVPU_ENGINE_COMPUTE )
908+ if (args -> engine != DRM_IVPU_ENGINE_COMPUTE ) {
909+ ivpu_dbg (vdev , IOCTL , "Invalid engine %d\n" , args -> engine );
908910 return - EINVAL ;
911+ }
909912
910- if (args -> priority > DRM_IVPU_JOB_PRIORITY_REALTIME )
913+ if (args -> priority > DRM_IVPU_JOB_PRIORITY_REALTIME ) {
914+ ivpu_dbg (vdev , IOCTL , "Invalid priority %d\n" , args -> priority );
911915 return - EINVAL ;
916+ }
912917
913- if (args -> buffer_count == 0 || args -> buffer_count > JOB_MAX_BUFFER_COUNT )
918+ if (args -> buffer_count == 0 || args -> buffer_count > JOB_MAX_BUFFER_COUNT ) {
919+ ivpu_dbg (vdev , IOCTL , "Invalid buffer count %u\n" , args -> buffer_count );
914920 return - EINVAL ;
921+ }
915922
916- if (!IS_ALIGNED (args -> commands_offset , 8 ))
923+ if (!IS_ALIGNED (args -> commands_offset , 8 )) {
924+ ivpu_dbg (vdev , IOCTL , "Invalid commands offset %u\n" , args -> commands_offset );
917925 return - EINVAL ;
926+ }
918927
919- if (!file_priv -> ctx .id )
928+ if (!file_priv -> ctx .id ) {
929+ ivpu_dbg (vdev , IOCTL , "Context not initialized\n" );
920930 return - EINVAL ;
931+ }
921932
922- if (file_priv -> has_mmu_faults )
933+ if (file_priv -> has_mmu_faults ) {
934+ ivpu_dbg (vdev , IOCTL , "Context %u has MMU faults\n" , file_priv -> ctx .id );
923935 return - EBADFD ;
936+ }
924937
925938 priority = ivpu_job_to_jsm_priority (args -> priority );
926939
@@ -931,28 +944,44 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
931944int ivpu_cmdq_submit_ioctl (struct drm_device * dev , void * data , struct drm_file * file )
932945{
933946 struct ivpu_file_priv * file_priv = file -> driver_priv ;
947+ struct ivpu_device * vdev = file_priv -> vdev ;
934948 struct drm_ivpu_cmdq_submit * args = data ;
935949
936- if (!ivpu_is_capable (file_priv -> vdev , DRM_IVPU_CAP_MANAGE_CMDQ ))
950+ if (!ivpu_is_capable (file_priv -> vdev , DRM_IVPU_CAP_MANAGE_CMDQ )) {
951+ ivpu_dbg (vdev , IOCTL , "Command queue management not supported\n" );
937952 return - ENODEV ;
953+ }
938954
939- if (args -> cmdq_id < IVPU_CMDQ_MIN_ID || args -> cmdq_id > IVPU_CMDQ_MAX_ID )
955+ if (args -> cmdq_id < IVPU_CMDQ_MIN_ID || args -> cmdq_id > IVPU_CMDQ_MAX_ID ) {
956+ ivpu_dbg (vdev , IOCTL , "Invalid command queue ID %u\n" , args -> cmdq_id );
940957 return - EINVAL ;
958+ }
941959
942- if (args -> buffer_count == 0 || args -> buffer_count > JOB_MAX_BUFFER_COUNT )
960+ if (args -> buffer_count == 0 || args -> buffer_count > JOB_MAX_BUFFER_COUNT ) {
961+ ivpu_dbg (vdev , IOCTL , "Invalid buffer count %u\n" , args -> buffer_count );
943962 return - EINVAL ;
963+ }
944964
945- if (args -> preempt_buffer_index >= args -> buffer_count )
965+ if (args -> preempt_buffer_index >= args -> buffer_count ) {
966+ ivpu_dbg (vdev , IOCTL , "Invalid preemption buffer index %u\n" ,
967+ args -> preempt_buffer_index );
946968 return - EINVAL ;
969+ }
947970
948- if (!IS_ALIGNED (args -> commands_offset , 8 ))
971+ if (!IS_ALIGNED (args -> commands_offset , 8 )) {
972+ ivpu_dbg (vdev , IOCTL , "Invalid commands offset %u\n" , args -> commands_offset );
949973 return - EINVAL ;
974+ }
950975
951- if (!file_priv -> ctx .id )
976+ if (!file_priv -> ctx .id ) {
977+ ivpu_dbg (vdev , IOCTL , "Context not initialized\n" );
952978 return - EINVAL ;
979+ }
953980
954- if (file_priv -> has_mmu_faults )
981+ if (file_priv -> has_mmu_faults ) {
982+ ivpu_dbg (vdev , IOCTL , "Context %u has MMU faults\n" , file_priv -> ctx .id );
955983 return - EBADFD ;
984+ }
956985
957986 return ivpu_submit (file , file_priv , args -> cmdq_id , args -> buffer_count , VPU_ENGINE_COMPUTE ,
958987 (void __user * )args -> buffers_ptr , args -> commands_offset ,
@@ -967,11 +996,15 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
967996 struct ivpu_cmdq * cmdq ;
968997 int ret ;
969998
970- if (!ivpu_is_capable (vdev , DRM_IVPU_CAP_MANAGE_CMDQ ))
999+ if (!ivpu_is_capable (vdev , DRM_IVPU_CAP_MANAGE_CMDQ )) {
1000+ ivpu_dbg (vdev , IOCTL , "Command queue management not supported\n" );
9711001 return - ENODEV ;
1002+ }
9721003
973- if (args -> priority > DRM_IVPU_JOB_PRIORITY_REALTIME )
1004+ if (args -> priority > DRM_IVPU_JOB_PRIORITY_REALTIME ) {
1005+ ivpu_dbg (vdev , IOCTL , "Invalid priority %d\n" , args -> priority );
9741006 return - EINVAL ;
1007+ }
9751008
9761009 ret = ivpu_rpm_get (vdev );
9771010 if (ret < 0 )
@@ -999,8 +1032,10 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file
9991032 u32 cmdq_id = 0 ;
10001033 int ret ;
10011034
1002- if (!ivpu_is_capable (vdev , DRM_IVPU_CAP_MANAGE_CMDQ ))
1035+ if (!ivpu_is_capable (vdev , DRM_IVPU_CAP_MANAGE_CMDQ )) {
1036+ ivpu_dbg (vdev , IOCTL , "Command queue management not supported\n" );
10031037 return - ENODEV ;
1038+ }
10041039
10051040 ret = ivpu_rpm_get (vdev );
10061041 if (ret < 0 )
0 commit comments