@@ -545,25 +545,31 @@ static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
545545 * amdgpu_sdma_reset_engine - Reset a specific SDMA engine
546546 * @adev: Pointer to the AMDGPU device
547547 * @instance_id: Logical ID of the SDMA engine instance to reset
548+ * @caller_handles_kernel_queues: if true, skip stopping/restarting the
549+ * kernel queue schedulers here; the caller is responsible for doing so.
548550 *
549551 * Returns: 0 on success, or a negative error code on failure.
550552 */
551- int amdgpu_sdma_reset_engine (struct amdgpu_device * adev , uint32_t instance_id )
553+ int amdgpu_sdma_reset_engine (struct amdgpu_device * adev , uint32_t instance_id ,
554+ bool caller_handles_kernel_queues )
552555{
553556 int ret = 0 ;
554557 struct amdgpu_sdma_instance * sdma_instance = & adev -> sdma .instance [instance_id ];
555558 struct amdgpu_ring * gfx_ring = & sdma_instance -> ring ;
556559 struct amdgpu_ring * page_ring = & sdma_instance -> page ;
557560
558561 mutex_lock (& sdma_instance -> engine_reset_mutex );
559- /* Stop the scheduler's work queue for the GFX and page rings if they are running.
560- * This ensures that no new tasks are submitted to the queues while
561- * the reset is in progress.
562- */
563- drm_sched_wqueue_stop (& gfx_ring -> sched );
564562
565- if (adev -> sdma .has_page_queue )
566- drm_sched_wqueue_stop (& page_ring -> sched );
563+ if (!caller_handles_kernel_queues ) {
564+ /* Stop the scheduler's work queue for the GFX and page rings if they are running.
565+ * This ensures that no new tasks are submitted to the queues while
566+ * the reset is in progress.
567+ */
568+ drm_sched_wqueue_stop (& gfx_ring -> sched );
569+
570+ if (adev -> sdma .has_page_queue )
571+ drm_sched_wqueue_stop (& page_ring -> sched );
572+ }
567573
568574 if (sdma_instance -> funcs -> stop_kernel_queue ) {
569575 sdma_instance -> funcs -> stop_kernel_queue (gfx_ring );
@@ -585,16 +591,18 @@ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
585591 }
586592
587593exit :
588- /* Restart the scheduler's work queue for the GFX and page rings
589- * if they were stopped by this function. This allows new tasks
590- * to be submitted to the queues after the reset is complete.
591- */
592- if (!ret ) {
593- amdgpu_fence_driver_force_completion (gfx_ring );
594- drm_sched_wqueue_start (& gfx_ring -> sched );
595- if (adev -> sdma .has_page_queue ) {
596- amdgpu_fence_driver_force_completion (page_ring );
597- drm_sched_wqueue_start (& page_ring -> sched );
594+ if (!caller_handles_kernel_queues ) {
595+ /* Restart the scheduler's work queue for the GFX and page rings
596+ * if they were stopped by this function. This allows new tasks
597+ * to be submitted to the queues after the reset is complete.
598+ */
599+ if (!ret ) {
600+ amdgpu_fence_driver_force_completion (gfx_ring );
601+ drm_sched_wqueue_start (& gfx_ring -> sched );
602+ if (adev -> sdma .has_page_queue ) {
603+ amdgpu_fence_driver_force_completion (page_ring );
604+ drm_sched_wqueue_start (& page_ring -> sched );
605+ }
598606 }
599607 }
600608 mutex_unlock (& sdma_instance -> engine_reset_mutex );
0 commit comments