@@ -1419,48 +1419,57 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
 	return DRM_GPU_SCHED_STAT_NO_HANG;
 }
 
-static void __guc_exec_queue_fini_async(struct work_struct *w)
+static void guc_exec_queue_fini(struct xe_exec_queue *q)
+{
+	struct xe_guc_exec_queue *ge = q->guc;
+	struct xe_guc *guc = exec_queue_to_guc(q);
+
+	release_guc_id(guc, q);
+	xe_sched_entity_fini(&ge->entity);
+	xe_sched_fini(&ge->sched);
+
+	/*
+	 * RCU free due sched being exported via DRM scheduler fences
+	 * (timeline name).
+	 */
+	kfree_rcu(ge, rcu);
+}
+
+static void __guc_exec_queue_destroy_async(struct work_struct *w)
 {
 	struct xe_guc_exec_queue *ge =
-		container_of(w, struct xe_guc_exec_queue, fini_async);
+		container_of(w, struct xe_guc_exec_queue, destroy_async);
 	struct xe_exec_queue *q = ge->q;
 	struct xe_guc *guc = exec_queue_to_guc(q);
 
 	xe_pm_runtime_get(guc_to_xe(guc));
 	trace_xe_exec_queue_destroy(q);
 
-	release_guc_id(guc, q);
 	if (xe_exec_queue_is_lr(q))
 		cancel_work_sync(&ge->lr_tdr);
 	/* Confirm no work left behind accessing device structures */
 	cancel_delayed_work_sync(&ge->sched.base.work_tdr);
-	xe_sched_entity_fini(&ge->entity);
-	xe_sched_fini(&ge->sched);
 
-	/*
-	 * RCU free due sched being exported via DRM scheduler fences
-	 * (timeline name).
-	 */
-	kfree_rcu(ge, rcu);
 	xe_exec_queue_fini(q);
+
 	xe_pm_runtime_put(guc_to_xe(guc));
 }
 
-static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
+static void guc_exec_queue_destroy_async(struct xe_exec_queue *q)
 {
 	struct xe_guc *guc = exec_queue_to_guc(q);
 	struct xe_device *xe = guc_to_xe(guc);
 
-	INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
+	INIT_WORK(&q->guc->destroy_async, __guc_exec_queue_destroy_async);
 
 	/* We must block on kernel engines so slabs are empty on driver unload */
 	if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
-		__guc_exec_queue_fini_async(&q->guc->fini_async);
+		__guc_exec_queue_destroy_async(&q->guc->destroy_async);
 	else
-		queue_work(xe->destroy_wq, &q->guc->fini_async);
+		queue_work(xe->destroy_wq, &q->guc->destroy_async);
 }
 
-static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
+static void __guc_exec_queue_destroy(struct xe_guc *guc, struct xe_exec_queue *q)
 {
 	/*
	 * Might be done from within the GPU scheduler, need to do async as we
@@ -1469,7 +1478,7 @@ static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
 	 * this we and don't really care when everything is fini'd, just that it
 	 * is.
 	 */
-	guc_exec_queue_fini_async(q);
+	guc_exec_queue_destroy_async(q);
 }
 
 static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
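The comment split across this hunk and the previous one is the heart of the change: __guc_exec_queue_destroy() can be reached from inside the GPU scheduler, and the scheduler cannot complete its own teardown from within its own context, so the actual work is punted to a workqueue. A minimal, self-contained sketch of that deferral pattern, using a hypothetical my_queue type and my_queue_* names rather than the driver's actual code:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_queue {
	struct work_struct destroy_async;	/* deferred teardown hook */
	/* ... backend state ... */
};

/* Runs in workqueue context, safely outside any scheduler callback. */
static void my_queue_destroy_async(struct work_struct *w)
{
	struct my_queue *q = container_of(w, struct my_queue, destroy_async);

	/* Tear down scheduler-owned resources here, then free the object. */
	kfree(q);
}

/* May be called from scheduler context; only schedules the teardown. */
static void my_queue_destroy(struct my_queue *q)
{
	INIT_WORK(&q->destroy_async, my_queue_destroy_async);
	queue_work(system_wq, &q->destroy_async);
}

The EXEC_QUEUE_FLAG_PERMANENT / exec_queue_wedged() branch in guc_exec_queue_destroy_async() above intentionally skips the deferral and runs synchronously, matching its comment: kernel engines must be torn down before driver unload so the slabs are empty.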
@@ -1483,7 +1492,7 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
 	if (exec_queue_registered(q))
 		disable_scheduling_deregister(guc, q);
 	else
-		__guc_exec_queue_fini(guc, q);
+		__guc_exec_queue_destroy(guc, q);
 }
 
 static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
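A side note on the new guc_exec_queue_fini() in the first hunk: ge is released with kfree_rcu() rather than kfree() because, per its comment, the scheduler is exported via DRM scheduler fences (the timeline name), so RCU readers may still dereference it after the final put. The idiom only requires an embedded struct rcu_head; a generic sketch with a hypothetical my_obj type, not Xe code:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	char timeline_name[32];	/* readers access this under rcu_read_lock() */
	struct rcu_head rcu;	/* lets kfree_rcu() defer the free */
};

static void my_obj_free(struct my_obj *obj)
{
	/*
	 * Readers already inside rcu_read_lock() may still dereference
	 * obj; the memory is reclaimed only after a grace period, once
	 * all such readers have left their critical sections.
	 */
	kfree_rcu(obj, rcu);
}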
@@ -1716,14 +1725,14 @@ static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
 #define STATIC_MSG_CLEANUP	0
 #define STATIC_MSG_SUSPEND	1
 #define STATIC_MSG_RESUME	2
-static void guc_exec_queue_fini(struct xe_exec_queue *q)
+static void guc_exec_queue_destroy(struct xe_exec_queue *q)
 {
 	struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
 
 	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
 		guc_exec_queue_add_msg(q, msg, CLEANUP);
 	else
-		__guc_exec_queue_fini(exec_queue_to_guc(q), q);
+		__guc_exec_queue_destroy(exec_queue_to_guc(q), q);
 }
 
 static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
@@ -1853,6 +1862,7 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = {
 	.init = guc_exec_queue_init,
 	.kill = guc_exec_queue_kill,
 	.fini = guc_exec_queue_fini,
+	.destroy = guc_exec_queue_destroy,
 	.set_priority = guc_exec_queue_set_priority,
 	.set_timeslice = guc_exec_queue_set_timeslice,
 	.set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
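With .destroy registered alongside .fini, the backend exposes teardown in two stages: destroy initiates deregistration and the possibly asynchronous teardown shown above, while fini releases the guc_id, the scheduler entity and the scheduler itself. How the core sequences the two callbacks is outside this diff; a hedged sketch of such a two-stage ops contract, with invented queue/queue_ops names:

/* Hypothetical two-stage teardown contract, not the actual Xe interface. */
struct queue;

struct queue_ops {
	void (*fini)(struct queue *q);		/* free backend resources */
	void (*destroy)(struct queue *q);	/* begin (maybe async) teardown */
};

struct queue {
	const struct queue_ops *ops;
};

/* Dropping the last reference starts the teardown; the backend calls
 * back into fini once it is safe to free its resources. */
static void queue_release(struct queue *q)
{
	q->ops->destroy(q);
}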
@@ -1874,7 +1884,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
 		if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
 			xe_exec_queue_put(q);
 		else if (exec_queue_destroyed(q))
-			__guc_exec_queue_fini(guc, q);
+			__guc_exec_queue_destroy(guc, q);
 	}
 	if (q->guc->suspend_pending) {
 		set_exec_queue_suspended(q);
@@ -2203,7 +2213,7 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
 	if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
 		xe_exec_queue_put(q);
 	else
-		__guc_exec_queue_fini(guc, q);
+		__guc_exec_queue_destroy(guc, q);
 }
 
 int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)