File tree Expand file tree Collapse file tree
@@ -394,9 +394,9 @@ static void submit_cleanup(struct kref *kref)

 	if (submit->out_fence) {
 		/* first remove from IDR, so fence can not be found anymore */
-		mutex_lock(&submit->gpu->fence_lock);
+		mutex_lock(&submit->gpu->idr_lock);
 		idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
-		mutex_unlock(&submit->gpu->fence_lock);
+		mutex_unlock(&submit->gpu->idr_lock);
 		dma_fence_put(submit->out_fence);
 	}

@@ -1830,7 +1830,8 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)

 	gpu->dev = &pdev->dev;
 	mutex_init(&gpu->lock);
-	mutex_init(&gpu->fence_lock);
+	mutex_init(&gpu->sched_lock);
+	mutex_init(&gpu->idr_lock);

 	/* Map registers: */
 	gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
@@ -103,6 +103,7 @@ struct etnaviv_gpu {
 	struct etnaviv_chip_identity identity;
 	enum etnaviv_sec_mode sec_mode;
 	struct workqueue_struct *wq;
+	struct mutex sched_lock;
 	struct drm_gpu_scheduler sched;
 	bool initialized;
 	bool fe_running;
@@ -120,7 +121,7 @@ struct etnaviv_gpu {
 	u32 idle_mask;

 	/* Fencing support */
-	struct mutex fence_lock;
+	struct mutex idr_lock;
 	struct idr fence_idr;
 	u32 next_fence;
 	u32 completed_fence;
@@ -97,21 +97,24 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {

 int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
 {
+	struct etnaviv_gpu *gpu = submit->gpu;
 	int ret = 0;

 	/*
-	 * Hold the fence lock across the whole operation to avoid jobs being
+	 * Hold the sched lock across the whole operation to avoid jobs being
 	 * pushed out of order with regard to their sched fence seqnos as
 	 * allocated in drm_sched_job_arm.
 	 */
-	mutex_lock(&submit->gpu->fence_lock);
+	mutex_lock(&gpu->sched_lock);

 	drm_sched_job_arm(&submit->sched_job);

 	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
-	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
+	mutex_lock(&gpu->idr_lock);
+	submit->out_fence_id = idr_alloc_cyclic(&gpu->fence_idr,
 						submit->out_fence, 0,
 						INT_MAX, GFP_KERNEL);
+	mutex_unlock(&gpu->idr_lock);
 	if (submit->out_fence_id < 0) {
 		drm_sched_job_cleanup(&submit->sched_job);
 		ret = -ENOMEM;
@@ -124,7 +127,7 @@ int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
 	drm_sched_entity_push_job(&submit->sched_job);

 out_unlock:
-	mutex_unlock(&submit->gpu->fence_lock);
+	mutex_unlock(&gpu->sched_lock);

 	return ret;
 }
You can’t perform that action at this time.
0 commit comments