 
 #define vm_dbg(fmt, ...) pr_debug("%s:%d: "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
 
+static uint vm_log_shift = 0;
+MODULE_PARM_DESC(vm_log_shift, "Length of VM op log");
+module_param_named(vm_log_shift, vm_log_shift, uint, 0600);
+
 /**
  * struct msm_vm_map_op - create new pgtable mapping
  */
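(The log is sized as a power of two: a vm_log_shift of n gives a 2^n entry log, clamped to at most 8, ie. 256 entries, when the VM is created; see msm_gem_vm_create() below. Because the param is 0600 it can presumably also be tweaked at runtime via /sys/module/msm/parameters/vm_log_shift, but the value is only sampled at VM creation, so it only affects VMs created afterwards.)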
@@ -31,6 +35,13 @@ struct msm_vm_map_op {
 	struct sg_table *sgt;
 	/** @prot: the mapping protection flags */
 	int prot;
+
+	/**
+	 * @queue_id: The id of the submitqueue the operation is performed
+	 * on, or zero for (in particular) UNMAP ops triggered outside of
+	 * a submitqueue (ie. process cleanup)
+	 */
+	int queue_id;
 };
 
 /**
@@ -41,6 +52,13 @@ struct msm_vm_unmap_op {
 	uint64_t iova;
 	/** @range: size of region to unmap */
 	uint64_t range;
+
+	/**
+	 * @queue_id: The id of the submitqueue the operation is performed
+	 * on, or zero for (in particular) UNMAP ops triggered outside of
+	 * a submitqueue (ie. process cleanup)
+	 */
+	int queue_id;
 };
 
 /**
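The entries stored in the log are struct msm_gem_vm_log_entry, whose definition lives in the header and is not part of this file's diff. Inferred from its usage in vm_log() and msm_gem_vm_unusable() below, it presumably looks roughly like this sketch:

	/* Sketch only -- inferred from usage; the real definition is in the header */
	struct msm_gem_vm_log_entry {
		const char *op;   /* "map" or "unmap"; NULL means never written */
		uint64_t iova;    /* start of the affected range */
		uint64_t range;   /* size of the range, in bytes */
		int queue_id;     /* submitqueue id, or zero (ie. process cleanup) */
	};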
@@ -144,27 +162,95 @@ msm_gem_vm_free(struct drm_gpuvm *gpuvm)
 	vm->mmu->funcs->destroy(vm->mmu);
 	dma_fence_put(vm->last_fence);
 	put_pid(vm->pid);
+	kfree(vm->log);
 	kfree(vm);
 }
 
+/**
+ * msm_gem_vm_unusable() - Mark a VM as unusable
+ * @gpuvm: the VM to mark unusable
+ */
+void
+msm_gem_vm_unusable(struct drm_gpuvm *gpuvm)
+{
+	struct msm_gem_vm *vm = to_msm_vm(gpuvm);
+	uint32_t vm_log_len = (1 << vm->log_shift);
+	uint32_t vm_log_mask = vm_log_len - 1;
+	uint32_t nr_vm_logs;
+	int first;
+
+	vm->unusable = true;
+
+	/* Bail if no log, or empty log: */
+	if (!vm->log || !vm->log[0].op)
+		return;
+
+	mutex_lock(&vm->mmu_lock);
+
+	/*
+	 * log_idx is the next entry to overwrite, meaning it is the oldest, or
+	 * first, entry (other than the special case handled below where the
+	 * log hasn't wrapped around yet)
+	 */
+	first = vm->log_idx;
+
+	if (!vm->log[first].op) {
+		/*
+		 * If the next log entry has not been written yet, then only
+		 * entries 0 to idx-1 are valid (ie. we haven't wrapped around
+		 * yet)
+		 */
+		nr_vm_logs = first;
+		first = 0;
+	} else {
+		nr_vm_logs = vm_log_len;
+	}
+
+	pr_err("vm-log:\n");
+	for (int i = 0; i < nr_vm_logs; i++) {
+		int idx = (i + first) & vm_log_mask;
+		struct msm_gem_vm_log_entry *e = &vm->log[idx];
+		pr_err(" - %s:%d: 0x%016llx-0x%016llx\n",
+		       e->op, e->queue_id, e->iova,
+		       e->iova + e->range);
+	}
+
+	mutex_unlock(&vm->mmu_lock);
+}
+
 static void
-vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op)
+vm_log(struct msm_gem_vm *vm, const char *op, uint64_t iova, uint64_t range, int queue_id)
 {
+	int idx;
+
 	if (!vm->managed)
 		lockdep_assert_held(&vm->mmu_lock);
 
-	vm_dbg("%p: %016llx %016llx", vm, op->iova, op->iova + op->range);
+	vm_dbg("%s:%p:%d: %016llx %016llx", op, vm, queue_id, iova, iova + range);
+
+	if (!vm->log)
+		return;
+
+	idx = vm->log_idx;
+	vm->log[idx].op = op;
+	vm->log[idx].iova = iova;
+	vm->log[idx].range = range;
+	vm->log[idx].queue_id = queue_id;
+	vm->log_idx = (vm->log_idx + 1) & ((1 << vm->log_shift) - 1);
+}
+
+static void
+vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op)
+{
+	vm_log(vm, "unmap", op->iova, op->range, op->queue_id);
 
 	vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range);
 }
 
 static int
 vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op)
 {
-	if (!vm->managed)
-		lockdep_assert_held(&vm->mmu_lock);
-
-	vm_dbg("%p: %016llx %016llx", vm, op->iova, op->iova + op->range);
+	vm_log(vm, "map", op->iova, op->range, op->queue_id);
 
 	return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset,
 				   op->range, op->prot);
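Taken together, vm_log() and msm_gem_vm_unusable() implement a standard power-of-two ring buffer: the writer advances log_idx with a mask rather than a modulo, and the dump walks entries oldest-first, treating a NULL op as "slot never written" to handle the not-yet-wrapped case. A minimal userspace model of the same logic (all names here are illustrative, not taken from the driver):

	/* Minimal userspace model of the vm_log ring buffer (illustrative only). */
	#include <stdint.h>
	#include <stdio.h>

	#define LOG_SHIFT 3			/* 2^3 = 8 entries, like vm->log_shift */
	#define LOG_LEN   (1u << LOG_SHIFT)
	#define LOG_MASK  (LOG_LEN - 1)

	struct entry {
		const char *op;			/* NULL means "slot never written" */
		uint64_t iova, range;
	};

	static struct entry oplog[LOG_LEN];	/* zero-initialized, like __GFP_ZERO */
	static uint32_t log_idx;

	static void log_op(const char *op, uint64_t iova, uint64_t range)
	{
		oplog[log_idx] = (struct entry){ op, iova, range };
		log_idx = (log_idx + 1) & LOG_MASK;	/* wrap with a mask, no modulo */
	}

	static void dump(void)
	{
		/*
		 * log_idx points at the next slot to overwrite, ie. the oldest
		 * entry, unless that slot was never written (no wrap-around yet).
		 */
		uint32_t first = log_idx, n = LOG_LEN;

		if (!oplog[first].op) {
			n = first;		/* only entries 0..first-1 are valid */
			first = 0;
		}

		for (uint32_t i = 0; i < n; i++) {
			struct entry *e = &oplog[(i + first) & LOG_MASK];

			printf(" - %s: 0x%016llx-0x%016llx\n", e->op,
			       (unsigned long long)e->iova,
			       (unsigned long long)(e->iova + e->range));
		}
	}

	int main(void)
	{
		for (uint64_t i = 0; i < 10; i++)	/* 10 > 8, so the log wraps */
			log_op(i & 1 ? "unmap" : "map", 0x1000 * i, 0x1000);
		dump();		/* prints the 8 most recent ops, oldest first */
		return 0;
	}

Running this writes ten ops into an eight-entry log, so the dump prints the eight most recent ops, oldest first.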
@@ -382,6 +468,7 @@ vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op)
 static int
 msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *arg)
 {
+	struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
 	struct drm_gem_object *obj = op->map.gem.obj;
 	struct drm_gpuva *vma;
 	struct sg_table *sgt;
@@ -412,6 +499,7 @@ msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *arg)
 			.range = vma->va.range,
 			.offset = vma->gem.offset,
 			.prot = prot,
+			.queue_id = job->queue->id,
 		},
 		.obj = vma->gem.obj,
 	});
@@ -445,6 +533,7 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
 		.unmap = {
 			.iova = unmap_start,
 			.range = unmap_range,
+			.queue_id = job->queue->id,
 		},
 		.obj = orig_vma->gem.obj,
 	});
@@ -506,6 +595,7 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
 static int
 msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *arg)
 {
+	struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
 	struct drm_gpuva *vma = op->unmap.va;
 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
@@ -520,6 +610,7 @@ msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *arg)
 		.unmap = {
 			.iova = vma->va.addr,
 			.range = vma->va.range,
+			.queue_id = job->queue->id,
 		},
 		.obj = vma->gem.obj,
 	});
@@ -584,7 +675,7 @@ msm_vma_job_run(struct drm_sched_job *_job)
 	 * now the VM is in an undefined state.  Game over!
 	 */
 	if (ret)
-		vm->unusable = true;
+		msm_gem_vm_unusable(job->vm);
 
 	job_foreach_bo (obj, job) {
 		msm_gem_lock(obj);
@@ -695,6 +786,23 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
 
 	drm_mm_init(&vm->mm, va_start, va_size);
 
+	/*
+	 * We don't really need vm log for kernel managed VMs, as the kernel
+	 * is responsible for ensuring that GEM objs are mapped if they are
+	 * used by a submit.  Furthermore we piggyback on mmu_lock to
+	 * serialize access to the log.
+	 *
+	 * Limit the max log_shift to 8 to prevent userspace from asking us
+	 * for an unreasonable log size.
+	 */
+	if (!managed)
+		vm->log_shift = MIN(vm_log_shift, 8);
+
+	if (vm->log_shift) {
+		vm->log = kmalloc_array(1 << vm->log_shift, sizeof(vm->log[0]),
+					GFP_KERNEL | __GFP_ZERO);
+	}
+
 	return &vm->base;
 
 err_free_dummy:
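Note that __GFP_ZERO is load-bearing here: both the empty-log bail-out (!vm->log[0].op) and the wrap detection (!vm->log[first].op) in msm_gem_vm_unusable() rely on never-written entries having a NULL op pointer. An equivalent way to get the same zeroed allocation would be kcalloc(), eg.:

	/* Equivalent zeroed allocation (sketch, not what the patch uses): */
	vm->log = kcalloc(1 << vm->log_shift, sizeof(vm->log[0]), GFP_KERNEL);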
@@ -1162,7 +1270,7 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
 		 * state the vm is in.  So throw up our hands!
 		 */
 		if (i > 0)
-			vm->unusable = true;
+			msm_gem_vm_unusable(job->vm);
 		return ret;
 	}
 }
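For reference, given the pr_err() format above, the dump for a VM marked unusable would look something like this (addresses and queue ids made up for illustration; queue id 0 marks an op performed outside a submitqueue, such as process cleanup):

	vm-log:
	 - map:3: 0x0000000100000000-0x0000000100002000
	 - unmap:0: 0x0000000100000000-0x0000000100002000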