 #include "xe_vm.h"
 #include "xe_vm_types.h"
 
+static bool xe_svm_range_in_vram(struct xe_svm_range *range)
+{
+	/* Not reliable without notifier lock */
+	return range->base.flags.has_devmem_pages;
+}
+
+static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
+{
+	/* Not reliable without notifier lock */
+	return xe_svm_range_in_vram(range) && range->tile_present;
+}
+
 static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
 {
 	return container_of(gpusvm, struct xe_vm, svm.gpusvm);
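
Both helpers above read range state without the GPU SVM notifier lock, hence the "Not reliable without notifier lock" comments: a concurrent invalidation can change the flags mid-read, so the results are safe only as debug hints. For contrast, a minimal sketch of an authoritative check, assuming the drm_gpusvm_notifier_lock()/drm_gpusvm_notifier_unlock() helpers from drm_gpusvm.h (the function name is hypothetical):

	/* Sketch only: stable read under the notifier lock, so
	 * invalidation cannot race with the check. */
	static bool xe_svm_range_in_vram_locked(struct xe_vm *vm,
						struct xe_svm_range *range)
	{
		bool ret;

		drm_gpusvm_notifier_lock(&vm->svm.gpusvm);
		ret = xe_svm_range_in_vram(range);
		drm_gpusvm_notifier_unlock(&vm->svm.gpusvm);

		return ret;
	}
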
@@ -37,6 +49,23 @@ static unsigned long xe_svm_range_size(struct xe_svm_range *range)
 	return drm_gpusvm_range_size(&range->base);
 }
 
+#define range_debug(r__, operation__) \
+	vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
+	       "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
+	       "start=0x%014lx, end=0x%014lx, size=%lu", \
+	       (operation__), range_to_vm(&(r__)->base)->usm.asid, \
+	       (r__)->base.gpusvm, \
+	       xe_svm_range_in_vram((r__)) ? 1 : 0, \
+	       xe_svm_range_has_vram_binding((r__)) ? 1 : 0, \
+	       (r__)->base.notifier_seq, \
+	       xe_svm_range_start((r__)), xe_svm_range_end((r__)), \
+	       xe_svm_range_size((r__)))
+
+void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
+{
+	range_debug(range, operation);
+}
+
 static void *xe_svm_devm_owner(struct xe_device *xe)
 {
 	return xe;
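
xe_svm_range_debug() wraps the range_debug() macro so code outside this file can emit the same one-line trace. A hypothetical call site (the tag string is free-form; "PREFETCH" is purely illustrative):

	/* e.g. from a prefetch or eviction path in another xe file */
	xe_svm_range_debug(range, "PREFETCH");
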
@@ -74,6 +103,8 @@ xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
 {
 	struct xe_device *xe = vm->xe;
 
+	range_debug(range, "GARBAGE COLLECTOR ADD");
+
 	drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
 
 	spin_lock(&vm->svm.garbage_collector.lock);
@@ -99,10 +130,14 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
 
 	xe_svm_assert_in_notifier(vm);
 
+	range_debug(range, "NOTIFIER");
+
 	/* Skip if already unmapped or if no binding exists */
 	if (range->base.flags.unmapped || !range->tile_present)
 		return 0;
 
+	range_debug(range, "NOTIFIER - EXECUTE");
+
 	/* Adjust invalidation to range boundaries */
 	*adj_start = min(xe_svm_range_start(range), mmu_range->start);
 	*adj_end = max(xe_svm_range_end(range), mmu_range->end);
@@ -153,6 +188,11 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
 
 	xe_svm_assert_in_notifier(vm);
 
+	vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
+	       "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
+	       vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
+	       mmu_range->start, mmu_range->end, mmu_range->event);
+
 	/* Adjust invalidation to notifier boundaries */
 	adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
 	adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
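
Note that vm_dbg(), which xe defines as an alias of drm_dbg(), compiles these messages in unconditionally but only emits them when the DRM_UT_DRIVER bit is set in the drm.debug module parameter, e.g. echo 0x2 > /sys/module/drm/parameters/debug.
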
@@ -237,6 +277,8 @@ static int __xe_svm_garbage_collector(struct xe_vm *vm,
 {
 	struct dma_fence *fence;
 
+	range_debug(range, "GARBAGE COLLECTOR");
+
 	xe_vm_lock(vm, false);
 	fence = xe_vm_range_unbind(vm, range);
 	xe_vm_unlock(vm);
@@ -396,16 +438,23 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
 		int incr = (match && last) ? 1 : 0;
 
 		if (vram_addr != XE_VRAM_ADDR_INVALID) {
-			if (sram)
+			if (sram) {
+				vm_dbg(&tile->xe->drm,
+				       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
+				       vram_addr, (u64)dma_addr[pos], i - pos + incr);
 				__fence = xe_migrate_from_vram(tile->migrate,
 							       i - pos + incr,
 							       vram_addr,
 							       dma_addr + pos);
-			else
+			} else {
+				vm_dbg(&tile->xe->drm,
+				       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
+				       (u64)dma_addr[pos], vram_addr, i - pos + incr);
 				__fence = xe_migrate_to_vram(tile->migrate,
 							     i - pos + incr,
 							     dma_addr + pos,
 							     vram_addr);
+			}
 			if (IS_ERR(__fence)) {
 				err = PTR_ERR(__fence);
 				goto err_out;
@@ -425,14 +474,21 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
 
 		/* Extra mismatched device page, copy it */
 		if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
-			if (sram)
+			if (sram) {
+				vm_dbg(&tile->xe->drm,
+				       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
+				       vram_addr, (u64)dma_addr[pos], 1);
 				__fence = xe_migrate_from_vram(tile->migrate, 1,
 							       vram_addr,
 							       dma_addr + pos);
-			else
+			} else {
+				vm_dbg(&tile->xe->drm,
+				       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
+				       (u64)dma_addr[pos], vram_addr, 1);
 				__fence = xe_migrate_to_vram(tile->migrate, 1,
 							     dma_addr + pos,
 							     vram_addr);
+			}
 			if (IS_ERR(__fence)) {
 				err = PTR_ERR(__fence);
 				goto err_out;
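
The NPAGES value logged in the two hunks above is the length of a coalesced run: xe_svm_copy() defers each copy until a run of contiguous device pages ends, which is what the i/pos/incr bookkeeping tracks. A self-contained userspace sketch of that batching pattern, with copy_run() standing in for the xe_migrate_from_vram()/xe_migrate_to_vram() calls (all names here are illustrative, not the driver's):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for one xe_migrate_from_vram()/xe_migrate_to_vram() call. */
	static void copy_run(unsigned long start, unsigned long npages)
	{
		printf("copy %lu page(s) starting at index %lu\n", npages, start);
	}

	/* Mirror of the i/pos/incr bookkeeping in xe_svm_copy(): issue one
	 * copy per run of contiguous pages rather than one per page. */
	static void copy_batched(const unsigned long *pfn, unsigned long npages)
	{
		unsigned long i, pos = 0;

		for (i = 0; i < npages; ++i) {
			bool last = (i + 1 == npages);
			bool match = pfn[i] == pfn[pos] + (i - pos);
			/* Count page i into the run only when it matches and
			 * the walk ends here; otherwise it opens the next run. */
			unsigned long incr = (match && last) ? 1 : 0;

			if (!match || last) {
				copy_run(pos, i - pos + incr);
				pos = i;
			}
			/* Extra mismatched final page gets its own copy. */
			if (!match && last)
				copy_run(pos, 1);
		}
	}

	int main(void)
	{
		/* Two contiguous runs (100..102, 200..201) plus a lone page:
		 * expect copies of 3, 2, and 1 page(s). */
		unsigned long pfn[] = { 100, 101, 102, 200, 201, 500 };

		copy_batched(pfn, sizeof(pfn) / sizeof(pfn[0]));
		return 0;
	}
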
@@ -609,6 +665,8 @@ static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
 	ktime_t end = 0;
 	int err;
 
+	range_debug(range, "ALLOCATE VRAM");
+
 	if (!mmget_not_zero(mm))
 		return -EFAULT;
 	mmap_read_lock(mm);
@@ -699,6 +757,8 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 	if (xe_svm_range_is_valid(range, tile))
 		return 0;
 
+	range_debug(range, "PAGE FAULT");
+
 	/* XXX: Add migration policy, for now migrate range once */
 	if (!range->skip_migrate && range->base.flags.migrate_devmem &&
 	    xe_svm_range_size(range) >= SZ_64K) {
@@ -714,18 +774,26 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 		}
 	}
 
+	range_debug(range, "GET PAGES");
 	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
 	/* Corner case where CPU mappings have changed */
 	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
-		if (err == -EOPNOTSUPP)
+		if (err == -EOPNOTSUPP) {
+			range_debug(range, "PAGE FAULT - EVICT PAGES");
 			drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+		}
 		drm_dbg(&vm->xe->drm,
 			"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
 			vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+		range_debug(range, "PAGE FAULT - RETRY PAGES");
 		goto retry;
 	}
-	if (err)
+	if (err) {
+		range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
 		goto err_out;
+	}
+
+	range_debug(range, "PAGE FAULT - BIND");
 
 retry_bind:
 	drm_exec_init(&exec, 0, 0);
@@ -741,8 +809,10 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 	if (IS_ERR(fence)) {
 		drm_exec_fini(&exec);
 		err = PTR_ERR(fence);
-		if (err == -EAGAIN)
+		if (err == -EAGAIN) {
+			range_debug(range, "PAGE FAULT - RETRY BIND");
 			goto retry;
+		}
 		if (xe_vm_validate_should_retry(&exec, err, &end))
 			goto retry_bind;
 		goto err_out;
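
Taken together, the pagefault tags trace one fault through its two retry targets: -EAGAIN from the bind restarts the whole fault at "retry", while validation contention repeats only the bind at "retry_bind". A toy userspace model of that control flow (error codes and the failure schedule are invented purely to exercise each path once):

	#include <stdio.h>

	#define FAKE_OK		 0
	#define FAKE_EAGAIN	-1
	#define FAKE_CONTENDED	-2

	static int fake_get_pages(int *calls)
	{
		return (*calls)++ == 0 ? FAKE_EAGAIN : FAKE_OK;	/* fail once */
	}

	static int fake_bind(int *calls)
	{
		int c = (*calls)++;

		return c == 0 ? FAKE_EAGAIN : c == 1 ? FAKE_CONTENDED : FAKE_OK;
	}

	int main(void)
	{
		int pages_calls = 0, bind_calls = 0, err;

	retry:
		printf("GET PAGES\n");
		if (fake_get_pages(&pages_calls)) {
			printf("PAGE FAULT - RETRY PAGES\n");
			goto retry;
		}
		printf("PAGE FAULT - BIND\n");
	retry_bind:
		err = fake_bind(&bind_calls);
		if (err == FAKE_EAGAIN) {
			printf("PAGE FAULT - RETRY BIND\n");
			goto retry;		/* -EAGAIN restarts the whole fault */
		}
		if (err == FAKE_CONTENDED)
			goto retry_bind;	/* contention repeats only the bind */
		printf("fault serviced\n");
		return 0;
	}
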