@@ -476,7 +476,8 @@ static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
 
 static int xe_svm_copy(struct page **pages,
                        struct drm_pagemap_addr *pagemap_addr,
-                       unsigned long npages, const enum xe_svm_copy_dir dir)
+                       unsigned long npages, const enum xe_svm_copy_dir dir,
+                       struct dma_fence *pre_migrate_fence)
 {
         struct xe_vram_region *vr = NULL;
         struct xe_gt *gt = NULL;
@@ -565,7 +566,8 @@ static int xe_svm_copy(struct page **pages,
                                 __fence = xe_migrate_from_vram(vr->migrate,
                                                                i - pos + incr,
                                                                vram_addr,
-                                                               &pagemap_addr[pos]);
+                                                               &pagemap_addr[pos],
+                                                               pre_migrate_fence);
                         } else {
                                 vm_dbg(&xe->drm,
                                        "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
@@ -574,13 +576,14 @@ static int xe_svm_copy(struct page **pages,
                                 __fence = xe_migrate_to_vram(vr->migrate,
                                                              i - pos + incr,
                                                              &pagemap_addr[pos],
-                                                             vram_addr);
+                                                             vram_addr,
+                                                             pre_migrate_fence);
                         }
                         if (IS_ERR(__fence)) {
                                 err = PTR_ERR(__fence);
                                 goto err_out;
                         }
-
+                        pre_migrate_fence = NULL;
                         dma_fence_put(fence);
                         fence = __fence;
                 }
@@ -603,20 +606,22 @@ static int xe_svm_copy(struct page **pages,
                                        vram_addr, (u64)pagemap_addr[pos].addr, 1);
                                 __fence = xe_migrate_from_vram(vr->migrate, 1,
                                                                vram_addr,
-                                                               &pagemap_addr[pos]);
+                                                               &pagemap_addr[pos],
+                                                               pre_migrate_fence);
                         } else {
                                 vm_dbg(&xe->drm,
                                        "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
                                        (u64)pagemap_addr[pos].addr, vram_addr, 1);
                                 __fence = xe_migrate_to_vram(vr->migrate, 1,
                                                              &pagemap_addr[pos],
-                                                             vram_addr);
+                                                             vram_addr,
+                                                             pre_migrate_fence);
                         }
                         if (IS_ERR(__fence)) {
                                 err = PTR_ERR(__fence);
                                 goto err_out;
                         }
-
+                        pre_migrate_fence = NULL;
                         dma_fence_put(fence);
                         fence = __fence;
                 }
@@ -629,6 +634,8 @@ static int xe_svm_copy(struct page **pages,
                 dma_fence_wait(fence, false);
                 dma_fence_put(fence);
         }
+        if (pre_migrate_fence)
+                dma_fence_wait(pre_migrate_fence, false);
 
         /*
          * XXX: We can't derive the GT here (or anywhere in this functions, but
@@ -645,16 +652,20 @@ static int xe_svm_copy(struct page **pages,
 
 static int xe_svm_copy_to_devmem(struct page **pages,
                                  struct drm_pagemap_addr *pagemap_addr,
-                                 unsigned long npages)
+                                 unsigned long npages,
+                                 struct dma_fence *pre_migrate_fence)
 {
-        return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
+        return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM,
+                           pre_migrate_fence);
 }
 
 static int xe_svm_copy_to_ram(struct page **pages,
                               struct drm_pagemap_addr *pagemap_addr,
-                              unsigned long npages)
+                              unsigned long npages,
+                              struct dma_fence *pre_migrate_fence)
 {
-        return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
+        return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM,
+                           pre_migrate_fence);
 }
 
 static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
@@ -667,6 +678,7 @@ static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
         struct xe_bo *bo = to_xe_bo(devmem_allocation);
         struct xe_device *xe = xe_bo_device(bo);
 
+        dma_fence_put(devmem_allocation->pre_migrate_fence);
         xe_bo_put_async(bo);
         xe_pm_runtime_put(xe);
 }
@@ -861,6 +873,7 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
                                       unsigned long timeslice_ms)
 {
         struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
+        struct dma_fence *pre_migrate_fence = NULL;
         struct xe_device *xe = vr->xe;
         struct device *dev = xe->drm.dev;
         struct drm_buddy_block *block;
@@ -887,8 +900,20 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
                 break;
         }
 
+        /* Ensure that any clearing or async eviction will complete before migration. */
+        if (!dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL)) {
+                err = dma_resv_get_singleton(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+                                             &pre_migrate_fence);
+                if (err)
+                        dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+                                              false, MAX_SCHEDULE_TIMEOUT);
+                else if (pre_migrate_fence)
+                        dma_fence_enable_sw_signaling(pre_migrate_fence);
+        }
+
         drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
-                                &dpagemap_devmem_ops, dpagemap, end - start);
+                                &dpagemap_devmem_ops, dpagemap, end - start,
+                                pre_migrate_fence);
 
         blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
         list_for_each_entry(block, blocks, link)
@@ -941,7 +966,7 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
         xe_assert(vm->xe, IS_DGFX(vm->xe));
 
         if (xe_svm_range_in_vram(range)) {
-                drm_info(&vm->xe->drm, "Range is already in VRAM\n");
+                drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
                 return false;
         }
 
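For readers unfamiliar with the dma-resv idiom in the xe_drm_pagemap_populate_mm() hunk above: the patch snapshots all outstanding DMA_RESV_USAGE_KERNEL fences on the buffer object (pending clears and async evictions) into a single fence that the first migration job can depend on, falling back to a synchronous wait if the merged fence cannot be allocated. Below is a minimal, self-contained sketch of that pattern in isolation; the helper name snapshot_kernel_fences() is hypothetical and not part of this patch, but the dma-resv/dma-fence calls are the kernel APIs used in the hunk.

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/sched.h>        /* MAX_SCHEDULE_TIMEOUT */

/*
 * Hypothetical helper mirroring the pattern above: return one fence
 * representing all DMA_RESV_USAGE_KERNEL fences on @resv, or NULL if
 * nothing is pending (or if we had to wait synchronously instead).
 */
static struct dma_fence *snapshot_kernel_fences(struct dma_resv *resv)
{
        struct dma_fence *fence = NULL;

        /* Fast path: no pending kernel work, no dependency to carry. */
        if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_KERNEL))
                return NULL;

        /* Merge all kernel-usage fences into a single fence. */
        if (dma_resv_get_singleton(resv, DMA_RESV_USAGE_KERNEL, &fence)) {
                /*
                 * Allocation failure: preserve correctness by blocking
                 * here instead of handing a dependency to the migration.
                 */
                dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL, false,
                                      MAX_SCHEDULE_TIMEOUT);
                return NULL;
        }

        /* Guarantee the fence signals even without a hardware waiter. */
        if (fence)
                dma_fence_enable_sw_signaling(fence);

        return fence;   /* caller owns a reference; dma_fence_put() when done */
}

Note also the consume-once handling in xe_svm_copy(): pre_migrate_fence is passed only to the first migration job and then set to NULL, since later jobs already order behind the first through the chained fence/__fence; if no copy job is ever issued, the function waits on the fence directly before returning, and the reference is ultimately dropped in xe_svm_devmem_release() via dma_fence_put().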