@@ -1118,6 +1118,10 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
 	lockdep_assert_held(&gpusvm->notifier_lock);
 
 	if (range->flags.has_dma_mapping) {
+		struct drm_gpusvm_range_flags flags = {
+			.__flags = range->flags.__flags,
+		};
+
 		for (i = 0, j = 0; i < npages; j++) {
 			struct drm_pagemap_device_addr *addr = &range->dma_addr[j];
 
@@ -1131,8 +1135,12 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
 					       dev, *addr);
 			i += 1 << addr->order;
 		}
+
+		/* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
+		flags.has_devmem_pages = false;
+		flags.has_dma_mapping = false;
+		WRITE_ONCE(range->flags.__flags, flags.__flags);
+
 		range->dpagemap = NULL;
 	}
 }
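
For context, a minimal sketch (not part of this commit) of the reader side these stores pair with: an opportunistic, lockless check snapshots the whole flags word once via READ_ONCE() so the compiler can neither tear nor re-read the bitfields. The helper name is hypothetical; only the field names come from the hunks above.

/* Hypothetical lockless reader pairing with the WRITE_ONCE() above. */
static bool drm_gpusvm_range_pages_valid_sketch(struct drm_gpusvm_range *range)
{
	struct drm_gpusvm_range_flags flags = {
		/* One atomic snapshot of every flag bit. */
		.__flags = READ_ONCE(range->flags.__flags),
	};

	return flags.has_dma_mapping && !flags.unmapped;
}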
@@ -1334,6 +1342,7 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
 	int err = 0;
 	struct dev_pagemap *pagemap;
 	struct drm_pagemap *dpagemap;
+	struct drm_gpusvm_range_flags flags;
 
 retry:
 	hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
@@ -1378,7 +1387,8 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
 	 */
 	drm_gpusvm_notifier_lock(gpusvm);
 
-	if (range->flags.unmapped) {
+	flags.__flags = range->flags.__flags;
+	if (flags.unmapped) {
 		drm_gpusvm_notifier_unlock(gpusvm);
 		err = -EFAULT;
 		goto err_free;
@@ -1454,6 +1464,11 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
 				goto err_unmap;
 			}
 
+			if (ctx->devmem_only) {
+				err = -EFAULT;
+				goto err_unmap;
+			}
+
 			addr = dma_map_page(gpusvm->drm->dev,
 					    page, 0,
 					    PAGE_SIZE << order,
@@ -1469,14 +1484,17 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
 		}
 		i += 1 << order;
 		num_dma_mapped = i;
-		range->flags.has_dma_mapping = true;
+		flags.has_dma_mapping = true;
 	}
 
 	if (zdd) {
-		range->flags.has_devmem_pages = true;
+		flags.has_devmem_pages = true;
 		range->dpagemap = dpagemap;
 	}
 
+	/* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
+	WRITE_ONCE(range->flags.__flags, flags.__flags);
+
 	drm_gpusvm_notifier_unlock(gpusvm);
 	kvfree(pfns);
 set_seqno:
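
A sketch of how a driver might consume the new devmem_only behaviour; the migrate helper is hypothetical and the call shape is assumed from the hunk headers above, not taken from this commit.

/*
 * Hypothetical caller: with ctx.devmem_only set, a range backed by
 * system-memory pages now fails with -EFAULT instead of being
 * DMA-mapped, letting the driver migrate to devmem and retry.
 */
err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
if (err == -EFAULT) {
	err = driver_migrate_range_to_devmem(range);	/* hypothetical */
	if (!err)
		err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
}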
@@ -1765,6 +1783,8 @@ int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
 		goto err_finalize;
 
 	/* Upon success bind devmem allocation to range and zdd */
+	devmem_allocation->timeslice_expiration = get_jiffies_64() +
+		msecs_to_jiffies(ctx->timeslice_ms);
 	zdd->devmem_allocation = devmem_allocation;	/* Owns ref */
 
 err_finalize:
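
The stamp arms a minimum residency window: for ctx->timeslice_ms milliseconds after a successful migration, CPU faults will not pull the allocation back to system memory (see the check added in __drm_gpusvm_migrate_to_ram() below). An illustrative driver-side context follows; the field names come from the hunks, the 5 ms value is made up.

/* Illustrative ctx: devmem-only faulting with a 5 ms residency window. */
const struct drm_gpusvm_ctx ctx = {
	.devmem_only = 1,
	.timeslice_ms = 5,
};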
@@ -1985,6 +2005,13 @@ static int __drm_gpusvm_migrate_to_ram(struct vm_area_struct *vas,
 	void *buf;
 	int i, err = 0;
 
+	if (page) {
+		zdd = page->zone_device_data;
+		if (time_before64(get_jiffies_64(),
+				  zdd->devmem_allocation->timeslice_expiration))
+			return 0;
+	}
+
 	start = ALIGN_DOWN(fault_addr, size);
 	end = ALIGN(fault_addr + 1, size);
 
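
While jiffies64 has not yet reached the stamp, the fault handler bails out with 0 so the CPU access is simply retried, which prevents migration ping-pong between concurrent CPU and GPU faults. The early-out reduces to the predicate sketched below; the wrapper and the zdd type name are assumptions, only the fields appear in the hunk.

/* Hypothetical reduction of the early-out above to its predicate. */
static bool timeslice_active(const struct drm_gpusvm_zdd *zdd)
{
	return time_before64(get_jiffies_64(),
			     zdd->devmem_allocation->timeslice_expiration);
}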