@@ -1106,26 +1106,32 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
 }
 
 static int
-svm_range_split_tail(struct svm_range *prange,
-		     uint64_t new_last, struct list_head *insert_list)
+svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
+		     struct list_head *insert_list, struct list_head *remap_list)
 {
 	struct svm_range *tail;
 	int r = svm_range_split(prange, prange->start, new_last, &tail);
 
-	if (!r)
+	if (!r) {
 		list_add(&tail->list, insert_list);
+		if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
+			list_add(&tail->update_list, remap_list);
+	}
 	return r;
 }
 
 static int
-svm_range_split_head(struct svm_range *prange,
-		     uint64_t new_start, struct list_head *insert_list)
+svm_range_split_head(struct svm_range *prange, uint64_t new_start,
+		     struct list_head *insert_list, struct list_head *remap_list)
 {
 	struct svm_range *head;
 	int r = svm_range_split(prange, new_start, prange->last, &head);
 
-	if (!r)
+	if (!r) {
 		list_add(&head->list, insert_list);
+		if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
+			list_add(&head->update_list, remap_list);
+	}
 	return r;
 }
 
@@ -2052,7 +2058,7 @@ static int
 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
 	      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
 	      struct list_head *update_list, struct list_head *insert_list,
-	      struct list_head *remove_list)
+	      struct list_head *remove_list, struct list_head *remap_list)
 {
 	unsigned long last = start + size - 1UL;
 	struct svm_range_list *svms = &p->svms;
@@ -2068,6 +2074,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
 	INIT_LIST_HEAD(insert_list);
 	INIT_LIST_HEAD(remove_list);
 	INIT_LIST_HEAD(&new_list);
+	INIT_LIST_HEAD(remap_list);
 
 	node = interval_tree_iter_first(&svms->objects, start, last);
 	while (node) {
@@ -2104,14 +2111,14 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
 		if (node->start < start) {
 			pr_debug("change old range start\n");
 			r = svm_range_split_head(prange, start,
-						 insert_list);
+						 insert_list, remap_list);
 			if (r)
 				goto out;
 		}
 		if (node->last > last) {
 			pr_debug("change old range last\n");
 			r = svm_range_split_tail(prange, last,
-						insert_list);
+						insert_list, remap_list);
 			if (r)
 				goto out;
 		}
@@ -3501,6 +3508,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
 	struct list_head update_list;
 	struct list_head insert_list;
 	struct list_head remove_list;
+	struct list_head remap_list;
 	struct svm_range_list *svms;
 	struct svm_range *prange;
 	struct svm_range *next;
@@ -3532,7 +3540,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
 
 	/* Add new range and split existing ranges as needed */
 	r = svm_range_add(p, start, size, nattr, attrs, &update_list,
-			  &insert_list, &remove_list);
+			  &insert_list, &remove_list, &remap_list);
 	if (r) {
 		mutex_unlock(&svms->lock);
 		mmap_write_unlock(mm);
@@ -3597,6 +3605,19 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
 			ret = r;
 	}
 
+	list_for_each_entry(prange, &remap_list, update_list) {
+		pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
+			 prange, prange->start, prange->last);
+		mutex_lock(&prange->migrate_mutex);
+		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
+					       true, true, prange->mapped_to_gpu);
+		if (r)
+			pr_debug("failed %d on remap svm range\n", r);
+		mutex_unlock(&prange->migrate_mutex);
+		if (r)
+			ret = r;
+	}
+
 	dynamic_svm_range_dump(svms);
 
 	mutex_unlock(&svms->lock);
0 commit comments