@@ -1171,13 +1171,12 @@ svm_range_split_head(struct svm_range *prange, uint64_t new_start,
11711171}
11721172
/**
 * svm_range_add_child - queue a split-off child range on the parent's list
 * @prange: parent range whose child_list receives the new entry
 * @pchild: child range produced by a split; carries the pending work item
 * @op: deferred operation to perform on the child (enum svm_work_list_ops)
 *
 * Records @op in the child's work_item and links the child onto
 * @prange->child_list for later processing by the deferred-list worker.
 *
 * work_item.mm is deliberately left NULL here: the mm reference is taken
 * only when the range is queued for deferred work, via mmget_not_zero() in
 * svm_range_add_list_work() (see that function's "Pairs with mmput in
 * deferred_list_work" comment). This avoids holding an mm reference for a
 * process that may already be exiting.
 *
 * Caller context: NOTE(review) — callers appear to hold the parent range
 * lock while splitting; confirm against svm_range_unmap_from_cpu.
 */
static void
svm_range_add_child(struct svm_range *prange, struct svm_range *pchild,
		    enum svm_work_list_ops op)
{
	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
		 pchild, pchild->start, pchild->last, prange, op);

	pchild->work_item.mm = NULL;
	pchild->work_item.op = op;
	list_add_tail(&pchild->child_list, &prange->child_list);
}
@@ -1278,7 +1277,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
12781277 mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC ;
12791278 /* system memory accessed by the dGPU */
12801279 } else {
1281- if (gc_ip_version < IP_VERSION (9 , 5 , 0 ))
1280+ if (gc_ip_version < IP_VERSION (9 , 5 , 0 ) || ext_coherent )
12821281 mapping_flags |= AMDGPU_VM_MTYPE_UC ;
12831282 else
12841283 mapping_flags |= AMDGPU_VM_MTYPE_NC ;
@@ -2394,15 +2393,17 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
23942393 prange -> work_item .op != SVM_OP_UNMAP_RANGE )
23952394 prange -> work_item .op = op ;
23962395 } else {
2397- prange -> work_item .op = op ;
2398-
2399- /* Pairs with mmput in deferred_list_work */
2400- mmget (mm );
2401- prange -> work_item .mm = mm ;
2402- list_add_tail (& prange -> deferred_list ,
2403- & prange -> svms -> deferred_range_list );
2404- pr_debug ("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n" ,
2405- prange , prange -> start , prange -> last , op );
2396+ /* Pairs with mmput in deferred_list_work.
2397+ * If process is exiting and mm is gone, don't update mmu notifier.
2398+ */
2399+ if (mmget_not_zero (mm )) {
2400+ prange -> work_item .mm = mm ;
2401+ prange -> work_item .op = op ;
2402+ list_add_tail (& prange -> deferred_list ,
2403+ & prange -> svms -> deferred_range_list );
2404+ pr_debug ("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n" ,
2405+ prange , prange -> start , prange -> last , op );
2406+ }
24062407 }
24072408 spin_unlock (& svms -> deferred_list_lock );
24082409}
@@ -2416,8 +2417,7 @@ void schedule_deferred_list_work(struct svm_range_list *svms)
24162417}
24172418
24182419static void
2419- svm_range_unmap_split (struct mm_struct * mm , struct svm_range * parent ,
2420- struct svm_range * prange , unsigned long start ,
2420+ svm_range_unmap_split (struct svm_range * parent , struct svm_range * prange , unsigned long start ,
24212421 unsigned long last )
24222422{
24232423 struct svm_range * head ;
@@ -2438,12 +2438,12 @@ svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
24382438 svm_range_split (tail , last + 1 , tail -> last , & head );
24392439
24402440 if (head != prange && tail != prange ) {
2441- svm_range_add_child (parent , mm , head , SVM_OP_UNMAP_RANGE );
2442- svm_range_add_child (parent , mm , tail , SVM_OP_ADD_RANGE );
2441+ svm_range_add_child (parent , head , SVM_OP_UNMAP_RANGE );
2442+ svm_range_add_child (parent , tail , SVM_OP_ADD_RANGE );
24432443 } else if (tail != prange ) {
2444- svm_range_add_child (parent , mm , tail , SVM_OP_UNMAP_RANGE );
2444+ svm_range_add_child (parent , tail , SVM_OP_UNMAP_RANGE );
24452445 } else if (head != prange ) {
2446- svm_range_add_child (parent , mm , head , SVM_OP_UNMAP_RANGE );
2446+ svm_range_add_child (parent , head , SVM_OP_UNMAP_RANGE );
24472447 } else if (parent != prange ) {
24482448 prange -> work_item .op = SVM_OP_UNMAP_RANGE ;
24492449 }
@@ -2520,14 +2520,14 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
25202520 l = min (last , pchild -> last );
25212521 if (l >= s )
25222522 svm_range_unmap_from_gpus (pchild , s , l , trigger );
2523- svm_range_unmap_split (mm , prange , pchild , start , last );
2523+ svm_range_unmap_split (prange , pchild , start , last );
25242524 mutex_unlock (& pchild -> lock );
25252525 }
25262526 s = max (start , prange -> start );
25272527 l = min (last , prange -> last );
25282528 if (l >= s )
25292529 svm_range_unmap_from_gpus (prange , s , l , trigger );
2530- svm_range_unmap_split (mm , prange , prange , start , last );
2530+ svm_range_unmap_split (prange , prange , start , last );
25312531
25322532 if (unmap_parent )
25332533 svm_range_add_list_work (svms , prange , mm , SVM_OP_UNMAP_RANGE );
@@ -2570,8 +2570,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
25702570
25712571 if (range -> event == MMU_NOTIFY_RELEASE )
25722572 return true;
2573- if (!mmget_not_zero (mni -> mm ))
2574- return true;
25752573
25762574 start = mni -> interval_tree .start ;
25772575 last = mni -> interval_tree .last ;
@@ -2598,7 +2596,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
25982596 }
25992597
26002598 svm_range_unlock (prange );
2601- mmput (mni -> mm );
26022599
26032600 return true;
26042601}
0 commit comments