@@ -214,6 +214,14 @@ static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
214214{
215215 struct dmirror * dmirror = container_of (mni , struct dmirror , notifier );
216216
217+ /*
218+ * Ignore invalidation callbacks for device private pages since
219+ * the invalidation is handled as part of the migration process.
220+ */
221+ if (range -> event == MMU_NOTIFY_MIGRATE &&
222+ range -> migrate_pgmap_owner == dmirror -> mdevice )
223+ return true;
224+
217225 if (mmu_notifier_range_blockable (range ))
218226 mutex_lock (& dmirror -> mutex );
219227 else if (!mutex_trylock (& dmirror -> mutex ))
@@ -693,7 +701,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
693701 args .dst = dst_pfns ;
694702 args .start = addr ;
695703 args .end = next ;
696- args .pgmap_owner = NULL ;
704+ args .pgmap_owner = dmirror -> mdevice ;
697705 args .flags = MIGRATE_VMA_SELECT_SYSTEM ;
698706 ret = migrate_vma_setup (& args );
699707 if (ret )
@@ -983,7 +991,7 @@ static void dmirror_devmem_free(struct page *page)
983991}
984992
985993static vm_fault_t dmirror_devmem_fault_alloc_and_copy (struct migrate_vma * args ,
986- struct dmirror_device * mdevice )
994+ struct dmirror * dmirror )
987995{
988996 const unsigned long * src = args -> src ;
989997 unsigned long * dst = args -> dst ;
@@ -1005,6 +1013,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
10051013 continue ;
10061014
10071015 lock_page (dpage );
1016+ xa_erase (& dmirror -> pt , addr >> PAGE_SHIFT );
10081017 copy_highpage (dpage , spage );
10091018 * dst = migrate_pfn (page_to_pfn (dpage )) | MIGRATE_PFN_LOCKED ;
10101019 if (* src & MIGRATE_PFN_WRITE )
@@ -1013,15 +1022,6 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
10131022 return 0 ;
10141023}
10151024
1016- static void dmirror_devmem_fault_finalize_and_map (struct migrate_vma * args ,
1017- struct dmirror * dmirror )
1018- {
1019- /* Invalidate the device's page table mapping. */
1020- mutex_lock (& dmirror -> mutex );
1021- dmirror_do_update (dmirror , args -> start , args -> end );
1022- mutex_unlock (& dmirror -> mutex );
1023- }
1024-
10251025static vm_fault_t dmirror_devmem_fault (struct vm_fault * vmf )
10261026{
10271027 struct migrate_vma args ;
@@ -1051,11 +1051,15 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
10511051 if (migrate_vma_setup (& args ))
10521052 return VM_FAULT_SIGBUS ;
10531053
1054- ret = dmirror_devmem_fault_alloc_and_copy (& args , dmirror -> mdevice );
1054+ ret = dmirror_devmem_fault_alloc_and_copy (& args , dmirror );
10551055 if (ret )
10561056 return ret ;
10571057 migrate_vma_pages (& args );
1058- dmirror_devmem_fault_finalize_and_map (& args , dmirror );
1058+ /*
1059+ * No device finalize step is needed since
1060+ * dmirror_devmem_fault_alloc_and_copy() will have already
1061+ * invalidated the device page table.
1062+ */
10591063 migrate_vma_finalize (& args );
10601064 return 0 ;
10611065}