@@ -964,10 +964,16 @@ static void xe_pt_cancel_bind(struct xe_vma *vma,
964964 }
965965}
966966
967+ #define XE_INVALID_VMA ((struct xe_vma *)(0xdeaddeadull))
968+
967969static void xe_pt_commit_prepare_locks_assert (struct xe_vma * vma )
968970{
969- struct xe_vm * vm = xe_vma_vm ( vma ) ;
971+ struct xe_vm * vm ;
970972
973+ if (vma == XE_INVALID_VMA )
974+ return ;
975+
976+ vm = xe_vma_vm (vma );
971977 lockdep_assert_held (& vm -> lock );
972978
973979 if (!xe_vma_has_no_bo (vma ))
@@ -978,8 +984,12 @@ static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma)
978984
979985static void xe_pt_commit_locks_assert (struct xe_vma * vma )
980986{
981- struct xe_vm * vm = xe_vma_vm ( vma ) ;
987+ struct xe_vm * vm ;
982988
989+ if (vma == XE_INVALID_VMA )
990+ return ;
991+
992+ vm = xe_vma_vm (vma );
983993 xe_pt_commit_prepare_locks_assert (vma );
984994
985995 if (xe_vma_is_userptr (vma ))
@@ -1007,7 +1017,8 @@ static void xe_pt_commit(struct xe_vma *vma,
10071017 int j_ = j + entries [i ].ofs ;
10081018
10091019 pt_dir -> children [j_ ] = pt_dir -> staging [j_ ];
1010- xe_pt_destroy (oldpte , xe_vma_vm (vma )-> flags , deferred );
1020+ xe_pt_destroy (oldpte , (vma == XE_INVALID_VMA ) ? 0 :
1021+ xe_vma_vm (vma )-> flags , deferred );
10111022 }
10121023 }
10131024}
@@ -1420,6 +1431,9 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
14201431 list_for_each_entry (op , & vops -> list , link ) {
14211432 struct xe_svm_range * range = op -> map_range .range ;
14221433
1434+ if (op -> subop == XE_VMA_SUBOP_UNMAP_RANGE )
1435+ continue ;
1436+
14231437 xe_assert (vm -> xe , xe_vma_is_cpu_addr_mirror (op -> map_range .vma ));
14241438 xe_assert (vm -> xe , op -> subop == XE_VMA_SUBOP_MAP_RANGE );
14251439
@@ -1617,7 +1631,9 @@ static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
16171631 * xe_pt_stage_unbind() - Build page-table update structures for an unbind
16181632 * operation
16191633 * @tile: The tile we're unbinding for.
1634+ * @vm: The vm we're unbinding from.
16201635 * @vma: The vma we're unbinding.
1636+ * @range: The SVM range we're unbinding, or NULL if unbinding @vma instead.
16211637 * @entries: Caller-provided storage for the update structures.
16221638 *
16231639 * Builds page-table update structures for an unbind operation. The function
@@ -1627,9 +1643,14 @@ static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
16271643 *
16281644 * Return: The number of entries used.
16291645 */
1630- static unsigned int xe_pt_stage_unbind (struct xe_tile * tile , struct xe_vma * vma ,
1646+ static unsigned int xe_pt_stage_unbind (struct xe_tile * tile ,
1647+ struct xe_vm * vm ,
1648+ struct xe_vma * vma ,
1649+ struct xe_svm_range * range ,
16311650 struct xe_vm_pgtable_update * entries )
16321651{
1652+ u64 start = range ? range -> base .itree .start : xe_vma_start (vma );
1653+ u64 end = range ? range -> base .itree .last + 1 : xe_vma_end (vma );
16331654 struct xe_pt_stage_unbind_walk xe_walk = {
16341655 .base = {
16351656 .ops = & xe_pt_stage_unbind_ops ,
@@ -1638,14 +1659,14 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
16381659 .staging = true,
16391660 },
16401661 .tile = tile ,
1641- .modified_start = xe_vma_start ( vma ) ,
1642- .modified_end = xe_vma_end ( vma ) ,
1662+ .modified_start = start ,
1663+ .modified_end = end ,
16431664 .wupd .entries = entries ,
16441665 };
1645- struct xe_pt * pt = xe_vma_vm ( vma ) -> pt_root [tile -> id ];
1666+ struct xe_pt * pt = vm -> pt_root [tile -> id ];
16461667
1647- (void )xe_pt_walk_shared (& pt -> base , pt -> level , xe_vma_start ( vma ) ,
1648- xe_vma_end ( vma ), & xe_walk .base );
1668+ (void )xe_pt_walk_shared (& pt -> base , pt -> level , start , end ,
1669+ & xe_walk .base );
16491670
16501671 return xe_walk .wupd .num_used_entries ;
16511672}
@@ -1887,13 +1908,6 @@ static int unbind_op_prepare(struct xe_tile *tile,
18871908 "Preparing unbind, with range [%llx...%llx)\n" ,
18881909 xe_vma_start (vma ), xe_vma_end (vma ) - 1 );
18891910
1890- /*
1891- * Wait for invalidation to complete. Can corrupt internal page table
1892- * state if an invalidation is running while preparing an unbind.
1893- */
1894- if (xe_vma_is_userptr (vma ) && xe_vm_in_fault_mode (xe_vma_vm (vma )))
1895- mmu_interval_read_begin (& to_userptr_vma (vma )-> userptr .notifier );
1896-
18971911 pt_op -> vma = vma ;
18981912 pt_op -> bind = false;
18991913 pt_op -> rebind = false;
@@ -1902,7 +1916,8 @@ static int unbind_op_prepare(struct xe_tile *tile,
19021916 if (err )
19031917 return err ;
19041918
1905- pt_op -> num_entries = xe_pt_stage_unbind (tile , vma , pt_op -> entries );
1919+ pt_op -> num_entries = xe_pt_stage_unbind (tile , xe_vma_vm (vma ),
1920+ vma , NULL , pt_op -> entries );
19061921
19071922 xe_vm_dbg_print_entries (tile_to_xe (tile ), pt_op -> entries ,
19081923 pt_op -> num_entries , false);
@@ -1917,6 +1932,42 @@ static int unbind_op_prepare(struct xe_tile *tile,
19171932 return 0 ;
19181933}
19191934
1935+ static int unbind_range_prepare (struct xe_vm * vm ,
1936+ struct xe_tile * tile ,
1937+ struct xe_vm_pgtable_update_ops * pt_update_ops ,
1938+ struct xe_svm_range * range )
1939+ {
1940+ u32 current_op = pt_update_ops -> current_op ;
1941+ struct xe_vm_pgtable_update_op * pt_op = & pt_update_ops -> ops [current_op ];
1942+
1943+ if (!(range -> tile_present & BIT (tile -> id )))
1944+ return 0 ;
1945+
1946+ vm_dbg (& vm -> xe -> drm ,
1947+ "Preparing unbind, with range [%lx...%lx)\n" ,
1948+ range -> base .itree .start , range -> base .itree .last );
1949+
1950+ pt_op -> vma = XE_INVALID_VMA ;
1951+ pt_op -> bind = false;
1952+ pt_op -> rebind = false;
1953+
1954+ pt_op -> num_entries = xe_pt_stage_unbind (tile , vm , NULL , range ,
1955+ pt_op -> entries );
1956+
1957+ xe_vm_dbg_print_entries (tile_to_xe (tile ), pt_op -> entries ,
1958+ pt_op -> num_entries , false);
1959+ xe_pt_update_ops_rfence_interval (pt_update_ops , range -> base .itree .start ,
1960+ range -> base .itree .last + 1 );
1961+ ++ pt_update_ops -> current_op ;
1962+ pt_update_ops -> needs_svm_lock = true;
1963+ pt_update_ops -> needs_invalidation = true;
1964+
1965+ xe_pt_commit_prepare_unbind (XE_INVALID_VMA , pt_op -> entries ,
1966+ pt_op -> num_entries );
1967+
1968+ return 0 ;
1969+ }
1970+
19201971static int op_prepare (struct xe_vm * vm ,
19211972 struct xe_tile * tile ,
19221973 struct xe_vm_pgtable_update_ops * pt_update_ops ,
@@ -1984,6 +2035,9 @@ static int op_prepare(struct xe_vm *vm,
19842035 err = bind_range_prepare (vm , tile , pt_update_ops ,
19852036 op -> map_range .vma ,
19862037 op -> map_range .range );
2038+ } else if (op -> subop == XE_VMA_SUBOP_UNMAP_RANGE ) {
2039+ err = unbind_range_prepare (vm , tile , pt_update_ops ,
2040+ op -> unmap_range .range );
19872041 }
19882042 break ;
19892043 default :
@@ -2173,6 +2227,8 @@ static void op_commit(struct xe_vm *vm,
21732227 if (op -> subop == XE_VMA_SUBOP_MAP_RANGE ) {
21742228 op -> map_range .range -> tile_present |= BIT (tile -> id );
21752229 op -> map_range .range -> tile_invalidated &= ~BIT (tile -> id );
2230+ } else if (op -> subop == XE_VMA_SUBOP_UNMAP_RANGE ) {
2231+ op -> unmap_range .range -> tile_present &= ~BIT (tile -> id );
21762232 }
21772233 break ;
21782234 }