Skip to content

Commit ebb0880

Browse files
committed
drm/xe: Skip TLB invalidation waits in page fault binds
Avoid waiting on unrelated TLB invalidations when servicing page fault binds. Since the migrate queue is shared across processes, TLB invalidations triggered by other processes may occur concurrently but are not relevant to the current bind. Teach the bind pipeline to skip waits on such invalidations to prevent unnecessary serialization.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/20251031234050.3043507-5-matthew.brost@intel.com
1 parent cb99e12 commit ebb0880

2 files changed

Lines changed: 13 additions & 2 deletions

File tree

drivers/gpu/drm/xe/xe_vm.c

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -755,6 +755,7 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
755755
xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
756756

757757
xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
758+
vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT;
758759
for_each_tile(tile, vm->xe, id) {
759760
vops.pt_update_ops[id].wait_vm_bookkeep = true;
760761
vops.pt_update_ops[tile->id].q =
@@ -845,6 +846,7 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
845846
xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
846847

847848
xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
849+
vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT;
848850
for_each_tile(tile, vm->xe, id) {
849851
vops.pt_update_ops[id].wait_vm_bookkeep = true;
850852
vops.pt_update_ops[tile->id].q =
@@ -3111,8 +3113,13 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
31113113
if (number_tiles == 0)
31123114
return ERR_PTR(-ENODATA);
31133115

3114-
for_each_tile(tile, vm->xe, id)
3115-
n_fence += (1 + XE_MAX_GT_PER_TILE);
3116+
if (vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT) {
3117+
for_each_tile(tile, vm->xe, id)
3118+
++n_fence;
3119+
} else {
3120+
for_each_tile(tile, vm->xe, id)
3121+
n_fence += (1 + XE_MAX_GT_PER_TILE);
3122+
}
31163123

31173124
fences = kmalloc_array(n_fence, sizeof(*fences), GFP_KERNEL);
31183125
if (!fences) {
@@ -3153,6 +3160,9 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
31533160

31543161
collect_fences:
31553162
fences[current_fence++] = fence ?: dma_fence_get_stub();
3163+
if (vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT)
3164+
continue;
3165+
31563166
xe_migrate_job_lock(tile->migrate, q);
31573167
for_each_tlb_inval(i)
31583168
fences[current_fence++] =

drivers/gpu/drm/xe/xe_vm_types.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -466,6 +466,7 @@ struct xe_vma_ops {
466466
#define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0)
467467
#define XE_VMA_OPS_FLAG_MADVISE BIT(1)
468468
#define XE_VMA_OPS_ARRAY_OF_BINDS BIT(2)
469+
#define XE_VMA_OPS_FLAG_SKIP_TLB_WAIT BIT(3)
469470
u32 flags;
470471
#ifdef TEST_VM_OPS_ERROR
471472
/** @inject_error: inject error to test error handling */

0 commit comments

Comments (0)