Skip to content

Commit 8443e8c

Browse files
committed
drm/xe: Add helpers to send TLB invalidations
Break out the GuC specific code into helpers as part of the process to decouple frontend TLB invalidation code from the backend. Signed-off-by: Stuart Summers <stuart.summers@intel.com> Reviewed-by: Stuart Summers <stuart.summers@intel.com> Signed-off-by: Matthew Brost <matthew.brost@intel.com> Link: https://lore.kernel.org/r/20250826182911.392550-9-stuart.summers@intel.com
1 parent 9aff63c commit 8443e8c

1 file changed

Lines changed: 117 additions & 117 deletions

File tree

drivers/gpu/drm/xe/xe_tlb_inval.c

Lines changed: 117 additions & 117 deletions
Original file line number | Diff line number | Diff line change
@@ -221,12 +221,11 @@ static bool tlb_inval_seqno_past(struct xe_gt *gt, int seqno)
221221
return seqno_recv >= seqno;
222222
}
223223

224-
static int send_tlb_inval(struct xe_guc *guc, struct xe_tlb_inval_fence *fence,
225-
u32 *action, int len)
224+
static int send_tlb_inval(struct xe_guc *guc, const u32 *action, int len)
226225
{
227226
struct xe_gt *gt = guc_to_gt(guc);
228227

229-
xe_gt_assert(gt, fence);
228+
xe_gt_assert(gt, action[1]); /* Seqno */
230229

231230
/*
232231
* XXX: The seqno algorithm relies on TLB invalidation being processed
@@ -235,7 +234,6 @@ static int send_tlb_inval(struct xe_guc *guc, struct xe_tlb_inval_fence *fence,
235234
*/
236235

237236
xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);
238-
action[1] = fence->seqno;
239237

240238
return xe_guc_ct_send(&guc->ct, action, len,
241239
G2H_LEN_DW_TLB_INVALIDATE, 1);
@@ -270,91 +268,15 @@ static void xe_tlb_inval_fence_prep(struct xe_tlb_inval_fence *fence)
270268
XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
271269
XE_GUC_TLB_INVAL_FLUSH_CACHE)
272270

273-
/**
274-
* xe_tlb_inval_guc - Issue a TLB invalidation on this GT for the GuC
275-
* @gt: GT structure
276-
* @fence: invalidation fence which will be signal on TLB invalidation
277-
* completion
278-
*
279-
* Issue a TLB invalidation for the GuC. Completion of TLB is asynchronous and
280-
* caller can use the invalidation fence to wait for completion.
281-
*
282-
* Return: 0 on success, negative error code on error
283-
*/
284-
static int xe_tlb_inval_guc(struct xe_gt *gt,
285-
struct xe_tlb_inval_fence *fence)
271+
static int send_tlb_inval_ggtt(struct xe_gt *gt, int seqno)
286272
{
287273
u32 action[] = {
288274
XE_GUC_ACTION_TLB_INVALIDATION,
289-
0, /* seqno, replaced in send_tlb_inval */
275+
seqno,
290276
MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
291277
};
292-
int ret;
293-
294-
mutex_lock(&gt->tlb_inval.seqno_lock);
295-
xe_tlb_inval_fence_prep(fence);
296-
297-
ret = send_tlb_inval(&gt->uc.guc, fence, action, ARRAY_SIZE(action));
298-
if (ret < 0)
299-
inval_fence_signal_unlocked(gt_to_xe(gt), fence);
300-
mutex_unlock(&gt->tlb_inval.seqno_lock);
301-
302-
/*
303-
* -ECANCELED indicates the CT is stopped for a GT reset. TLB caches
304-
* should be nuked on a GT reset so this error can be ignored.
305-
*/
306-
if (ret == -ECANCELED)
307-
return 0;
308-
309-
return ret;
310-
}
311-
312-
/**
313-
* xe_tlb_inval_ggtt - Issue a TLB invalidation on this GT for the GGTT
314-
* @tlb_inval: TLB invalidation client
315-
*
316-
* Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
317-
* synchronous.
318-
*
319-
* Return: 0 on success, negative error code on error
320-
*/
321-
int xe_tlb_inval_ggtt(struct xe_tlb_inval *tlb_inval)
322-
{
323-
struct xe_gt *gt = tlb_inval->private;
324-
struct xe_device *xe = gt_to_xe(gt);
325-
unsigned int fw_ref;
326-
327-
if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
328-
gt->uc.guc.submission_state.enabled) {
329-
struct xe_tlb_inval_fence fence;
330-
int ret;
331-
332-
xe_tlb_inval_fence_init(tlb_inval, &fence, true);
333-
ret = xe_tlb_inval_guc(gt, &fence);
334-
if (ret)
335-
return ret;
336-
337-
xe_tlb_inval_fence_wait(&fence);
338-
} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
339-
struct xe_mmio *mmio = &gt->mmio;
340278

341-
if (IS_SRIOV_VF(xe))
342-
return 0;
343-
344-
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
345-
if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
346-
xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
347-
PVC_GUC_TLB_INV_DESC1_INVALIDATE);
348-
xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
349-
PVC_GUC_TLB_INV_DESC0_VALID);
350-
} else {
351-
xe_mmio_write32(mmio, GUC_TLB_INV_CR,
352-
GUC_TLB_INV_CR_INVALIDATE);
353-
}
354-
xe_force_wake_put(gt_to_fw(gt), fw_ref);
355-
}
356-
357-
return 0;
279+
return send_tlb_inval(&gt->uc.guc, action, ARRAY_SIZE(action));
358280
}
359281

360282
static int send_tlb_inval_all(struct xe_tlb_inval *tlb_inval,
@@ -369,7 +291,7 @@ static int send_tlb_inval_all(struct xe_tlb_inval *tlb_inval,
369291

370292
xe_gt_assert(gt, fence);
371293

372-
return send_tlb_inval(&gt->uc.guc, fence, action, ARRAY_SIZE(action));
294+
return send_tlb_inval(&gt->uc.guc, action, ARRAY_SIZE(action));
373295
}
374296

375297
/**
@@ -401,43 +323,17 @@ int xe_tlb_inval_all(struct xe_tlb_inval *tlb_inval,
401323
*/
402324
#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
403325

404-
/**
405-
* xe_tlb_inval_range - Issue a TLB invalidation on this GT for an address range
406-
* @tlb_inval: TLB invalidation client
407-
* @fence: invalidation fence which will be signal on TLB invalidation
408-
* completion
409-
* @start: start address
410-
* @end: end address
411-
* @asid: address space id
412-
*
413-
* Issue a range based TLB invalidation if supported, if not fallback to a full
414-
* TLB invalidation. Completion of TLB is asynchronous and caller can use
415-
* the invalidation fence to wait for completion.
416-
*
417-
* Return: Negative error code on error, 0 on success
418-
*/
419-
int xe_tlb_inval_range(struct xe_tlb_inval *tlb_inval,
420-
struct xe_tlb_inval_fence *fence, u64 start, u64 end,
421-
u32 asid)
326+
static int send_tlb_inval_ppgtt(struct xe_gt *gt, u64 start, u64 end,
327+
u32 asid, int seqno)
422328
{
423-
struct xe_gt *gt = tlb_inval->private;
424-
struct xe_device *xe = gt_to_xe(gt);
425329
#define MAX_TLB_INVALIDATION_LEN 7
426330
u32 action[MAX_TLB_INVALIDATION_LEN];
427331
u64 length = end - start;
428-
int len = 0, ret;
429-
430-
xe_gt_assert(gt, fence);
431-
432-
/* Execlists not supported */
433-
if (gt_to_xe(gt)->info.force_execlist) {
434-
__inval_fence_signal(xe, fence);
435-
return 0;
436-
}
332+
int len = 0;
437333

438334
action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
439-
action[len++] = 0; /* seqno, replaced in send_tlb_inval */
440-
if (!xe->info.has_range_tlb_inval ||
335+
action[len++] = seqno;
336+
if (!gt_to_xe(gt)->info.has_range_tlb_inval ||
441337
length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
442338
action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
443339
} else {
@@ -486,11 +382,115 @@ int xe_tlb_inval_range(struct xe_tlb_inval *tlb_inval,
486382

487383
xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);
488384

385+
return send_tlb_inval(&gt->uc.guc, action, len);
386+
}
387+
388+
static int __xe_tlb_inval_ggtt(struct xe_gt *gt,
389+
struct xe_tlb_inval_fence *fence)
390+
{
391+
int ret;
392+
393+
mutex_lock(&gt->tlb_inval.seqno_lock);
394+
xe_tlb_inval_fence_prep(fence);
395+
396+
ret = send_tlb_inval_ggtt(gt, fence->seqno);
397+
if (ret < 0)
398+
inval_fence_signal_unlocked(gt_to_xe(gt), fence);
399+
mutex_unlock(&gt->tlb_inval.seqno_lock);
400+
401+
/*
402+
* -ECANCELED indicates the CT is stopped for a GT reset. TLB caches
403+
* should be nuked on a GT reset so this error can be ignored.
404+
*/
405+
if (ret == -ECANCELED)
406+
return 0;
407+
408+
return ret;
409+
}
410+
411+
/**
412+
* xe_tlb_inval_ggtt - Issue a TLB invalidation on this GT for the GGTT
413+
* @tlb_inval: TLB invalidation client
414+
*
415+
* Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
416+
* synchronous.
417+
*
418+
* Return: 0 on success, negative error code on error
419+
*/
420+
int xe_tlb_inval_ggtt(struct xe_tlb_inval *tlb_inval)
421+
{
422+
struct xe_gt *gt = tlb_inval->private;
423+
struct xe_device *xe = gt_to_xe(gt);
424+
unsigned int fw_ref;
425+
426+
if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
427+
gt->uc.guc.submission_state.enabled) {
428+
struct xe_tlb_inval_fence fence;
429+
int ret;
430+
431+
xe_tlb_inval_fence_init(tlb_inval, &fence, true);
432+
ret = __xe_tlb_inval_ggtt(gt, &fence);
433+
if (ret)
434+
return ret;
435+
436+
xe_tlb_inval_fence_wait(&fence);
437+
} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
438+
struct xe_mmio *mmio = &gt->mmio;
439+
440+
if (IS_SRIOV_VF(xe))
441+
return 0;
442+
443+
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
444+
if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
445+
xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
446+
PVC_GUC_TLB_INV_DESC1_INVALIDATE);
447+
xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
448+
PVC_GUC_TLB_INV_DESC0_VALID);
449+
} else {
450+
xe_mmio_write32(mmio, GUC_TLB_INV_CR,
451+
GUC_TLB_INV_CR_INVALIDATE);
452+
}
453+
xe_force_wake_put(gt_to_fw(gt), fw_ref);
454+
}
455+
456+
return 0;
457+
}
458+
459+
/**
460+
* xe_tlb_inval_range - Issue a TLB invalidation on this GT for an address range
461+
* @tlb_inval: TLB invalidation client
462+
* @fence: invalidation fence which will be signal on TLB invalidation
463+
* completion
464+
* @start: start address
465+
* @end: end address
466+
* @asid: address space id
467+
*
468+
* Issue a range based TLB invalidation if supported, if not fallback to a full
469+
* TLB invalidation. Completion of TLB is asynchronous and caller can use
470+
* the invalidation fence to wait for completion.
471+
*
472+
* Return: Negative error code on error, 0 on success
473+
*/
474+
int xe_tlb_inval_range(struct xe_tlb_inval *tlb_inval,
475+
struct xe_tlb_inval_fence *fence, u64 start, u64 end,
476+
u32 asid)
477+
{
478+
struct xe_gt *gt = tlb_inval->private;
479+
struct xe_device *xe = gt_to_xe(gt);
480+
int ret;
481+
482+
xe_gt_assert(gt, fence);
483+
484+
/* Execlists not supported */
485+
if (xe->info.force_execlist) {
486+
__inval_fence_signal(xe, fence);
487+
return 0;
488+
}
489+
489490
mutex_lock(&gt->tlb_inval.seqno_lock);
490491
xe_tlb_inval_fence_prep(fence);
491492

492-
ret = send_tlb_inval(&gt->uc.guc, fence, action,
493-
ARRAY_SIZE(action));
493+
ret = send_tlb_inval_ppgtt(gt, start, end, asid, fence->seqno);
494494
if (ret < 0)
495495
inval_fence_signal_unlocked(xe, fence);
496496
mutex_unlock(&gt->tlb_inval.seqno_lock);

0 commit comments

Comments (0)