Skip to content

Commit 0951dce

Browse files
Jonathan-Cavitt
authored and Nirmoy Das committed
drm/i915/gem: Make i915_gem_shrinker multi-gt aware
Where applicable, use for_each_gt instead of to_gt in the i915_gem_shrinker functions to make them apply to more than just the primary GT. Specifically, this ensure i915_gem_shrink_all retires all requests across all GTs, and this makes i915_gem_shrinker_vmap unmap VMAs from all GTs. v2: Pass correct GT to intel_gt_retire_requests(Andrzej). v3: Remove unnecessary braces(Andi) v4: Undo v3 to fix build failure. Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com> Signed-off-by: Nirmoy Das <nirmoy.das@intel.com> Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com> Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20230926093028.23614-1-nirmoy.das@intel.com
1 parent 37d6235 commit 0951dce

1 file changed

Lines changed: 26 additions & 18 deletions

File tree

drivers/gpu/drm/i915/gem/i915_gem_shrinker.c

Lines changed: 26 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
#include <linux/vmalloc.h>
1515

1616
#include "gt/intel_gt_requests.h"
17+
#include "gt/intel_gt.h"
1718

1819
#include "i915_trace.h"
1920

@@ -119,7 +120,8 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
119120
intel_wakeref_t wakeref = 0;
120121
unsigned long count = 0;
121122
unsigned long scanned = 0;
122-
int err = 0;
123+
int err = 0, i = 0;
124+
struct intel_gt *gt;
123125

124126
/* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
125127
bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);
@@ -147,9 +149,11 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
147149
* what we can do is give them a kick so that we do not keep idle
148150
* contexts around longer than is necessary.
149151
*/
150-
if (shrink & I915_SHRINK_ACTIVE)
151-
/* Retire requests to unpin all idle contexts */
152-
intel_gt_retire_requests(to_gt(i915));
152+
if (shrink & I915_SHRINK_ACTIVE) {
153+
for_each_gt(gt, i915, i)
154+
/* Retire requests to unpin all idle contexts */
155+
intel_gt_retire_requests(gt);
156+
}
153157

154158
/*
155159
* As we may completely rewrite the (un)bound list whilst unbinding
@@ -389,6 +393,8 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
389393
struct i915_vma *vma, *next;
390394
unsigned long freed_pages = 0;
391395
intel_wakeref_t wakeref;
396+
struct intel_gt *gt;
397+
int i;
392398

393399
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
394400
freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
@@ -397,24 +403,26 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
397403
I915_SHRINK_VMAPS);
398404

399405
/* We also want to clear any cached iomaps as they wrap vmap */
400-
mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
401-
list_for_each_entry_safe(vma, next,
402-
&to_gt(i915)->ggtt->vm.bound_list, vm_link) {
403-
unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
404-
struct drm_i915_gem_object *obj = vma->obj;
405-
406-
if (!vma->iomap || i915_vma_is_active(vma))
407-
continue;
406+
for_each_gt(gt, i915, i) {
407+
mutex_lock(&gt->ggtt->vm.mutex);
408+
list_for_each_entry_safe(vma, next,
409+
&gt->ggtt->vm.bound_list, vm_link) {
410+
unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
411+
struct drm_i915_gem_object *obj = vma->obj;
412+
413+
if (!vma->iomap || i915_vma_is_active(vma))
414+
continue;
408415

409-
if (!i915_gem_object_trylock(obj, NULL))
410-
continue;
416+
if (!i915_gem_object_trylock(obj, NULL))
417+
continue;
411418

412-
if (__i915_vma_unbind(vma) == 0)
413-
freed_pages += count;
419+
if (__i915_vma_unbind(vma) == 0)
420+
freed_pages += count;
414421

415-
i915_gem_object_unlock(obj);
422+
i915_gem_object_unlock(obj);
423+
}
424+
mutex_unlock(&gt->ggtt->vm.mutex);
416425
}
417-
mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
418426

419427
*(unsigned long *)ptr += freed_pages;
420428
return NOTIFY_DONE;

0 commit comments

Comments
 (0)