
Commit a00839f

Darksonn authored and jannau committed
panthor: use drm_gpuva_unlink_defer()
Instead of manually deferring cleanup of vm_bos, use the new GPUVM
infrastructure for doing so. To avoid manual management of vm_bo refcounts,
the panthor_vma_link() and panthor_vma_unlink() methods are changed to get
and put a vm_bo refcount on the vm_bo. This simplifies the code a lot.

I preserved the behavior where panthor_gpuva_sm_step_map() drops the
refcount right away rather than letting panthor_vm_cleanup_op_ctx() do it
later.

Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Link: https://lore.kernel.org/r/20251006-vmbo-defer-v4-2-30cbd2c05adb@google.com
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
1 parent e13d2ee commit a00839f
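
In outline: instead of collecting unmapped VMAs on a driver-local
returned_vmas list and releasing their vm_bos by hand, the driver now relies
on the deferred-release helpers added to GPUVM earlier in this series. A
minimal sketch of the resulting flow, using only the calls that appear in the
diff below (the GPUVM-side semantics are assumptions based on this series,
not spelled out here):

	#include <drm/drm_gpuvm.h>

	static void example_release(struct drm_gpuvm *gpuvm,
				    struct drm_gpuvm_bo *vm_bo)
	{
		/* Drop a vm_bo reference without taking the resv or
		 * gpuva locks; a vm_bo that hits refcount zero is
		 * queued rather than destroyed inline.
		 */
		drm_gpuvm_bo_put_deferred(vm_bo);

		/* Drain the queue: each dead vm_bo is handed to the
		 * driver's vm_bo_free callback (panthor_vm_bo_free
		 * below).
		 */
		drm_gpuvm_bo_deferred_cleanup(gpuvm);
	}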

1 file changed

drivers/gpu/drm/panthor/panthor_mmu.c
19 additions & 91 deletions
@@ -181,20 +181,6 @@ struct panthor_vm_op_ctx {
 		u64 range;
 	} va;
 
-	/**
-	 * @returned_vmas: List of panthor_vma objects returned after a VM operation.
-	 *
-	 * For unmap operations, this will contain all VMAs that were covered by the
-	 * specified VA range.
-	 *
-	 * For map operations, this will contain all VMAs that previously mapped to
-	 * the specified VA range.
-	 *
-	 * Those VMAs, and the resources they point to will be released as part of
-	 * the op_ctx cleanup operation.
-	 */
-	struct list_head returned_vmas;
-
 	/** @map: Fields specific to a map operation. */
 	struct {
 		/** @map.vm_bo: Buffer object to map. */
@@ -1081,47 +1067,18 @@ void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
 	mutex_unlock(&vm->mm_lock);
 }
 
-static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
+static void panthor_vm_bo_free(struct drm_gpuvm_bo *vm_bo)
 {
 	struct panthor_gem_object *bo = to_panthor_bo(vm_bo->obj);
-	struct drm_gpuvm *vm = vm_bo->vm;
-	bool unpin;
-
-	/* We must retain the GEM before calling drm_gpuvm_bo_put(),
-	 * otherwise the mutex might be destroyed while we hold it.
-	 * Same goes for the VM, since we take the VM resv lock.
-	 */
-	drm_gem_object_get(&bo->base.base);
-	drm_gpuvm_get(vm);
-
-	/* We take the resv lock to protect against concurrent accesses to the
-	 * gpuvm evicted/extobj lists that are modified in
-	 * drm_gpuvm_bo_destroy(), which is called if drm_gpuvm_bo_put()
-	 * releases the last vm_bo reference.
-	 * We take the BO GPUVA list lock to protect the vm_bo removal from the
-	 * GEM vm_bo list.
-	 */
-	dma_resv_lock(drm_gpuvm_resv(vm), NULL);
-	mutex_lock(&bo->base.base.gpuva.lock);
-	unpin = drm_gpuvm_bo_put(vm_bo);
-	mutex_unlock(&bo->base.base.gpuva.lock);
-	dma_resv_unlock(drm_gpuvm_resv(vm));
 
-	/* If the vm_bo object was destroyed, release the pin reference that
-	 * was held by this object.
-	 */
-	if (unpin && !drm_gem_is_imported(&bo->base.base))
+	if (!drm_gem_is_imported(&bo->base.base))
 		drm_gem_shmem_unpin(&bo->base);
-
-	drm_gpuvm_put(vm);
-	drm_gem_object_put(&bo->base.base);
+	kfree(vm_bo);
 }
 
 static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
 				      struct panthor_vm *vm)
 {
-	struct panthor_vma *vma, *tmp_vma;
-
 	u32 remaining_pt_count = op_ctx->rsvd_page_tables.count -
 				 op_ctx->rsvd_page_tables.ptr;
 
@@ -1134,16 +1091,12 @@ static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
 	kfree(op_ctx->rsvd_page_tables.pages);
 
 	if (op_ctx->map.vm_bo)
-		panthor_vm_bo_put(op_ctx->map.vm_bo);
+		drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo);
 
 	for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++)
 		kfree(op_ctx->preallocated_vmas[i]);
 
-	list_for_each_entry_safe(vma, tmp_vma, &op_ctx->returned_vmas, node) {
-		list_del(&vma->node);
-		panthor_vm_bo_put(vma->base.vm_bo);
-		kfree(vma);
-	}
+	drm_gpuvm_bo_deferred_cleanup(&vm->base);
 }
 
 static struct panthor_vma *
@@ -1236,7 +1189,6 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
 		return -EINVAL;
 
 	memset(op_ctx, 0, sizeof(*op_ctx));
-	INIT_LIST_HEAD(&op_ctx->returned_vmas);
 	op_ctx->flags = flags;
 	op_ctx->va.range = size;
 	op_ctx->va.addr = va;
@@ -1247,7 +1199,9 @@
 
 	if (!drm_gem_is_imported(&bo->base.base)) {
 		/* Pre-reserve the BO pages, so the map operation doesn't have to
-		 * allocate.
+		 * allocate. This pin is dropped in panthor_vm_bo_free(), so
+		 * once we have successfully called drm_gpuvm_bo_create(),
+		 * GPUVM will take care of dropping the pin for us.
 		 */
 		ret = drm_gem_shmem_pin(&bo->base);
 		if (ret)
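
The extended comment nails down the pin's lifetime: it is taken once here,
owned by the vm_bo as soon as drm_gpuvm_bo_create() succeeds, and dropped
exactly once in panthor_vm_bo_free(). Both ends of that pairing, condensed
into a sketch (not a literal excerpt; error unwinding and the vm_bo creation
step are omitted):

	/* Prepare side: pin up front so the map step cannot fail on
	 * allocation (from panthor_vm_prepare_map_op_ctx()).
	 */
	static int example_prepare_pin(struct panthor_gem_object *bo)
	{
		if (!drm_gem_is_imported(&bo->base.base))
			return drm_gem_shmem_pin(&bo->base);
		return 0;
	}

	/* Teardown side: the matching unpin runs exactly once, when
	 * GPUVM hands the dead vm_bo to the driver's vm_bo_free
	 * callback.
	 */
	static void example_vm_bo_free(struct drm_gpuvm_bo *vm_bo)
	{
		struct panthor_gem_object *bo = to_panthor_bo(vm_bo->obj);

		if (!drm_gem_is_imported(&bo->base.base))
			drm_gem_shmem_unpin(&bo->base);
		kfree(vm_bo);
	}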
@@ -1286,16 +1240,6 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
 	mutex_unlock(&bo->base.base.gpuva.lock);
 	dma_resv_unlock(panthor_vm_resv(vm));
 
-	/* If a vm_bo for this <VM,BO> combination exists, it already
-	 * retains a pin ref, and we can release the one we took earlier.
-	 *
-	 * If our pre-allocated vm_bo is picked, it now retains the pin ref,
-	 * which will be released in panthor_vm_bo_put().
-	 */
-	if (preallocated_vm_bo != op_ctx->map.vm_bo &&
-	    !drm_gem_is_imported(&bo->base.base))
-		drm_gem_shmem_unpin(&bo->base);
-
 	op_ctx->map.bo_offset = offset;
 
 	/* L1, L2 and L3 page tables.
@@ -1343,7 +1287,6 @@ static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
 	int ret;
 
 	memset(op_ctx, 0, sizeof(*op_ctx));
-	INIT_LIST_HEAD(&op_ctx->returned_vmas);
 	op_ctx->va.range = size;
 	op_ctx->va.addr = va;
 	op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP;
@@ -1391,7 +1334,6 @@ static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx
 						struct panthor_vm *vm)
 {
 	memset(op_ctx, 0, sizeof(*op_ctx));
-	INIT_LIST_HEAD(&op_ctx->returned_vmas);
 	op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY;
 }
 
@@ -2037,26 +1979,13 @@ static void panthor_vma_link(struct panthor_vm *vm,
 
 	mutex_lock(&bo->base.base.gpuva.lock);
 	drm_gpuva_link(&vma->base, vm_bo);
-	drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
 	mutex_unlock(&bo->base.base.gpuva.lock);
 }
 
-static void panthor_vma_unlink(struct panthor_vm *vm,
-			       struct panthor_vma *vma)
+static void panthor_vma_unlink(struct panthor_vma *vma)
 {
-	struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
-	struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
-
-	mutex_lock(&bo->base.base.gpuva.lock);
-	drm_gpuva_unlink(&vma->base);
-	mutex_unlock(&bo->base.base.gpuva.lock);
-
-	/* drm_gpuva_unlink() releases the vm_bo, but we manually retained it
-	 * when entering this function, so we can implement deferred VMA
-	 * destruction. Re-assign it here.
-	 */
-	vma->base.vm_bo = vm_bo;
-	list_add_tail(&vma->node, &vm->op_ctx->returned_vmas);
+	drm_gpuva_unlink_defer(&vma->base);
+	kfree(vma);
 }
 
 static void panthor_vma_init(struct panthor_vma *vma, u32 flags)
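
After this hunk the reference rule is simple: drm_gpuva_link() takes its own
vm_bo reference for the mapping, and drm_gpuva_unlink_defer() drops it while
deferring any resulting vm_bo destruction, so the panthor_vma itself can be
freed on the spot and the sm_step handlers below no longer need their manual
drm_gpuvm_bo_get() calls. A sketch of the resulting pair (assuming, per this
series, that drm_gpuva_unlink_defer() takes the GEM gpuva.lock internally,
which is why no locking appears on the unlink side):

	static void example_vma_link(struct panthor_vm *vm,
				     struct panthor_vma *vma,
				     struct drm_gpuvm_bo *vm_bo)
	{
		struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);

		mutex_lock(&bo->base.base.gpuva.lock);
		/* takes a vm_bo reference owned by the mapping */
		drm_gpuva_link(&vma->base, vm_bo);
		mutex_unlock(&bo->base.base.gpuva.lock);
	}

	static void example_vma_unlink(struct panthor_vma *vma)
	{
		/* drops the mapping's vm_bo reference; destruction, if
		 * any, is queued for drm_gpuvm_bo_deferred_cleanup()
		 */
		drm_gpuva_unlink_defer(&vma->base);
		kfree(vma);
	}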
@@ -2088,12 +2017,12 @@ static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
 	if (ret)
 		return ret;
 
-	/* Ref owned by the mapping now, clear the obj field so we don't release the
-	 * pinning/obj ref behind GPUVA's back.
-	 */
 	drm_gpuva_map(&vm->base, &vma->base, &op->map);
 	panthor_vma_link(vm, vma, op_ctx->map.vm_bo);
+
+	drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo);
 	op_ctx->map.vm_bo = NULL;
+
 	return 0;
 }
 
@@ -2132,16 +2061,14 @@ static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
 		 * owned by the old mapping which will be released when this
 		 * mapping is destroyed, we need to grab a ref here.
 		 */
-		panthor_vma_link(vm, prev_vma,
-				 drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
+		panthor_vma_link(vm, prev_vma, op->remap.unmap->va->vm_bo);
 	}
 
 	if (next_vma) {
-		panthor_vma_link(vm, next_vma,
-				 drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
+		panthor_vma_link(vm, next_vma, op->remap.unmap->va->vm_bo);
 	}
 
-	panthor_vma_unlink(vm, unmap_vma);
+	panthor_vma_unlink(unmap_vma);
 	return 0;
 }
 
@@ -2158,12 +2085,13 @@ static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op,
 		return ret;
 
 	drm_gpuva_unmap(&op->unmap);
-	panthor_vma_unlink(vm, unmap_vma);
+	panthor_vma_unlink(unmap_vma);
 	return 0;
 }
 
 static const struct drm_gpuvm_ops panthor_gpuvm_ops = {
 	.vm_free = panthor_vm_free,
+	.vm_bo_free = panthor_vm_bo_free,
 	.sm_step_map = panthor_gpuva_sm_step_map,
 	.sm_step_remap = panthor_gpuva_sm_step_remap,
 	.sm_step_unmap = panthor_gpuva_sm_step_unmap,
