@@ -180,20 +180,6 @@ struct panthor_vm_op_ctx {
 		u64 range;
 	} va;
 
-	/**
-	 * @returned_vmas: List of panthor_vma objects returned after a VM operation.
-	 *
-	 * For unmap operations, this will contain all VMAs that were covered by the
-	 * specified VA range.
-	 *
-	 * For map operations, this will contain all VMAs that previously mapped to
-	 * the specified VA range.
-	 *
-	 * Those VMAs, and the resources they point to will be released as part of
-	 * the op_ctx cleanup operation.
-	 */
-	struct list_head returned_vmas;
-
 	/** @map: Fields specific to a map operation. */
 	struct {
 		/** @map.vm_bo: Buffer object to map. */
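
With `returned_vmas` gone, the driver no longer parks VMAs on the op context for deferred teardown; GPUVM core now tracks deferred vm_bo releases itself. The two entry points this commit leans on, as they are used in the hunks below (a sketch; the locking claims are inferred from the code this commit deletes, not stated by the diff):

```c
/* Queue a vm_bo reference drop from a section where taking the VM resv
 * or GEM gpuva locks is not allowed. Nothing is freed at this point.
 */
drm_gpuvm_bo_put_deferred(vm_bo);

/* Later, from a context where those locks may be taken, tear down
 * everything queued on this VM; GPUVM invokes the driver's
 * ->vm_bo_free() callback for each vm_bo whose last ref was dropped.
 */
drm_gpuvm_bo_deferred_cleanup(&vm->base);
```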
@@ -1053,47 +1039,18 @@ void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
 	mutex_unlock(&vm->mm_lock);
 }
 
-static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
+static void panthor_vm_bo_free(struct drm_gpuvm_bo *vm_bo)
 {
 	struct panthor_gem_object *bo = to_panthor_bo(vm_bo->obj);
-	struct drm_gpuvm *vm = vm_bo->vm;
-	bool unpin;
-
-	/* We must retain the GEM before calling drm_gpuvm_bo_put(),
-	 * otherwise the mutex might be destroyed while we hold it.
-	 * Same goes for the VM, since we take the VM resv lock.
-	 */
-	drm_gem_object_get(&bo->base.base);
-	drm_gpuvm_get(vm);
-
-	/* We take the resv lock to protect against concurrent accesses to the
-	 * gpuvm evicted/extobj lists that are modified in
-	 * drm_gpuvm_bo_destroy(), which is called if drm_gpuvm_bo_put()
-	 * releases the last vm_bo reference.
-	 * We take the BO GPUVA list lock to protect the vm_bo removal from the
-	 * GEM vm_bo list.
-	 */
-	dma_resv_lock(drm_gpuvm_resv(vm), NULL);
-	mutex_lock(&bo->base.base.gpuva.lock);
-	unpin = drm_gpuvm_bo_put(vm_bo);
-	mutex_unlock(&bo->base.base.gpuva.lock);
-	dma_resv_unlock(drm_gpuvm_resv(vm));
 
-	/* If the vm_bo object was destroyed, release the pin reference that
-	 * was held by this object.
-	 */
-	if (unpin && !drm_gem_is_imported(&bo->base.base))
+	if (!drm_gem_is_imported(&bo->base.base))
 		drm_gem_shmem_unpin(&bo->base);
-
-	drm_gpuvm_put(vm);
-	drm_gem_object_put(&bo->base.base);
+	kfree(vm_bo);
 }
 
 static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
 				      struct panthor_vm *vm)
 {
-	struct panthor_vma *vma, *tmp_vma;
-
 	u32 remaining_pt_count = op_ctx->rsvd_page_tables.count -
 				 op_ctx->rsvd_page_tables.ptr;
 
@@ -1106,16 +1063,12 @@ static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
 	kfree(op_ctx->rsvd_page_tables.pages);
 
 	if (op_ctx->map.vm_bo)
-		panthor_vm_bo_put(op_ctx->map.vm_bo);
+		drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo);
 
 	for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++)
 		kfree(op_ctx->preallocated_vmas[i]);
 
-	list_for_each_entry_safe(vma, tmp_vma, &op_ctx->returned_vmas, node) {
-		list_del(&vma->node);
-		panthor_vm_bo_put(vma->base.vm_bo);
-		kfree(vma);
-	}
+	drm_gpuvm_bo_deferred_cleanup(&vm->base);
 }
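
Each queued drop (`drm_gpuvm_bo_put_deferred()` here, `drm_gpuva_unlink_defer()` in the sm_step handlers further down) is flushed by the single `drm_gpuvm_bo_deferred_cleanup()` call at the end of op-ctx teardown, which is what ends up invoking `panthor_vm_bo_free()`. Unlike the deleted `panthor_vm_bo_put()`, the callback needs no GEM/VM ref dance and no resv lock, since it runs once the vm_bo is already unreachable (inferred from the deleted locking comments). The resulting cleanup path, sketched:

```c
/* Per-bind-op cleanup after this commit (sketch):
 *
 *   panthor_vm_cleanup_op_ctx()
 *     drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo)  queue (if still set)
 *     drm_gpuvm_bo_deferred_cleanup(&vm->base)      flush the VM's queue
 *       -> panthor_vm_bo_free(vm_bo)                for each dead vm_bo:
 *            drm_gem_shmem_unpin(&bo->base)         drop the prepare-time pin
 *            kfree(vm_bo)                           free the vm_bo itself
 */
```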
 
 static struct panthor_vma *
@@ -1208,7 +1161,6 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
 		return -EINVAL;
 
 	memset(op_ctx, 0, sizeof(*op_ctx));
-	INIT_LIST_HEAD(&op_ctx->returned_vmas);
 	op_ctx->flags = flags;
 	op_ctx->va.range = size;
 	op_ctx->va.addr = va;
@@ -1219,7 +1171,9 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
 
 	if (!drm_gem_is_imported(&bo->base.base)) {
 		/* Pre-reserve the BO pages, so the map operation doesn't have to
-		 * allocate.
+		 * allocate. This pin is dropped in panthor_vm_bo_free(), so
+		 * once we have successfully called drm_gpuvm_bo_create(),
+		 * GPUVM will take care of dropping the pin for us.
 		 */
 		ret = drm_gem_shmem_pin(&bo->base);
 		if (ret)
@@ -1258,16 +1212,6 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
 	mutex_unlock(&bo->base.base.gpuva.lock);
 	dma_resv_unlock(panthor_vm_resv(vm));
 
-	/* If a vm_bo for this <VM,BO> combination exists, it already
-	 * retains a pin ref, and we can release the one we took earlier.
-	 *
-	 * If our pre-allocated vm_bo is picked, it now retains the pin ref,
-	 * which will be released in panthor_vm_bo_put().
-	 */
-	if (preallocated_vm_bo != op_ctx->map.vm_bo &&
-	    !drm_gem_is_imported(&bo->base.base))
-		drm_gem_shmem_unpin(&bo->base);
-
 	op_ctx->map.bo_offset = offset;
 
 	/* L1, L2 and L3 page tables.
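
The deleted early-unpin block is redundant rather than leaked: the pin taken at prepare time now travels with the vm_bo, and `panthor_vm_bo_free()` drops it when that vm_bo dies. When an existing <VM,BO> vm_bo is picked over the pre-allocated one, the unused pre-allocated vm_bo is released through the same deferred path, taking its pin with it (inferred from the updated pin comment above; the obtain logic itself is outside this diff). Pin lifetime, sketched:

```c
/* Pin ownership after this commit (sketch; one pin per vm_bo):
 *
 *   panthor_vm_prepare_map_op_ctx():
 *     drm_gem_shmem_pin(&bo->base);     pin taken up front
 *     drm_gpuvm_bo_create(...);         on success, the vm_bo owns the pin
 *
 *   panthor_vm_bo_free():               whenever that vm_bo dies
 *     drm_gem_shmem_unpin(&bo->base);   pin dropped exactly once
 */
```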
@@ -1315,7 +1259,6 @@ static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
 	int ret;
 
 	memset(op_ctx, 0, sizeof(*op_ctx));
-	INIT_LIST_HEAD(&op_ctx->returned_vmas);
 	op_ctx->va.range = size;
 	op_ctx->va.addr = va;
 	op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP;
@@ -1363,7 +1306,6 @@ static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx
 					       struct panthor_vm *vm)
 {
 	memset(op_ctx, 0, sizeof(*op_ctx));
-	INIT_LIST_HEAD(&op_ctx->returned_vmas);
 	op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY;
 }
 
@@ -2009,26 +1951,13 @@ static void panthor_vma_link(struct panthor_vm *vm,
 
 	mutex_lock(&bo->base.base.gpuva.lock);
 	drm_gpuva_link(&vma->base, vm_bo);
-	drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
 	mutex_unlock(&bo->base.base.gpuva.lock);
 }
 
-static void panthor_vma_unlink(struct panthor_vm *vm,
-			       struct panthor_vma *vma)
+static void panthor_vma_unlink(struct panthor_vma *vma)
 {
-	struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
-	struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
-
-	mutex_lock(&bo->base.base.gpuva.lock);
-	drm_gpuva_unlink(&vma->base);
-	mutex_unlock(&bo->base.base.gpuva.lock);
-
-	/* drm_gpuva_unlink() releases the vm_bo, but we manually retained it
-	 * when entering this function, so we can implement deferred VMA
-	 * destruction. Re-assign it here.
-	 */
-	vma->base.vm_bo = vm_bo;
-	list_add_tail(&vma->node, &vm->op_ctx->returned_vmas);
+	drm_gpuva_unlink_defer(&vma->base);
+	kfree(vma);
 }
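
`panthor_vma_unlink()` collapses to two lines because its two jobs are now split: `drm_gpuva_unlink_defer()` queues the vm_bo reference drop for the later `drm_gpuvm_bo_deferred_cleanup()` flush, and the VMA struct itself no longer needs to outlive the unlink, so it is freed on the spot instead of being parked on `returned_vmas`. The explicit `gpuva.lock` section disappears as well, which implies the helper locks the GEM gpuva list internally (an assumption; the helper is not part of this diff). Roughly what the single call replaces:

```c
/* Approximate open-coded equivalent of drm_gpuva_unlink_defer(),
 * based on the code this hunk deletes (sketch; locking assumed to
 * move inside the helper):
 */
struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);

mutex_lock(&bo->base.base.gpuva.lock);
drm_gpuva_unlink(&vma->base);	/* would drop the vm_bo ref inline... */
mutex_unlock(&bo->base.base.gpuva.lock);
/* ...whereas the _defer variant queues that drop instead. */
```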
 
 static void panthor_vma_init(struct panthor_vma *vma, u32 flags)
@@ -2060,12 +1989,12 @@ static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
 	if (ret)
 		return ret;
 
-	/* Ref owned by the mapping now, clear the obj field so we don't release the
-	 * pinning/obj ref behind GPUVA's back.
-	 */
 	drm_gpuva_map(&vm->base, &vma->base, &op->map);
 	panthor_vma_link(vm, vma, op_ctx->map.vm_bo);
+
+	drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo);
 	op_ctx->map.vm_bo = NULL;
+
 	return 0;
 }
 
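
The deleted comment described the old contract, where `panthor_vma_link()` immediately put the caller's vm_bo reference. With that put gone (see the `drm_WARN_ON` removal above), the reference `drm_gpuva_link()` takes belongs to the VMA, and the op context's own reference becomes surplus once linking succeeds: it is queued for release with `drm_gpuvm_bo_put_deferred()`, and `op_ctx->map.vm_bo` is cleared so `panthor_vm_cleanup_op_ctx()` cannot put it twice. Refcounts through a map step, sketched:

```c
/* vm_bo refcount across panthor_gpuva_sm_step_map() (sketch):
 *
 *   on entry:      op_ctx->map.vm_bo holds one ref            count: 1
 *   vma_link:      drm_gpuva_link() takes a ref for the VMA   count: 2
 *   put_deferred:  op context's ref queued for release        count: 1
 *   map.vm_bo = NULL: the cleanup path won't put it a second time
 */
```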
@@ -2104,16 +2033,14 @@ static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
 		 * owned by the old mapping which will be released when this
 		 * mapping is destroyed, we need to grab a ref here.
 		 */
-		panthor_vma_link(vm, prev_vma,
-				 drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
+		panthor_vma_link(vm, prev_vma, op->remap.unmap->va->vm_bo);
 	}
 
 	if (next_vma) {
-		panthor_vma_link(vm, next_vma,
-				 drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
+		panthor_vma_link(vm, next_vma, op->remap.unmap->va->vm_bo);
 	}
 
-	panthor_vma_unlink(vm, unmap_vma);
+	panthor_vma_unlink(unmap_vma);
 	return 0;
 }
 
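
Same reasoning on the remap path: the extra `drm_gpuvm_bo_get()` calls existed only to feed the reference the old `panthor_vma_link()` consumed. Now prev/next can borrow `op->remap.unmap->va->vm_bo` directly, `drm_gpuva_link()` takes the per-VMA references itself (which is presumably what the retained "grab a ref here" comment now refers to), and `panthor_vma_unlink()` defers the unmapped VMA's drop:

```c
/* vm_bo references across a remap step (sketch):
 *
 *   old: drm_gpuvm_bo_get() per re-linked VMA, immediately
 *        consumed by the drm_gpuvm_bo_put() in panthor_vma_link()
 *   new: panthor_vma_link() just links; drm_gpuva_link() takes the
 *        per-VMA ref, and panthor_vma_unlink() hands the unmapped
 *        VMA's drop to drm_gpuvm_bo_deferred_cleanup()
 */
```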
@@ -2130,12 +2057,13 @@ static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op,
 		return ret;
 
 	drm_gpuva_unmap(&op->unmap);
-	panthor_vma_unlink(vm, unmap_vma);
+	panthor_vma_unlink(unmap_vma);
 	return 0;
 }
 
 static const struct drm_gpuvm_ops panthor_gpuvm_ops = {
 	.vm_free = panthor_vm_free,
+	.vm_bo_free = panthor_vm_bo_free,
 	.sm_step_map = panthor_gpuva_sm_step_map,
 	.sm_step_remap = panthor_gpuva_sm_step_remap,
 	.sm_step_unmap = panthor_gpuva_sm_step_unmap,
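
End to end, the flow this commit lands on (sketch; the call ordering is inferred from the hunks above, with panthor driving the sm_step handlers through GPUVM's split/merge helpers):

```c
/* A VM bind operation after this commit (sketch):
 *
 *   drm_gpuvm_sm_map()/drm_gpuvm_sm_unmap()   under resv/gpuva locks:
 *     panthor_gpuva_sm_step_map()               link VMA, defer-put map vm_bo
 *     panthor_gpuva_sm_step_remap()             re-link prev/next,
 *                                               defer-unlink the old VMA
 *     panthor_gpuva_sm_step_unmap()             defer-unlink the unmapped VMA
 *
 *   panthor_vm_cleanup_op_ctx()               outside those locks:
 *     drm_gpuvm_bo_deferred_cleanup()           flush; calls .vm_bo_free()
 *                                               (panthor_vm_bo_free) per
 *                                               dead vm_bo
 */
```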