Skip to content

Commit f812250

Browse files
committed
Merge branch 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux into drm-next
Fixes address space collisions in some edge cases when userspace is using softpin, and cleans up the MMU reference handling a bit. Signed-off-by: Dave Airlie <airlied@redhat.com> From: Lucas Stach <l.stach@pengutronix.de> Link: https://patchwork.freedesktop.org/patch/msgid/ffae9f7d03ca7a9e00da16d5910ae810befd3c5a.camel@pengutronix.de
2 parents 5756c29 + 2829a9f commit f812250

2 files changed

Lines changed: 55 additions & 10 deletions

File tree

drivers/gpu/drm/etnaviv/etnaviv_gem.c

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -294,18 +294,15 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
294294
list_del(&mapping->obj_node);
295295
}
296296

297-
mapping->context = etnaviv_iommu_context_get(mmu_context);
298297
mapping->use = 1;
299298

300299
ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
301300
mmu_context->global->memory_base,
302301
mapping, va);
303-
if (ret < 0) {
304-
etnaviv_iommu_context_put(mmu_context);
302+
if (ret < 0)
305303
kfree(mapping);
306-
} else {
304+
else
307305
list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
308-
}
309306

310307
out:
311308
mutex_unlock(&etnaviv_obj->lock);
@@ -500,10 +497,8 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
500497

501498
WARN_ON(mapping->use);
502499

503-
if (context) {
500+
if (context)
504501
etnaviv_iommu_unmap_gem(context, mapping);
505-
etnaviv_iommu_context_put(context);
506-
}
507502

508503
list_del(&mapping->obj_node);
509504
kfree(mapping);

drivers/gpu/drm/etnaviv/etnaviv_mmu.c

Lines changed: 52 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,8 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
9292
da += bytes;
9393
}
9494

95+
context->flush_seq++;
96+
9597
return 0;
9698

9799
fail:
@@ -117,6 +119,8 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
117119

118120
da += bytes;
119121
}
122+
123+
context->flush_seq++;
120124
}
121125

122126
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
@@ -219,8 +223,47 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
219223
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
220224
struct drm_mm_node *node, size_t size, u64 va)
221225
{
226+
struct etnaviv_vram_mapping *m, *n;
227+
struct drm_mm_node *scan_node;
228+
LIST_HEAD(scan_list);
229+
int ret;
230+
222231
lockdep_assert_held(&context->lock);
223232

233+
ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
234+
va + size, DRM_MM_INSERT_LOWEST);
235+
if (ret != -ENOSPC)
236+
return ret;
237+
238+
/*
239+
* When we can't insert the node, due to an existing mapping blocking
240+
* the address space, there are two possible reasons:
241+
* 1. Userspace genuinely messed up and tried to reuse address space
242+
* before the last job using this VMA has finished executing.
243+
* 2. The existing buffer mappings are idle, but the buffers are not
244+
* destroyed yet (likely due to being referenced by another context) in
245+
* which case the mappings will not be cleaned up and we must reap them
246+
* here to make space for the new mapping.
247+
*/
248+
249+
drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
250+
m = container_of(scan_node, struct etnaviv_vram_mapping,
251+
vram_node);
252+
253+
if (m->use)
254+
return -ENOSPC;
255+
256+
list_add(&m->scan_node, &scan_list);
257+
}
258+
259+
list_for_each_entry_safe(m, n, &scan_list, scan_node) {
260+
etnaviv_iommu_remove_mapping(context, m);
261+
etnaviv_iommu_context_put(m->context);
262+
m->context = NULL;
263+
list_del_init(&m->mmu_node);
264+
list_del_init(&m->scan_node);
265+
}
266+
224267
return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
225268
va + size, DRM_MM_INSERT_LOWEST);
226269
}
@@ -245,6 +288,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
245288
iova = sg_dma_address(sgt->sgl) - memory_base;
246289
if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
247290
mapping->iova = iova;
291+
mapping->context = etnaviv_iommu_context_get(context);
248292
list_add_tail(&mapping->mmu_node, &context->mappings);
249293
ret = 0;
250294
goto unlock;
@@ -271,8 +315,8 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
271315
goto unlock;
272316
}
273317

318+
mapping->context = etnaviv_iommu_context_get(context);
274319
list_add_tail(&mapping->mmu_node, &context->mappings);
275-
context->flush_seq++;
276320
unlock:
277321
mutex_unlock(&context->lock);
278322

@@ -286,13 +330,19 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
286330

287331
mutex_lock(&context->lock);
288332

333+
/* Bail if the mapping has been reaped by another thread */
334+
if (!mapping->context) {
335+
mutex_unlock(&context->lock);
336+
return;
337+
}
338+
289339
/* If the vram node is on the mm, unmap and remove the node */
290340
if (mapping->vram_node.mm == &context->mm)
291341
etnaviv_iommu_remove_mapping(context, mapping);
292342

293343
list_del(&mapping->mmu_node);
294-
context->flush_seq++;
295344
mutex_unlock(&context->lock);
345+
etnaviv_iommu_context_put(context);
296346
}
297347

298348
static void etnaviv_iommu_context_free(struct kref *kref)

0 commit comments

Comments
 (0)