Commit c488a94

marysaka authored and Lyude committed
drm/nouveau/uvmm: Allow larger pages
Now that everything in UVMM knows about the variable page shift, we can
select larger values. The proposed approach relies on nouveau_bo::page
unless it would cause alignment issues (in which case we fall back to
searching for an appropriate shift).

Signed-off-by: Mary Guillemard <mary@mary.zone>
Co-developed-by: Mohamed Ahmed <mohamedahmedegypt2001@gmail.com>
Signed-off-by: Mohamed Ahmed <mohamedahmedegypt2001@gmail.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: James Jones <jajones@nvidia.com>
Signed-off-by: Lyude Paul <lyude@redhat.com>
Link: https://patch.msgid.link/20251110-nouveau-compv6-v6-2-83b05475f57c@mary.zone
1 parent 6d13495 · commit c488a94

1 file changed

drivers/gpu/drm/nouveau/nouveau_uvmm.c: 58 additions & 2 deletions
@@ -454,6 +454,62 @@ op_unmap_prepare_unwind(struct drm_gpuva *va)
         drm_gpuva_insert(va->vm, va);
 }
 
+static bool
+op_map_aligned_to_page_shift(const struct drm_gpuva_op_map *op, u8 page_shift)
+{
+        u64 non_page_bits = (1ULL << page_shift) - 1;
+
+        return (op->va.addr & non_page_bits) == 0 &&
+               (op->va.range & non_page_bits) == 0 &&
+               (op->gem.offset & non_page_bits) == 0;
+}
+
+static u8
+select_page_shift(struct nouveau_uvmm *uvmm, struct drm_gpuva_op_map *op)
+{
+        struct nouveau_bo *nvbo = nouveau_gem_object(op->gem.obj);
+
+        /* nouveau_bo_fixup_align() guarantees that the page size will be aligned
+         * for most cases, but it can't handle cases where userspace allocates with
+         * a size and then binds with a smaller granularity. So in order to avoid
+         * breaking old userspace, we need to ensure that the VA is actually
+         * aligned before using it, and if it isn't, then we downgrade to the first
+         * granularity that will fit, which is optimal from a correctness and
+         * performance perspective.
+         */
+        if (op_map_aligned_to_page_shift(op, nvbo->page))
+                return nvbo->page;
+
+        struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource);
+        struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+        int i;
+
+        /* If the given granularity doesn't fit, let's find one that will fit. */
+        for (i = 0; i < vmm->page_nr; i++) {
+                /* Ignore anything that is bigger or identical to the BO preference. */
+                if (vmm->page[i].shift >= nvbo->page)
+                        continue;
+
+                /* Skip incompatible domains. */
+                if ((mem->mem.type & NVIF_MEM_VRAM) && !vmm->page[i].vram)
+                        continue;
+                if ((mem->mem.type & NVIF_MEM_HOST) &&
+                    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+                        continue;
+
+                /* If it fits, return the proposed shift. */
+                if (op_map_aligned_to_page_shift(op, vmm->page[i].shift))
+                        return vmm->page[i].shift;
+        }
+
+        /* If we get here then nothing can reconcile the requirements. This should never
+         * happen.
+         */
+        drm_WARN_ONCE(op->gem.obj->dev, 1, "Could not find an appropriate page size.\n");
+
+        return PAGE_SHIFT;
+}
+
 static void
 nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
                                struct nouveau_uvma_prealloc *new,
@@ -506,7 +562,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
                 if (vmm_get_range)
                         nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
                                              vmm_get_range,
-                                             PAGE_SHIFT);
+                                             select_page_shift(uvmm, &op->map));
                 break;
         }
         case DRM_GPUVA_OP_REMAP: {
@@ -599,7 +655,7 @@ op_map_prepare(struct nouveau_uvmm *uvmm,
 
         uvma->region = args->region;
         uvma->kind = args->kind;
-        uvma->page_shift = PAGE_SHIFT;
+        uvma->page_shift = select_page_shift(uvmm, op);
 
         drm_gpuva_map(&uvmm->base, &uvma->va, op);
 