Skip to content

Commit 6d13495

Browse files
marysaka authored and Lyude committed
drm/nouveau/uvmm: Prepare for larger pages
Currently memory allocated by VM_BIND uAPI can only have a granuality matching PAGE_SIZE (4KiB in common case) To have a better memory management and to allow big (64KiB) and huge (2MiB) pages later in the series, we are now passing the page shift all around the internals of UVMM. Signed-off-by: Mary Guillemard <mary@mary.zone> Co-developed-by: Mohamed Ahmed <mohamedahmedegypt2001@gmail.com> Signed-off-by: Mohamed Ahmed <mohamedahmedegypt2001@gmail.com> Reviewed-by: Lyude Paul <lyude@redhat.com> Reviewed-by: James Jones <jajones@nvidia.com> Signed-off-by: Lyude Paul <lyude@redhat.com> Link: https://patch.msgid.link/20251110-nouveau-compv6-v6-1-83b05475f57c@mary.zone
1 parent 86db652 commit 6d13495

2 files changed

Lines changed: 30 additions & 17 deletions

File tree

drivers/gpu/drm/nouveau/nouveau_uvmm.c

Lines changed: 29 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -107,34 +107,34 @@ nouveau_uvmm_vmm_sparse_unref(struct nouveau_uvmm *uvmm,
107107

108108
static int
109109
nouveau_uvmm_vmm_get(struct nouveau_uvmm *uvmm,
110-
u64 addr, u64 range)
110+
u64 addr, u64 range, u8 page_shift)
111111
{
112112
struct nvif_vmm *vmm = &uvmm->vmm.vmm;
113113

114-
return nvif_vmm_raw_get(vmm, addr, range, PAGE_SHIFT);
114+
return nvif_vmm_raw_get(vmm, addr, range, page_shift);
115115
}
116116

117117
static int
118118
nouveau_uvmm_vmm_put(struct nouveau_uvmm *uvmm,
119-
u64 addr, u64 range)
119+
u64 addr, u64 range, u8 page_shift)
120120
{
121121
struct nvif_vmm *vmm = &uvmm->vmm.vmm;
122122

123-
return nvif_vmm_raw_put(vmm, addr, range, PAGE_SHIFT);
123+
return nvif_vmm_raw_put(vmm, addr, range, page_shift);
124124
}
125125

126126
static int
127127
nouveau_uvmm_vmm_unmap(struct nouveau_uvmm *uvmm,
128-
u64 addr, u64 range, bool sparse)
128+
u64 addr, u64 range, u8 page_shift, bool sparse)
129129
{
130130
struct nvif_vmm *vmm = &uvmm->vmm.vmm;
131131

132-
return nvif_vmm_raw_unmap(vmm, addr, range, PAGE_SHIFT, sparse);
132+
return nvif_vmm_raw_unmap(vmm, addr, range, page_shift, sparse);
133133
}
134134

135135
static int
136136
nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm,
137-
u64 addr, u64 range,
137+
u64 addr, u64 range, u8 page_shift,
138138
u64 bo_offset, u8 kind,
139139
struct nouveau_mem *mem)
140140
{
@@ -163,7 +163,7 @@ nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm,
163163
return -ENOSYS;
164164
}
165165

166-
return nvif_vmm_raw_map(vmm, addr, range, PAGE_SHIFT,
166+
return nvif_vmm_raw_map(vmm, addr, range, page_shift,
167167
&args, argc,
168168
&mem->mem, bo_offset);
169169
}
@@ -182,8 +182,9 @@ nouveau_uvma_vmm_put(struct nouveau_uvma *uvma)
182182
{
183183
u64 addr = uvma->va.va.addr;
184184
u64 range = uvma->va.va.range;
185+
u8 page_shift = uvma->page_shift;
185186

186-
return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range);
187+
return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range, page_shift);
187188
}
188189

189190
static int
@@ -193,22 +194,25 @@ nouveau_uvma_map(struct nouveau_uvma *uvma,
193194
u64 addr = uvma->va.va.addr;
194195
u64 offset = uvma->va.gem.offset;
195196
u64 range = uvma->va.va.range;
197+
u8 page_shift = uvma->page_shift;
196198

197199
return nouveau_uvmm_vmm_map(to_uvmm(uvma), addr, range,
198-
offset, uvma->kind, mem);
200+
page_shift, offset, uvma->kind,
201+
mem);
199202
}
200203

201204
static int
202205
nouveau_uvma_unmap(struct nouveau_uvma *uvma)
203206
{
204207
u64 addr = uvma->va.va.addr;
205208
u64 range = uvma->va.va.range;
209+
u8 page_shift = uvma->page_shift;
206210
bool sparse = !!uvma->region;
207211

208212
if (drm_gpuva_invalidated(&uvma->va))
209213
return 0;
210214

211-
return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
215+
return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, page_shift, sparse);
212216
}
213217

214218
static int
@@ -501,7 +505,8 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
501505

502506
if (vmm_get_range)
503507
nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
504-
vmm_get_range);
508+
vmm_get_range,
509+
PAGE_SHIFT);
505510
break;
506511
}
507512
case DRM_GPUVA_OP_REMAP: {
@@ -528,6 +533,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
528533
u64 ustart = va->va.addr;
529534
u64 urange = va->va.range;
530535
u64 uend = ustart + urange;
536+
u8 page_shift = uvma_from_va(va)->page_shift;
531537

532538
/* Nothing to do for mappings we merge with. */
533539
if (uend == vmm_get_start ||
@@ -538,7 +544,8 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
538544
u64 vmm_get_range = ustart - vmm_get_start;
539545

540546
nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
541-
vmm_get_range);
547+
vmm_get_range,
548+
page_shift);
542549
}
543550
vmm_get_start = uend;
544551
break;
@@ -592,6 +599,7 @@ op_map_prepare(struct nouveau_uvmm *uvmm,
592599

593600
uvma->region = args->region;
594601
uvma->kind = args->kind;
602+
uvma->page_shift = PAGE_SHIFT;
595603

596604
drm_gpuva_map(&uvmm->base, &uvma->va, op);
597605

@@ -633,7 +641,8 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
633641

634642
if (vmm_get_range) {
635643
ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
636-
vmm_get_range);
644+
vmm_get_range,
645+
new->map->page_shift);
637646
if (ret) {
638647
op_map_prepare_unwind(new->map);
639648
goto unwind;
@@ -689,6 +698,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
689698
u64 ustart = va->va.addr;
690699
u64 urange = va->va.range;
691700
u64 uend = ustart + urange;
701+
u8 page_shift = uvma_from_va(va)->page_shift;
692702

693703
op_unmap_prepare(u);
694704

@@ -704,7 +714,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
704714
u64 vmm_get_range = ustart - vmm_get_start;
705715

706716
ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
707-
vmm_get_range);
717+
vmm_get_range, page_shift);
708718
if (ret) {
709719
op_unmap_prepare_unwind(va);
710720
goto unwind;
@@ -799,10 +809,11 @@ op_unmap_range(struct drm_gpuva_op_unmap *u,
799809
u64 addr, u64 range)
800810
{
801811
struct nouveau_uvma *uvma = uvma_from_va(u->va);
812+
u8 page_shift = uvma->page_shift;
802813
bool sparse = !!uvma->region;
803814

804815
if (!drm_gpuva_invalidated(u->va))
805-
nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
816+
nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, page_shift, sparse);
806817
}
807818

808819
static void
@@ -882,6 +893,7 @@ nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm,
882893
struct drm_gpuva_op_map *n = r->next;
883894
struct drm_gpuva *va = r->unmap->va;
884895
struct nouveau_uvma *uvma = uvma_from_va(va);
896+
u8 page_shift = uvma->page_shift;
885897

886898
if (unmap) {
887899
u64 addr = va->va.addr;
@@ -893,7 +905,7 @@ nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm,
893905
if (n)
894906
end = n->va.addr;
895907

896-
nouveau_uvmm_vmm_put(uvmm, addr, end - addr);
908+
nouveau_uvmm_vmm_put(uvmm, addr, end - addr, page_shift);
897909
}
898910

899911
nouveau_uvma_gem_put(uvma);

drivers/gpu/drm/nouveau/nouveau_uvmm.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@ struct nouveau_uvma {
3333

3434
struct nouveau_uvma_region *region;
3535
u8 kind;
36+
u8 page_shift;
3637
};
3738

3839
#define uvmm_from_gpuvm(x) container_of((x), struct nouveau_uvmm, base)

0 commit comments

Comments
 (0)