
Commit ecfd9fa

Rob Clark authored and committed
drm/msm: Split out map/unmap ops
With async VM_BIND, the actual pgtable updates are deferred: a list of map/unmap ops is generated synchronously, while the pgtable changes themselves are applied later. To support that, split out op handlers and change the existing non-VM_BIND paths to use them.

Note in particular that the vma itself may already be destroyed/freed by the time an UNMAP op runs (or even a MAP op, if there is a later queued UNMAP). For this reason, the op handlers cannot reference the vma pointer.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661516/
1 parent e601ea3 commit ecfd9fa
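For context on the direction described in the commit message, here is a minimal sketch of how a deferred consumer of these ops might look. It is not part of this patch: the struct msm_vm_op wrapper and vm_run_deferred_ops() are hypothetical names, while vm_map_op(), vm_unmap_op(), struct msm_vm_map_op and struct msm_vm_unmap_op are the handlers and op structs introduced in the diff below. It assumes the usual msm_gem_vma.c kernel includes.

/*
 * Illustrative sketch only -- not part of this patch.  Each queued op
 * carries value copies (iova/range/sgt/prot), never a vma pointer, so
 * the originating vma may already be freed before the op is replayed.
 * struct msm_vm_op and vm_run_deferred_ops() are hypothetical.
 */
struct msm_vm_op {
	struct list_head node;
	bool is_map;
	union {
		struct msm_vm_map_op map;
		struct msm_vm_unmap_op unmap;
	};
};

static void
vm_run_deferred_ops(struct msm_gem_vm *vm, struct list_head *ops)
{
	struct msm_vm_op *op, *tmp;

	list_for_each_entry_safe(op, tmp, ops, node) {
		if (op->is_map)
			vm_map_op(vm, &op->map);	/* error handling elided */
		else
			vm_unmap_op(vm, &op->unmap);
		list_del(&op->node);
		kfree(op);
	}
}

Because the handlers consume only the plain op structs, the same vm_map_op()/vm_unmap_op() can back both the existing synchronous paths (changed in the diff below) and a later deferred queue.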

1 file changed

Lines changed: 56 additions & 7 deletions

File tree

drivers/gpu/drm/msm/msm_gem_vma.c

@@ -8,6 +8,34 @@
 #include "msm_gem.h"
 #include "msm_mmu.h"
 
+#define vm_dbg(fmt, ...) pr_debug("%s:%d: "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
+
+/**
+ * struct msm_vm_map_op - create new pgtable mapping
+ */
+struct msm_vm_map_op {
+	/** @iova: start address for mapping */
+	uint64_t iova;
+	/** @range: size of the region to map */
+	uint64_t range;
+	/** @offset: offset into @sgt to map */
+	uint64_t offset;
+	/** @sgt: pages to map, or NULL for a PRR mapping */
+	struct sg_table *sgt;
+	/** @prot: the mapping protection flags */
+	int prot;
+};
+
+/**
+ * struct msm_vm_unmap_op - unmap a range of pages from pgtable
+ */
+struct msm_vm_unmap_op {
+	/** @iova: start address for unmap */
+	uint64_t iova;
+	/** @range: size of region to unmap */
+	uint64_t range;
+};
+
 static void
 msm_gem_vm_free(struct drm_gpuvm *gpuvm)
 {
@@ -21,18 +49,36 @@ msm_gem_vm_free(struct drm_gpuvm *gpuvm)
 	kfree(vm);
 }
 
+static void
+vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op)
+{
+	vm_dbg("%p: %016llx %016llx", vm, op->iova, op->iova + op->range);
+
+	vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range);
+}
+
+static int
+vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op)
+{
+	vm_dbg("%p: %016llx %016llx", vm, op->iova, op->iova + op->range);
+
+	return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset,
+				   op->range, op->prot);
+}
+
 /* Actually unmap memory for the vma */
 void msm_gem_vma_unmap(struct drm_gpuva *vma)
 {
 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
-	struct msm_gem_vm *vm = to_msm_vm(vma->vm);
-	unsigned size = vma->va.range;
 
 	/* Don't do anything if the memory isn't mapped */
 	if (!msm_vma->mapped)
 		return;
 
-	vm->mmu->funcs->unmap(vm->mmu, vma->va.addr, size);
+	vm_unmap_op(to_msm_vm(vma->vm), &(struct msm_vm_unmap_op){
+		.iova = vma->va.addr,
+		.range = vma->va.range,
+	});
 
 	msm_vma->mapped = false;
 }
@@ -42,7 +88,6 @@ int
 msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
 {
 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
-	struct msm_gem_vm *vm = to_msm_vm(vma->vm);
 	int ret;
 
 	if (GEM_WARN_ON(!vma->va.addr))
@@ -62,9 +107,13 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
 	 * Revisit this if we can come up with a scheme to pre-alloc pages
 	 * for the pgtable in map/unmap ops.
 	 */
-	ret = vm->mmu->funcs->map(vm->mmu, vma->va.addr, sgt,
-				  vma->gem.offset, vma->va.range,
-				  prot);
+	ret = vm_map_op(to_msm_vm(vma->vm), &(struct msm_vm_map_op){
+		.iova = vma->va.addr,
+		.range = vma->va.range,
+		.offset = vma->gem.offset,
+		.sgt = sgt,
+		.prot = prot,
+	});
 	if (ret) {
 		msm_vma->mapped = false;
 	}
