Skip to content

Commit 2b93efe

Browse files
Rob Clark
authored and committed
drm/msm: Support IO_PGTABLE_QUIRK_NO_WARN_ON
With user managed VMs and multiple queues, it is in theory possible to trigger map/unmap errors. These will (in a later patch) mark the VM as unusable. But we want to tell the io-pgtable helpers not to spam the log. In addition, in the unmap path, we don't want to bail early from the unmap, to ensure we don't leave some dangling pages mapped. Signed-off-by: Rob Clark <robdclark@chromium.org> Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com> Tested-by: Antonino Maniscalco <antomani103@gmail.com> Reviewed-by: Antonino Maniscalco <antomani103@gmail.com> Patchwork: https://patchwork.freedesktop.org/patch/661520/
1 parent 92395af commit 2b93efe

3 files changed

Lines changed: 20 additions & 7 deletions

File tree

drivers/gpu/drm/msm/adreno/a6xx_gpu.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2280,7 +2280,7 @@ a6xx_create_private_vm(struct msm_gpu *gpu, bool kernel_managed)
22802280
{
22812281
struct msm_mmu *mmu;
22822282

2283-
mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu);
2283+
mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu, kernel_managed);
22842284

22852285
if (IS_ERR(mmu))
22862286
return ERR_CAST(mmu);

drivers/gpu/drm/msm/msm_iommu.c

Lines changed: 18 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -94,23 +94,32 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
9494
{
9595
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
9696
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
97+
int ret = 0;
9798

9899
while (size) {
99-
size_t unmapped, pgsize, count;
100+
size_t pgsize, count;
101+
ssize_t unmapped;
100102

101103
pgsize = calc_pgsize(pagetable, iova, iova, size, &count);
102104

103105
unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
104-
if (!unmapped)
105-
break;
106+
if (unmapped <= 0) {
107+
ret = -EINVAL;
108+
/*
109+
* Continue attempting to unmap the remainder of the
110+
* range, so we don't end up with some dangling
111+
* mapped pages
112+
*/
113+
unmapped = PAGE_SIZE;
114+
}
106115

107116
iova += unmapped;
108117
size -= unmapped;
109118
}
110119

111120
iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);
112121

113-
return (size == 0) ? 0 : -EINVAL;
122+
return ret;
114123
}
115124

116125
static int msm_iommu_pagetable_map_prr(struct msm_mmu *mmu, u64 iova, size_t len, int prot)
@@ -324,7 +333,7 @@ static const struct iommu_flush_ops tlb_ops = {
324333
static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
325334
unsigned long iova, int flags, void *arg);
326335

327-
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
336+
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed)
328337
{
329338
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
330339
struct msm_iommu *iommu = to_msm_iommu(parent);
@@ -358,6 +367,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
358367
ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
359368
ttbr0_cfg.tlb = &tlb_ops;
360369

370+
if (!kernel_managed) {
371+
ttbr0_cfg.quirks |= IO_PGTABLE_QUIRK_NO_WARN;
372+
}
373+
361374
pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
362375
&ttbr0_cfg, pagetable);
363376

drivers/gpu/drm/msm/msm_mmu.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
5151
mmu->handler = handler;
5252
}
5353

54-
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
54+
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed);
5555

5656
int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
5757
int *asid);

0 commit comments

Comments
 (0)