Skip to content

Commit 9b54a32

Browse files
AngeloGioacchino Del Regno authored and Chun-Kuang Hu
committed
drm/mediatek: mtk_gem: Partial refactor and use drm_gem_dma_object
Partially refactor mtk_gem to stop using (and remove) the unneeded custom mtk_gem_obj structure and migrate drivers to use the API defined drm_gem_dma_object structure instead, and to align all of the functions to be similar to the logic from drm_gem_dma_helper. Unfortunately, for this driver it wasn't possible to directly use the drm_gem_dma_helper callbacks (apart from .print_info), as the DMA mapping here is done on specific dma devices instead of the main DRM device. Also, since the mtk_gem_obj structure is no more, also migrate the mtk_plane.c code to grab the DMA address from a drm_gem_dma_object and replace the inclusion of the custom mtk_gem.h header (as it is now unneeded) with the DRM API provided drm_gem_dma_helper. While at it, also set DRM_GEM_DMA_HELPER as an unconditional dependency (remove the `if DRM_FBDEV_EMULATION` from the select DRM_GEM_DMA_HELPER statement in Kconfig). This resolves an issue pointed by UBSAN, as when using drm_fbdev_dma the drm_gem_object is supposed to be child of a drm_gem_dma_object instead of a custom mtk_gem_obj (or the mtk_gem_obj should have been reordered to have the same fields as drm_gem_dma_object, but that would have been too fragile and generally a bad idea anyway). Fixes: 0992284 ("drm/mediatek: Use fbdev-dma") Signed-off-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> Link: https://patchwork.kernel.org/project/dri-devel/patch/20251111085114.9752-1-angelogioacchino.delregno@collabora.com/ Signed-off-by: Chun-Kuang Hu <chunkuang.hu@kernel.org>
1 parent 21465e7 commit 9b54a32

4 files changed

Lines changed: 109 additions & 198 deletions

File tree

drivers/gpu/drm/mediatek/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -8,7 +8,7 @@ config DRM_MEDIATEK
88
depends on OF
99
depends on MTK_MMSYS
1010
select DRM_CLIENT_SELECTION
11-
select DRM_GEM_DMA_HELPER if DRM_FBDEV_EMULATION
11+
select DRM_GEM_DMA_HELPER
1212
select DRM_KMS_HELPER
1313
select DRM_DISPLAY_HELPER
1414
select DRM_BRIDGE_CONNECTOR

drivers/gpu/drm/mediatek/mtk_gem.c

Lines changed: 103 additions & 161 deletions
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,8 @@
11
// SPDX-License-Identifier: GPL-2.0-only
22
/*
33
* Copyright (c) 2015 MediaTek Inc.
4+
* Copyright (c) 2025 Collabora Ltd.
5+
* AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
46
*/
57

68
#include <linux/dma-buf.h>
@@ -18,111 +20,130 @@
1820

1921
static int mtk_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
2022

21-
static const struct vm_operations_struct vm_ops = {
22-
.open = drm_gem_vm_open,
23-
.close = drm_gem_vm_close,
24-
};
23+
static void mtk_gem_free_object(struct drm_gem_object *obj)
24+
{
25+
struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
26+
struct mtk_drm_private *priv = obj->dev->dev_private;
27+
28+
if (dma_obj->sgt)
29+
drm_prime_gem_destroy(obj, dma_obj->sgt);
30+
else
31+
dma_free_wc(priv->dma_dev, dma_obj->base.size,
32+
dma_obj->vaddr, dma_obj->dma_addr);
33+
34+
/* release file pointer to gem object. */
35+
drm_gem_object_release(obj);
36+
37+
kfree(dma_obj);
38+
}
39+
40+
/*
41+
* Allocate a sg_table for this GEM object.
42+
* Note: Both the table's contents, and the sg_table itself must be freed by
43+
* the caller.
44+
* Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
45+
*/
46+
static struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
47+
{
48+
struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
49+
struct mtk_drm_private *priv = obj->dev->dev_private;
50+
struct sg_table *sgt;
51+
int ret;
52+
53+
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
54+
if (!sgt)
55+
return ERR_PTR(-ENOMEM);
56+
57+
ret = dma_get_sgtable(priv->dma_dev, sgt, dma_obj->vaddr,
58+
dma_obj->dma_addr, obj->size);
59+
if (ret) {
60+
DRM_ERROR("failed to allocate sgt, %d\n", ret);
61+
kfree(sgt);
62+
return ERR_PTR(ret);
63+
}
64+
65+
return sgt;
66+
}
2567

2668
static const struct drm_gem_object_funcs mtk_gem_object_funcs = {
2769
.free = mtk_gem_free_object,
70+
.print_info = drm_gem_dma_object_print_info,
2871
.get_sg_table = mtk_gem_prime_get_sg_table,
29-
.vmap = mtk_gem_prime_vmap,
30-
.vunmap = mtk_gem_prime_vunmap,
72+
.vmap = drm_gem_dma_object_vmap,
3173
.mmap = mtk_gem_object_mmap,
32-
.vm_ops = &vm_ops,
74+
.vm_ops = &drm_gem_dma_vm_ops,
3375
};
3476

35-
static struct mtk_gem_obj *mtk_gem_init(struct drm_device *dev,
36-
unsigned long size)
77+
static struct drm_gem_dma_object *mtk_gem_init(struct drm_device *dev,
78+
unsigned long size, bool private)
3779
{
38-
struct mtk_gem_obj *mtk_gem_obj;
80+
struct drm_gem_dma_object *dma_obj;
3981
int ret;
4082

4183
size = round_up(size, PAGE_SIZE);
4284

4385
if (size == 0)
4486
return ERR_PTR(-EINVAL);
4587

46-
mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
47-
if (!mtk_gem_obj)
88+
dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
89+
if (!dma_obj)
4890
return ERR_PTR(-ENOMEM);
4991

50-
mtk_gem_obj->base.funcs = &mtk_gem_object_funcs;
92+
dma_obj->base.funcs = &mtk_gem_object_funcs;
5193

52-
ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
53-
if (ret < 0) {
94+
if (private) {
95+
ret = 0;
96+
drm_gem_private_object_init(dev, &dma_obj->base, size);
97+
} else {
98+
ret = drm_gem_object_init(dev, &dma_obj->base, size);
99+
}
100+
if (ret) {
54101
DRM_ERROR("failed to initialize gem object\n");
55-
kfree(mtk_gem_obj);
102+
kfree(dma_obj);
56103
return ERR_PTR(ret);
57104
}
58105

59-
return mtk_gem_obj;
106+
return dma_obj;
60107
}
61108

62-
struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev,
63-
size_t size, bool alloc_kmap)
109+
static struct drm_gem_dma_object *mtk_gem_create(struct drm_device *dev, size_t size)
64110
{
65111
struct mtk_drm_private *priv = dev->dev_private;
66-
struct mtk_gem_obj *mtk_gem;
112+
struct drm_gem_dma_object *dma_obj;
67113
struct drm_gem_object *obj;
68114
int ret;
69115

70-
mtk_gem = mtk_gem_init(dev, size);
71-
if (IS_ERR(mtk_gem))
72-
return ERR_CAST(mtk_gem);
73-
74-
obj = &mtk_gem->base;
116+
dma_obj = mtk_gem_init(dev, size, false);
117+
if (IS_ERR(dma_obj))
118+
return ERR_CAST(dma_obj);
75119

76-
mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;
120+
obj = &dma_obj->base;
77121

78-
if (!alloc_kmap)
79-
mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
80-
81-
mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
82-
&mtk_gem->dma_addr, GFP_KERNEL,
83-
mtk_gem->dma_attrs);
84-
if (!mtk_gem->cookie) {
122+
dma_obj->vaddr = dma_alloc_wc(priv->dma_dev, obj->size,
123+
&dma_obj->dma_addr,
124+
GFP_KERNEL | __GFP_NOWARN);
125+
if (!dma_obj->vaddr) {
85126
DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
86127
ret = -ENOMEM;
87128
goto err_gem_free;
88129
}
89130

90-
if (alloc_kmap)
91-
mtk_gem->kvaddr = mtk_gem->cookie;
92-
93-
DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
94-
mtk_gem->cookie, &mtk_gem->dma_addr,
131+
DRM_DEBUG_DRIVER("vaddr = %p dma_addr = %pad size = %zu\n",
132+
dma_obj->vaddr, &dma_obj->dma_addr,
95133
size);
96134

97-
return mtk_gem;
135+
return dma_obj;
98136

99137
err_gem_free:
100138
drm_gem_object_release(obj);
101-
kfree(mtk_gem);
139+
kfree(dma_obj);
102140
return ERR_PTR(ret);
103141
}
104142

105-
void mtk_gem_free_object(struct drm_gem_object *obj)
106-
{
107-
struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
108-
struct mtk_drm_private *priv = obj->dev->dev_private;
109-
110-
if (mtk_gem->sg)
111-
drm_prime_gem_destroy(obj, mtk_gem->sg);
112-
else
113-
dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
114-
mtk_gem->dma_addr, mtk_gem->dma_attrs);
115-
116-
/* release file pointer to gem object. */
117-
drm_gem_object_release(obj);
118-
119-
kfree(mtk_gem);
120-
}
121-
122143
int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
123144
struct drm_mode_create_dumb *args)
124145
{
125-
struct mtk_gem_obj *mtk_gem;
146+
struct drm_gem_dma_object *dma_obj;
126147
int ret;
127148

128149
args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
@@ -135,155 +156,76 @@ int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
135156
args->size = args->pitch;
136157
args->size *= args->height;
137158

138-
mtk_gem = mtk_gem_create(dev, args->size, false);
139-
if (IS_ERR(mtk_gem))
140-
return PTR_ERR(mtk_gem);
159+
dma_obj = mtk_gem_create(dev, args->size);
160+
if (IS_ERR(dma_obj))
161+
return PTR_ERR(dma_obj);
141162

142163
/*
143164
* allocate a id of idr table where the obj is registered
144165
* and handle has the id what user can see.
145166
*/
146-
ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
167+
ret = drm_gem_handle_create(file_priv, &dma_obj->base, &args->handle);
147168
if (ret)
148169
goto err_handle_create;
149170

150171
/* drop reference from allocate - handle holds it now. */
151-
drm_gem_object_put(&mtk_gem->base);
172+
drm_gem_object_put(&dma_obj->base);
152173

153174
return 0;
154175

155176
err_handle_create:
156-
mtk_gem_free_object(&mtk_gem->base);
177+
mtk_gem_free_object(&dma_obj->base);
157178
return ret;
158179
}
159180

160181
static int mtk_gem_object_mmap(struct drm_gem_object *obj,
161182
struct vm_area_struct *vma)
162183

163184
{
164-
int ret;
165-
struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
185+
struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
166186
struct mtk_drm_private *priv = obj->dev->dev_private;
187+
int ret;
167188

168189
/*
169190
* Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
170191
* whole buffer from the start.
171192
*/
172-
vma->vm_pgoff = 0;
193+
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
173194

174195
/*
175196
* dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
176197
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
177198
*/
178-
vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
199+
vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);
200+
179201
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
180202
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
181203

182-
ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
183-
mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
204+
ret = dma_mmap_wc(priv->dma_dev, vma, dma_obj->vaddr,
205+
dma_obj->dma_addr, obj->size);
206+
if (ret)
207+
drm_gem_vm_close(vma);
184208

185209
return ret;
186210
}
187211

188-
/*
189-
* Allocate a sg_table for this GEM object.
190-
* Note: Both the table's contents, and the sg_table itself must be freed by
191-
* the caller.
192-
* Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
193-
*/
194-
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
195-
{
196-
struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
197-
struct mtk_drm_private *priv = obj->dev->dev_private;
198-
struct sg_table *sgt;
199-
int ret;
200-
201-
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
202-
if (!sgt)
203-
return ERR_PTR(-ENOMEM);
204-
205-
ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
206-
mtk_gem->dma_addr, obj->size,
207-
mtk_gem->dma_attrs);
208-
if (ret) {
209-
DRM_ERROR("failed to allocate sgt, %d\n", ret);
210-
kfree(sgt);
211-
return ERR_PTR(ret);
212-
}
213-
214-
return sgt;
215-
}
216-
217212
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
218-
struct dma_buf_attachment *attach, struct sg_table *sg)
213+
struct dma_buf_attachment *attach, struct sg_table *sgt)
219214
{
220-
struct mtk_gem_obj *mtk_gem;
215+
struct drm_gem_dma_object *dma_obj;
221216

222217
/* check if the entries in the sg_table are contiguous */
223-
if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
218+
if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
224219
DRM_ERROR("sg_table is not contiguous");
225220
return ERR_PTR(-EINVAL);
226221
}
227222

228-
mtk_gem = mtk_gem_init(dev, attach->dmabuf->size);
229-
if (IS_ERR(mtk_gem))
230-
return ERR_CAST(mtk_gem);
231-
232-
mtk_gem->dma_addr = sg_dma_address(sg->sgl);
233-
mtk_gem->sg = sg;
234-
235-
return &mtk_gem->base;
236-
}
237-
238-
int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
239-
{
240-
struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
241-
struct sg_table *sgt = NULL;
242-
unsigned int npages;
243-
244-
if (mtk_gem->kvaddr)
245-
goto out;
246-
247-
sgt = mtk_gem_prime_get_sg_table(obj);
248-
if (IS_ERR(sgt))
249-
return PTR_ERR(sgt);
250-
251-
npages = obj->size >> PAGE_SHIFT;
252-
mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
253-
if (!mtk_gem->pages) {
254-
sg_free_table(sgt);
255-
kfree(sgt);
256-
return -ENOMEM;
257-
}
258-
259-
drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages);
260-
261-
mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
262-
pgprot_writecombine(PAGE_KERNEL));
263-
if (!mtk_gem->kvaddr) {
264-
sg_free_table(sgt);
265-
kfree(sgt);
266-
kfree(mtk_gem->pages);
267-
return -ENOMEM;
268-
}
269-
sg_free_table(sgt);
270-
kfree(sgt);
271-
272-
out:
273-
iosys_map_set_vaddr(map, mtk_gem->kvaddr);
274-
275-
return 0;
276-
}
277-
278-
void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
279-
{
280-
struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
281-
void *vaddr = map->vaddr;
223+
dma_obj = mtk_gem_init(dev, attach->dmabuf->size, true);
224+
if (IS_ERR(dma_obj))
225+
return ERR_CAST(dma_obj);
282226

283-
if (!mtk_gem->pages)
284-
return;
227+
dma_obj->dma_addr = sg_dma_address(sgt->sgl);
228+
dma_obj->sgt = sgt;
285229

286-
vunmap(vaddr);
287-
mtk_gem->kvaddr = NULL;
288-
kfree(mtk_gem->pages);
230+
return &dma_obj->base;
289231
}

0 commit comments

Comments
 (0)