Skip to content

Commit eec7e23

Browse files
akagoe01 and bbrezillon
authored and committed
drm/panthor: Prevent potential UAF in group creation
This commit prevents the possibility of a use-after-free issue in the GROUP_CREATE ioctl function, which arose because a pointer to the group is accessed in that ioctl function after storing it in the Xarray. A malicious userspace can second-guess the handle of a group and try to call the GROUP_DESTROY ioctl from another thread around the same time as the GROUP_CREATE ioctl. To prevent the use-after-free exploit, this commit uses a mark on an entry of the group pool Xarray, which is added just before returning from the GROUP_CREATE ioctl function. The mark is checked for all ioctls that specify the group handle, so userspace won't be able to delete a group that isn't marked yet. v2: Add R-bs and fixes tags Fixes: de85488 ("drm/panthor: Add the scheduler logical block") Co-developed-by: Boris Brezillon <boris.brezillon@collabora.com> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com> Signed-off-by: Akash Goel <akash.goel@arm.com> Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com> Reviewed-by: Steven Price <steven.price@arm.com> Reviewed-by: Chia-I Wu <olvaffe@gmail.com> Link: https://patch.msgid.link/20251127164912.3788155-1-akash.goel@arm.com
1 parent 31d3354 commit eec7e23

1 file changed

Lines changed: 15 additions & 4 deletions

File tree

drivers/gpu/drm/panthor/panthor_sched.c

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -776,6 +776,12 @@ struct panthor_job_profiling_data {
776776
*/
777777
#define MAX_GROUPS_PER_POOL 128
778778

779+
/*
780+
* Mark added on an entry of group pool Xarray to identify if the group has
781+
* been fully initialized and can be accessed elsewhere in the driver code.
782+
*/
783+
#define GROUP_REGISTERED XA_MARK_1
784+
779785
/**
780786
* struct panthor_group_pool - Group pool
781787
*
@@ -2906,7 +2912,7 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
29062912
return;
29072913

29082914
xa_lock(&gpool->xa);
2909-
xa_for_each(&gpool->xa, i, group) {
2915+
xa_for_each_marked(&gpool->xa, i, group, GROUP_REGISTERED) {
29102916
guard(spinlock)(&group->fdinfo.lock);
29112917
pfile->stats.cycles += group->fdinfo.data.cycles;
29122918
pfile->stats.time += group->fdinfo.data.time;
@@ -3591,6 +3597,8 @@ int panthor_group_create(struct panthor_file *pfile,
35913597

35923598
group_init_task_info(group);
35933599

3600+
xa_set_mark(&gpool->xa, gid, GROUP_REGISTERED);
3601+
35943602
return gid;
35953603

35963604
err_erase_gid:
@@ -3608,6 +3616,9 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
36083616
struct panthor_scheduler *sched = ptdev->scheduler;
36093617
struct panthor_group *group;
36103618

3619+
if (!xa_get_mark(&gpool->xa, group_handle, GROUP_REGISTERED))
3620+
return -EINVAL;
3621+
36113622
group = xa_erase(&gpool->xa, group_handle);
36123623
if (!group)
36133624
return -EINVAL;
@@ -3633,12 +3644,12 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
36333644
}
36343645

36353646
static struct panthor_group *group_from_handle(struct panthor_group_pool *pool,
3636-
u32 group_handle)
3647+
unsigned long group_handle)
36373648
{
36383649
struct panthor_group *group;
36393650

36403651
xa_lock(&pool->xa);
3641-
group = group_get(xa_load(&pool->xa, group_handle));
3652+
group = group_get(xa_find(&pool->xa, &group_handle, group_handle, GROUP_REGISTERED));
36423653
xa_unlock(&pool->xa);
36433654

36443655
return group;
@@ -3725,7 +3736,7 @@ panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
37253736
return;
37263737

37273738
xa_lock(&gpool->xa);
3728-
xa_for_each(&gpool->xa, i, group) {
3739+
xa_for_each_marked(&gpool->xa, i, group, GROUP_REGISTERED) {
37293740
stats->resident += group->fdinfo.kbo_sizes;
37303741
if (group->csg_id >= 0)
37313742
stats->active += group->fdinfo.kbo_sizes;

0 commit comments

Comments
 (0)