Skip to content

Commit 5343936

Browse files
Author: Peter Zijlstra (committed)
sched: Audit MOVE vs balance_callbacks
The {DE,EN}QUEUE_MOVE flag indicates a task is allowed to change priority, which means there could be balance callbacks queued. Therefore audit all MOVE users and make sure they do run balance callbacks before dropping the rq-lock.

Fixes: 6455ad5 ("sched: Move sched_class::prio_changed() into the change pattern")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Pierre Gondois <pierre.gondois@arm.com>
Tested-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://patch.msgid.link/20260114130528.GB831285@noisy.programming.kicks-ass.net
1 parent 49041e8 commit 5343936

3 files changed

Lines changed: 8 additions & 2 deletions

File tree

kernel/sched/core.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4950,7 +4950,7 @@ struct balance_callback *splice_balance_callbacks(struct rq *rq)
49504950
return __splice_balance_callbacks(rq, true);
49514951
}
49524952

4953-
static void __balance_callbacks(struct rq *rq, struct rq_flags *rf)
4953+
void __balance_callbacks(struct rq *rq, struct rq_flags *rf)
49544954
{
49554955
if (rf)
49564956
rq_unpin_lock(rq, rf);
@@ -9126,6 +9126,8 @@ void sched_move_task(struct task_struct *tsk, bool for_autogroup)
91269126

91279127
if (resched)
91289128
resched_curr(rq);
9129+
9130+
__balance_callbacks(rq, &rq_guard.rf);
91299131
}
91309132

91319133
static struct cgroup_subsys_state *

kernel/sched/ext.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -545,6 +545,7 @@ static void scx_task_iter_start(struct scx_task_iter *iter)
545545
static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
546546
{
547547
if (iter->locked_task) {
548+
__balance_callbacks(iter->rq, &iter->rf);
548549
task_rq_unlock(iter->rq, iter->locked_task, &iter->rf);
549550
iter->locked_task = NULL;
550551
}

kernel/sched/sched.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2388,7 +2388,8 @@ extern const u32 sched_prio_to_wmult[40];
23882388
* should preserve as much state as possible.
23892389
*
23902390
* MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
2391-
* in the runqueue.
2391+
* in the runqueue. IOW the priority is allowed to change. Callers
2392+
* must expect to deal with balance callbacks.
23922393
*
23932394
* NOCLOCK - skip the update_rq_clock() (avoids double updates)
23942395
*
@@ -3969,6 +3970,8 @@ extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
39693970
extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);
39703971

39713972
extern struct balance_callback *splice_balance_callbacks(struct rq *rq);
3973+
3974+
extern void __balance_callbacks(struct rq *rq, struct rq_flags *rf);
39723975
extern void balance_callbacks(struct rq *rq, struct balance_callback *head);
39733976

39743977
/*

0 commit comments

Comments (0)