Skip to content

Commit 5065321

Browse files
Joel Fernandes authored and Peter Zijlstra committed
sched: Add support to pick functions to take rf
Some pick functions like the internal pick_next_task_fair() already take rf but some others don't. We need this for scx's server pick function. Prepare for this by having pick functions accept it. [peterz: - added RETRY_TASK handling - removed pick_next_task_fair indirection] Signed-off-by: Joel Fernandes <joelagnelf@nvidia.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Acked-by: Tejun Heo <tj@kernel.org>
1 parent 1e900f4 commit 5065321

9 files changed

Lines changed: 48 additions & 46 deletions

File tree

include/linux/sched.h

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -637,8 +637,8 @@ struct sched_rt_entity {
637637
#endif
638638
} __randomize_layout;
639639

640-
typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
641-
typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);
640+
struct rq_flags;
641+
typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *, struct rq_flags *rf);
642642

643643
struct sched_dl_entity {
644644
struct rb_node rb_node;
@@ -730,9 +730,6 @@ struct sched_dl_entity {
730730
* dl_server_update().
731731
*
732732
* @rq the runqueue this server is for
733-
*
734-
* @server_has_tasks() returns true if @server_pick return a
735-
* runnable task.
736733
*/
737734
struct rq *rq;
738735
dl_server_pick_f server_pick_task;

kernel/sched/core.c

Lines changed: 26 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -5901,7 +5901,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
59015901

59025902
/* Assume the next prioritized class is idle_sched_class */
59035903
if (!p) {
5904-
p = pick_task_idle(rq);
5904+
p = pick_task_idle(rq, rf);
59055905
put_prev_set_next_task(rq, prev, p);
59065906
}
59075907

@@ -5913,11 +5913,15 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
59135913

59145914
for_each_active_class(class) {
59155915
if (class->pick_next_task) {
5916-
p = class->pick_next_task(rq, prev);
5916+
p = class->pick_next_task(rq, prev, rf);
5917+
if (unlikely(p == RETRY_TASK))
5918+
goto restart;
59175919
if (p)
59185920
return p;
59195921
} else {
5920-
p = class->pick_task(rq);
5922+
p = class->pick_task(rq, rf);
5923+
if (unlikely(p == RETRY_TASK))
5924+
goto restart;
59215925
if (p) {
59225926
put_prev_set_next_task(rq, prev, p);
59235927
return p;
@@ -5947,15 +5951,19 @@ static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
59475951
return a->core_cookie == b->core_cookie;
59485952
}
59495953

5950-
static inline struct task_struct *pick_task(struct rq *rq)
5954+
/*
5955+
* Careful; this can return RETRY_TASK, it does not include the retry-loop
5956+
* itself due to the whole SMT pick retry thing below.
5957+
*/
5958+
static inline struct task_struct *pick_task(struct rq *rq, struct rq_flags *rf)
59515959
{
59525960
const struct sched_class *class;
59535961
struct task_struct *p;
59545962

59555963
rq->dl_server = NULL;
59565964

59575965
for_each_active_class(class) {
5958-
p = class->pick_task(rq);
5966+
p = class->pick_task(rq, rf);
59595967
if (p)
59605968
return p;
59615969
}
@@ -5970,7 +5978,7 @@ static void queue_core_balance(struct rq *rq);
59705978
static struct task_struct *
59715979
pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
59725980
{
5973-
struct task_struct *next, *p, *max = NULL;
5981+
struct task_struct *next, *p, *max;
59745982
const struct cpumask *smt_mask;
59755983
bool fi_before = false;
59765984
bool core_clock_updated = (rq == rq->core);
@@ -6055,7 +6063,10 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
60556063
* and there are no cookied tasks running on siblings.
60566064
*/
60576065
if (!need_sync) {
6058-
next = pick_task(rq);
6066+
restart_single:
6067+
next = pick_task(rq, rf);
6068+
if (unlikely(next == RETRY_TASK))
6069+
goto restart_single;
60596070
if (!next->core_cookie) {
60606071
rq->core_pick = NULL;
60616072
rq->core_dl_server = NULL;
@@ -6075,6 +6086,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
60756086
*
60766087
* Tie-break prio towards the current CPU
60776088
*/
6089+
restart_multi:
6090+
max = NULL;
60786091
for_each_cpu_wrap(i, smt_mask, cpu) {
60796092
rq_i = cpu_rq(i);
60806093

@@ -6086,7 +6099,11 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
60866099
if (i != cpu && (rq_i != rq->core || !core_clock_updated))
60876100
update_rq_clock(rq_i);
60886101

6089-
rq_i->core_pick = p = pick_task(rq_i);
6102+
p = pick_task(rq_i, rf);
6103+
if (unlikely(p == RETRY_TASK))
6104+
goto restart_multi;
6105+
6106+
rq_i->core_pick = p;
60906107
rq_i->core_dl_server = rq_i->dl_server;
60916108

60926109
if (!max || prio_less(max, p, fi_before))
@@ -6108,7 +6125,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
61086125
if (cookie)
61096126
p = sched_core_find(rq_i, cookie);
61106127
if (!p)
6111-
p = idle_sched_class.pick_task(rq_i);
6128+
p = idle_sched_class.pick_task(rq_i, rf);
61126129
}
61136130

61146131
rq_i->core_pick = p;

kernel/sched/deadline.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2352,7 +2352,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
23522352
* __pick_next_task_dl - Helper to pick the next -deadline task to run.
23532353
* @rq: The runqueue to pick the next task from.
23542354
*/
2355-
static struct task_struct *__pick_task_dl(struct rq *rq)
2355+
static struct task_struct *__pick_task_dl(struct rq *rq, struct rq_flags *rf)
23562356
{
23572357
struct sched_dl_entity *dl_se;
23582358
struct dl_rq *dl_rq = &rq->dl;
@@ -2366,7 +2366,7 @@ static struct task_struct *__pick_task_dl(struct rq *rq)
23662366
WARN_ON_ONCE(!dl_se);
23672367

23682368
if (dl_server(dl_se)) {
2369-
p = dl_se->server_pick_task(dl_se);
2369+
p = dl_se->server_pick_task(dl_se, rf);
23702370
if (!p) {
23712371
dl_server_stop(dl_se);
23722372
goto again;
@@ -2379,9 +2379,9 @@ static struct task_struct *__pick_task_dl(struct rq *rq)
23792379
return p;
23802380
}
23812381

2382-
static struct task_struct *pick_task_dl(struct rq *rq)
2382+
static struct task_struct *pick_task_dl(struct rq *rq, struct rq_flags *rf)
23832383
{
2384-
return __pick_task_dl(rq);
2384+
return __pick_task_dl(rq, rf);
23852385
}
23862386

23872387
static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)

kernel/sched/ext.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2332,7 +2332,7 @@ static struct task_struct *first_local_task(struct rq *rq)
23322332
struct task_struct, scx.dsq_list.node);
23332333
}
23342334

2335-
static struct task_struct *pick_task_scx(struct rq *rq)
2335+
static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf)
23362336
{
23372337
struct task_struct *prev = rq->curr;
23382338
struct task_struct *p;

kernel/sched/fair.c

Lines changed: 6 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -8705,15 +8705,6 @@ static void set_cpus_allowed_fair(struct task_struct *p, struct affinity_context
87058705
set_task_max_allowed_capacity(p);
87068706
}
87078707

8708-
static int
8709-
balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
8710-
{
8711-
if (sched_fair_runnable(rq))
8712-
return 1;
8713-
8714-
return sched_balance_newidle(rq, rf) != 0;
8715-
}
8716-
87178708
static void set_next_buddy(struct sched_entity *se)
87188709
{
87198710
for_each_sched_entity(se) {
@@ -8822,7 +8813,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
88228813
resched_curr_lazy(rq);
88238814
}
88248815

8825-
static struct task_struct *pick_task_fair(struct rq *rq)
8816+
static struct task_struct *pick_task_fair(struct rq *rq, struct rq_flags *rf)
88268817
{
88278818
struct sched_entity *se;
88288819
struct cfs_rq *cfs_rq;
@@ -8866,7 +8857,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
88668857
int new_tasks;
88678858

88688859
again:
8869-
p = pick_task_fair(rq);
8860+
p = pick_task_fair(rq, rf);
88708861
if (!p)
88718862
goto idle;
88728863
se = &p->se;
@@ -8945,14 +8936,10 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
89458936
return NULL;
89468937
}
89478938

8948-
static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_struct *prev)
8949-
{
8950-
return pick_next_task_fair(rq, prev, NULL);
8951-
}
8952-
8953-
static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se)
8939+
static struct task_struct *
8940+
fair_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf)
89548941
{
8955-
return pick_task_fair(dl_se->rq);
8942+
return pick_task_fair(dl_se->rq, rf);
89568943
}
89578944

89588945
void fair_server_init(struct rq *rq)
@@ -13644,11 +13631,10 @@ DEFINE_SCHED_CLASS(fair) = {
1364413631
.wakeup_preempt = check_preempt_wakeup_fair,
1364513632

1364613633
.pick_task = pick_task_fair,
13647-
.pick_next_task = __pick_next_task_fair,
13634+
.pick_next_task = pick_next_task_fair,
1364813635
.put_prev_task = put_prev_task_fair,
1364913636
.set_next_task = set_next_task_fair,
1365013637

13651-
.balance = balance_fair,
1365213638
.select_task_rq = select_task_rq_fair,
1365313639
.migrate_task_rq = migrate_task_rq_fair,
1365413640

kernel/sched/idle.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -466,7 +466,7 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
466466
next->se.exec_start = rq_clock_task(rq);
467467
}
468468

469-
struct task_struct *pick_task_idle(struct rq *rq)
469+
struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf)
470470
{
471471
scx_update_idle(rq, true, false);
472472
return rq->idle;

kernel/sched/rt.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1695,7 +1695,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
16951695
return rt_task_of(rt_se);
16961696
}
16971697

1698-
static struct task_struct *pick_task_rt(struct rq *rq)
1698+
static struct task_struct *pick_task_rt(struct rq *rq, struct rq_flags *rf)
16991699
{
17001700
struct task_struct *p;
17011701

kernel/sched/sched.h

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2470,7 +2470,7 @@ struct sched_class {
24702470
/*
24712471
* schedule/pick_next_task: rq->lock
24722472
*/
2473-
struct task_struct *(*pick_task)(struct rq *rq);
2473+
struct task_struct *(*pick_task)(struct rq *rq, struct rq_flags *rf);
24742474
/*
24752475
* Optional! When implemented pick_next_task() should be equivalent to:
24762476
*
@@ -2480,7 +2480,8 @@ struct sched_class {
24802480
* set_next_task_first(next);
24812481
* }
24822482
*/
2483-
struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev);
2483+
struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev,
2484+
struct rq_flags *rf);
24842485

24852486
/*
24862487
* sched_change:
@@ -2707,8 +2708,9 @@ static inline bool sched_fair_runnable(struct rq *rq)
27072708
return rq->cfs.nr_queued > 0;
27082709
}
27092710

2710-
extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
2711-
extern struct task_struct *pick_task_idle(struct rq *rq);
2711+
extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev,
2712+
struct rq_flags *rf);
2713+
extern struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf);
27122714

27132715
#define SCA_CHECK 0x01
27142716
#define SCA_MIGRATE_DISABLE 0x02

kernel/sched/stop_task.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool fir
3232
stop->se.exec_start = rq_clock_task(rq);
3333
}
3434

35-
static struct task_struct *pick_task_stop(struct rq *rq)
35+
static struct task_struct *pick_task_stop(struct rq *rq, struct rq_flags *rf)
3636
{
3737
if (!sched_stop_runnable(rq))
3838
return NULL;

0 commit comments

Comments
 (0)