Skip to content

Commit cd959a3

Browse files
arighi and Peter Zijlstra
authored and committed
sched_ext: Add a DL server for sched_ext tasks
sched_ext currently suffers starvation due to RT. The same workload when converted to EXT can get zero runtime if RT is 100% running, causing EXT processes to stall. Fix it by adding a DL server for EXT. A kselftest is also included later to confirm that both DL servers are functioning correctly: # ./runner -t rt_stall ===== START ===== TEST: rt_stall DESCRIPTION: Verify that RT tasks cannot stall SCHED_EXT tasks OUTPUT: TAP version 13 1..1 # Runtime of FAIR task (PID 1511) is 0.250000 seconds # Runtime of RT task (PID 1512) is 4.750000 seconds # FAIR task got 5.00% of total runtime ok 1 PASS: FAIR task got more than 4.00% of runtime TAP version 13 1..1 # Runtime of EXT task (PID 1514) is 0.250000 seconds # Runtime of RT task (PID 1515) is 4.750000 seconds # EXT task got 5.00% of total runtime ok 2 PASS: EXT task got more than 4.00% of runtime TAP version 13 1..1 # Runtime of FAIR task (PID 1517) is 0.250000 seconds # Runtime of RT task (PID 1518) is 4.750000 seconds # FAIR task got 5.00% of total runtime ok 3 PASS: FAIR task got more than 4.00% of runtime TAP version 13 1..1 # Runtime of EXT task (PID 1521) is 0.250000 seconds # Runtime of RT task (PID 1522) is 4.750000 seconds # EXT task got 5.00% of total runtime ok 4 PASS: EXT task got more than 4.00% of runtime ok 1 rt_stall # ===== END ===== Co-developed-by: Joel Fernandes <joelagnelf@nvidia.com> Signed-off-by: Joel Fernandes <joelagnelf@nvidia.com> Signed-off-by: Andrea Righi <arighi@nvidia.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Juri Lelli <juri.lelli@redhat.com> Tested-by: Christian Loehle <christian.loehle@arm.com> Link: https://patch.msgid.link/20260126100050.3854740-5-arighi@nvidia.com
1 parent 68ec89d commit cd959a3

6 files changed

Lines changed: 109 additions & 23 deletions

File tree

kernel/sched/core.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8484,6 +8484,9 @@ int sched_cpu_dying(unsigned int cpu)
84848484
dump_rq_tasks(rq, KERN_WARNING);
84858485
}
84868486
dl_server_stop(&rq->fair_server);
8487+
#ifdef CONFIG_SCHED_CLASS_EXT
8488+
dl_server_stop(&rq->ext_server);
8489+
#endif
84878490
rq_unlock_irqrestore(rq, &rf);
84888491

84898492
calc_load_migrate(rq);
@@ -8689,6 +8692,9 @@ void __init sched_init(void)
86898692
hrtick_rq_init(rq);
86908693
atomic_set(&rq->nr_iowait, 0);
86918694
fair_server_init(rq);
8695+
#ifdef CONFIG_SCHED_CLASS_EXT
8696+
ext_server_init(rq);
8697+
#endif
86928698

86938699
#ifdef CONFIG_SCHED_CORE
86948700
rq->core = rq;

kernel/sched/deadline.c

Lines changed: 60 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1449,8 +1449,8 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
14491449
dl_se->dl_defer_idle = 0;
14501450

14511451
/*
1452-
* The fair server can consume its runtime while throttled (not queued/
1453-
* running as regular CFS).
1452+
* The DL server can consume its runtime while throttled (not
1453+
* queued / running as regular CFS).
14541454
*
14551455
* If the server consumes its entire runtime in this state. The server
14561456
* is not required for the current period. Thus, reset the server by
@@ -1535,10 +1535,10 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
15351535
}
15361536

15371537
/*
1538-
* The fair server (sole dl_server) does not account for real-time
1539-
* workload because it is running fair work.
1538+
* The dl_server does not account for real-time workload because it
1539+
* is running fair work.
15401540
*/
1541-
if (dl_se == &rq->fair_server)
1541+
if (dl_se->dl_server)
15421542
return;
15431543

15441544
#ifdef CONFIG_RT_GROUP_SCHED
@@ -1573,9 +1573,9 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
15731573
* In the non-defer mode, the idle time is not accounted, as the
15741574
* server provides a guarantee.
15751575
*
1576-
* If the dl_server is in defer mode, the idle time is also considered
1577-
* as time available for the fair server, avoiding a penalty for the
1578-
* rt scheduler that did not consumed that time.
1576+
* If the dl_server is in defer mode, the idle time is also considered as
1577+
* time available for the dl_server, avoiding a penalty for the rt
1578+
* scheduler that did not consumed that time.
15791579
*/
15801580
void dl_server_update_idle(struct sched_dl_entity *dl_se, s64 delta_exec)
15811581
{
@@ -1860,6 +1860,18 @@ void sched_init_dl_servers(void)
18601860
dl_se->dl_server = 1;
18611861
dl_se->dl_defer = 1;
18621862
setup_new_dl_entity(dl_se);
1863+
1864+
#ifdef CONFIG_SCHED_CLASS_EXT
1865+
dl_se = &rq->ext_server;
1866+
1867+
WARN_ON(dl_server(dl_se));
1868+
1869+
dl_server_apply_params(dl_se, runtime, period, 1);
1870+
1871+
dl_se->dl_server = 1;
1872+
dl_se->dl_defer = 1;
1873+
setup_new_dl_entity(dl_se);
1874+
#endif
18631875
}
18641876
}
18651877

@@ -3198,6 +3210,36 @@ void dl_add_task_root_domain(struct task_struct *p)
31983210
raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
31993211
}
32003212

3213+
static void dl_server_add_bw(struct root_domain *rd, int cpu)
3214+
{
3215+
struct sched_dl_entity *dl_se;
3216+
3217+
dl_se = &cpu_rq(cpu)->fair_server;
3218+
if (dl_server(dl_se) && cpu_active(cpu))
3219+
__dl_add(&rd->dl_bw, dl_se->dl_bw, dl_bw_cpus(cpu));
3220+
3221+
#ifdef CONFIG_SCHED_CLASS_EXT
3222+
dl_se = &cpu_rq(cpu)->ext_server;
3223+
if (dl_server(dl_se) && cpu_active(cpu))
3224+
__dl_add(&rd->dl_bw, dl_se->dl_bw, dl_bw_cpus(cpu));
3225+
#endif
3226+
}
3227+
3228+
static u64 dl_server_read_bw(int cpu)
3229+
{
3230+
u64 dl_bw = 0;
3231+
3232+
if (cpu_rq(cpu)->fair_server.dl_server)
3233+
dl_bw += cpu_rq(cpu)->fair_server.dl_bw;
3234+
3235+
#ifdef CONFIG_SCHED_CLASS_EXT
3236+
if (cpu_rq(cpu)->ext_server.dl_server)
3237+
dl_bw += cpu_rq(cpu)->ext_server.dl_bw;
3238+
#endif
3239+
3240+
return dl_bw;
3241+
}
3242+
32013243
void dl_clear_root_domain(struct root_domain *rd)
32023244
{
32033245
int i;
@@ -3216,12 +3258,8 @@ void dl_clear_root_domain(struct root_domain *rd)
32163258
* dl_servers are not tasks. Since dl_add_task_root_domain ignores
32173259
* them, we need to account for them here explicitly.
32183260
*/
3219-
for_each_cpu(i, rd->span) {
3220-
struct sched_dl_entity *dl_se = &cpu_rq(i)->fair_server;
3221-
3222-
if (dl_server(dl_se) && cpu_active(i))
3223-
__dl_add(&rd->dl_bw, dl_se->dl_bw, dl_bw_cpus(i));
3224-
}
3261+
for_each_cpu(i, rd->span)
3262+
dl_server_add_bw(rd, i);
32253263
}
32263264

32273265
void dl_clear_root_domain_cpu(int cpu)
@@ -3720,7 +3758,7 @@ static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
37203758
unsigned long flags, cap;
37213759
struct dl_bw *dl_b;
37223760
bool overflow = 0;
3723-
u64 fair_server_bw = 0;
3761+
u64 dl_server_bw = 0;
37243762

37253763
rcu_read_lock_sched();
37263764
dl_b = dl_bw_of(cpu);
@@ -3753,27 +3791,26 @@ static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
37533791
cap -= arch_scale_cpu_capacity(cpu);
37543792

37553793
/*
3756-
* cpu is going offline and NORMAL tasks will be moved away
3757-
* from it. We can thus discount dl_server bandwidth
3758-
* contribution as it won't need to be servicing tasks after
3759-
* the cpu is off.
3794+
* cpu is going offline and NORMAL and EXT tasks will be
3795+
* moved away from it. We can thus discount dl_server
3796+
* bandwidth contribution as it won't need to be servicing
3797+
* tasks after the cpu is off.
37603798
*/
3761-
if (cpu_rq(cpu)->fair_server.dl_server)
3762-
fair_server_bw = cpu_rq(cpu)->fair_server.dl_bw;
3799+
dl_server_bw = dl_server_read_bw(cpu);
37633800

37643801
/*
37653802
* Not much to check if no DEADLINE bandwidth is present.
37663803
* dl_servers we can discount, as tasks will be moved out the
37673804
* offlined CPUs anyway.
37683805
*/
3769-
if (dl_b->total_bw - fair_server_bw > 0) {
3806+
if (dl_b->total_bw - dl_server_bw > 0) {
37703807
/*
37713808
* Leaving at least one CPU for DEADLINE tasks seems a
37723809
* wise thing to do. As said above, cpu is not offline
37733810
* yet, so account for that.
37743811
*/
37753812
if (dl_bw_cpus(cpu) - 1)
3776-
overflow = __dl_overflow(dl_b, cap, fair_server_bw, 0);
3813+
overflow = __dl_overflow(dl_b, cap, dl_server_bw, 0);
37773814
else
37783815
overflow = 1;
37793816
}

kernel/sched/ext.c

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -958,6 +958,8 @@ static void update_curr_scx(struct rq *rq)
958958
if (!curr->scx.slice)
959959
touch_core_sched(rq, curr);
960960
}
961+
962+
dl_server_update(&rq->ext_server, delta_exec);
961963
}
962964

963965
static bool scx_dsq_priq_less(struct rb_node *node_a,
@@ -1501,6 +1503,10 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags
15011503
if (enq_flags & SCX_ENQ_WAKEUP)
15021504
touch_core_sched(rq, p);
15031505

1506+
/* Start dl_server if this is the first task being enqueued */
1507+
if (rq->scx.nr_running == 1)
1508+
dl_server_start(&rq->ext_server);
1509+
15041510
do_enqueue_task(rq, p, enq_flags, sticky_cpu);
15051511
out:
15061512
rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
@@ -2512,6 +2518,33 @@ static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf)
25122518
return do_pick_task_scx(rq, rf, false);
25132519
}
25142520

2521+
/*
2522+
* Select the next task to run from the ext scheduling class.
2523+
*
2524+
* Use do_pick_task_scx() directly with @force_scx enabled, since the
2525+
* dl_server must always select a sched_ext task.
2526+
*/
2527+
static struct task_struct *
2528+
ext_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf)
2529+
{
2530+
if (!scx_enabled())
2531+
return NULL;
2532+
2533+
return do_pick_task_scx(dl_se->rq, rf, true);
2534+
}
2535+
2536+
/*
2537+
* Initialize the ext server deadline entity.
2538+
*/
2539+
void ext_server_init(struct rq *rq)
2540+
{
2541+
struct sched_dl_entity *dl_se = &rq->ext_server;
2542+
2543+
init_dl_entity(dl_se);
2544+
2545+
dl_server_init(dl_se, rq, ext_server_pick_task);
2546+
}
2547+
25152548
#ifdef CONFIG_SCHED_CORE
25162549
/**
25172550
* scx_prio_less - Task ordering for core-sched

kernel/sched/idle.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -537,6 +537,9 @@ static void update_curr_idle(struct rq *rq)
537537
se->exec_start = now;
538538

539539
dl_server_update_idle(&rq->fair_server, delta_exec);
540+
#ifdef CONFIG_SCHED_CLASS_EXT
541+
dl_server_update_idle(&rq->ext_server, delta_exec);
542+
#endif
540543
}
541544

542545
/*

kernel/sched/sched.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -414,6 +414,7 @@ extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
414414
extern void sched_init_dl_servers(void);
415415

416416
extern void fair_server_init(struct rq *rq);
417+
extern void ext_server_init(struct rq *rq);
417418
extern void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq);
418419
extern int dl_server_apply_params(struct sched_dl_entity *dl_se,
419420
u64 runtime, u64 period, bool init);
@@ -1171,6 +1172,7 @@ struct rq {
11711172
struct dl_rq dl;
11721173
#ifdef CONFIG_SCHED_CLASS_EXT
11731174
struct scx_rq scx;
1175+
struct sched_dl_entity ext_server;
11741176
#endif
11751177

11761178
struct sched_dl_entity fair_server;

kernel/sched/topology.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -508,6 +508,11 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
508508
if (rq->fair_server.dl_server)
509509
__dl_server_attach_root(&rq->fair_server, rq);
510510

511+
#ifdef CONFIG_SCHED_CLASS_EXT
512+
if (rq->ext_server.dl_server)
513+
__dl_server_attach_root(&rq->ext_server, rq);
514+
#endif
515+
511516
rq_unlock_irqrestore(rq, &rf);
512517

513518
if (old_rd)

0 commit comments

Comments
 (0)