Commit 2f7a0f5

Author: Peter Zijlstra
sched/deadline: Move bandwidth accounting into {en,de}queue_dl_entity
In preparation of introducing !task sched_dl_entity; move the bandwidth
accounting into {en,de}queue_dl_entity().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Daniel Bristot de Oliveira <bristot@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Phil Auld <pauld@redhat.com>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Link: https://lkml.kernel.org/r/a86dccbbe44e021b8771627e1dae01a69b73466d.1699095159.git.bristot@kernel.org
1 parent 9e07d45 commit 2f7a0f5

2 files changed

Lines changed: 78 additions & 58 deletions
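The heart of the change: bandwidth accounting that used to live in the task-level enqueue_task_dl()/dequeue_task_dl() wrappers moves into the entity-level {en,de}queue_dl_entity(), with the wrappers translating p->on_rq == TASK_ON_RQ_MIGRATING into the new ENQUEUE_MIGRATING/DEQUEUE_MIGRATING flags. Below is a standalone toy reduction of the enqueue side (simplified types and arithmetic, not kernel code verbatim):

#include <stdio.h>

/* Toy model of the pattern introduced by this commit: the task-level
 * wrapper translates p->on_rq into an explicit flag, and the
 * entity-level helper does the bandwidth accounting. All types and
 * values here are a reduced illustration, not the kernel's. */

#define ENQUEUE_RESTORE      0x02
#define ENQUEUE_MIGRATING    0x100
#define TASK_ON_RQ_MIGRATING 2

struct dl_rq { int running_bw; int rq_bw; };
struct sched_dl_entity { struct dl_rq *dl_rq; int bw; };
struct task { struct sched_dl_entity dl; int on_rq; };

static void add_rq_bw(struct sched_dl_entity *se)      { se->dl_rq->rq_bw      += se->bw; }
static void add_running_bw(struct sched_dl_entity *se) { se->dl_rq->running_bw += se->bw; }

/* Entity-level: the accounting now lives here. */
static void enqueue_dl_entity(struct sched_dl_entity *se, int flags)
{
	if (flags & (ENQUEUE_RESTORE | ENQUEUE_MIGRATING)) {
		add_rq_bw(se);
		add_running_bw(se);
	}
	/* ... throttling checks and the actual rb-tree insertion elided ... */
}

/* Task-level wrapper: only translates migration state into a flag. */
static void enqueue_task_dl(struct task *p, int flags)
{
	if (p->on_rq == TASK_ON_RQ_MIGRATING)
		flags |= ENQUEUE_MIGRATING;
	enqueue_dl_entity(&p->dl, flags);
}

int main(void)
{
	struct dl_rq rq = { 0, 0 };
	struct task p = { { &rq, 5 }, TASK_ON_RQ_MIGRATING };

	enqueue_task_dl(&p, 0);
	printf("running_bw=%d rq_bw=%d\n", rq.running_bw, rq.rq_bw); /* 5 5 */
	return 0;
}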


kernel/sched/deadline.c

Lines changed: 72 additions & 58 deletions
@@ -391,12 +391,12 @@ static void __dl_clear_params(struct sched_dl_entity *dl_se);
  * up, and checks if the task is still in the "ACTIVE non contending"
  * state or not (in the second case, it updates running_bw).
  */
-static void task_non_contending(struct task_struct *p)
+static void task_non_contending(struct sched_dl_entity *dl_se)
 {
-	struct sched_dl_entity *dl_se = &p->dl;
 	struct hrtimer *timer = &dl_se->inactive_timer;
 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 	struct rq *rq = rq_of_dl_rq(dl_rq);
+	struct task_struct *p = dl_task_of(dl_se);
 	s64 zerolag_time;

 	/*
@@ -428,13 +428,14 @@ static void task_non_contending(struct task_struct *p)
 	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
 		if (dl_task(p))
 			sub_running_bw(dl_se, dl_rq);
+
 		if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
 			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

 			if (READ_ONCE(p->__state) == TASK_DEAD)
-				sub_rq_bw(&p->dl, &rq->dl);
+				sub_rq_bw(dl_se, &rq->dl);
 			raw_spin_lock(&dl_b->lock);
-			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+			__dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
 			raw_spin_unlock(&dl_b->lock);
 			__dl_clear_params(dl_se);
 		}
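With task_non_contending() now taking the sched_dl_entity, the owning task is recovered via dl_task_of(), which (for as long as every entity is still embedded in a task) is the usual container_of() pattern. A minimal standalone illustration with toy types:

#include <stddef.h>
#include <assert.h>

/* Toy mirror of the dl_task_of() relationship: the entity is embedded
 * in the task, so container_of() recovers the owner. Types are a
 * reduced sketch, not the kernel's. */
struct sched_dl_entity { int dummy; };
struct task_struct { int pid; struct sched_dl_entity dl; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

int main(void)
{
	struct task_struct p = { .pid = 42 };

	/* Passing &p.dl around is enough to get back to p. */
	assert(dl_task_of(&p.dl) == &p && dl_task_of(&p.dl)->pid == 42);
	return 0;
}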
@@ -1601,6 +1602,41 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)

 	update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);

+	/*
+	 * Check if a constrained deadline task was activated
+	 * after the deadline but before the next period.
+	 * If that is the case, the task will be throttled and
+	 * the replenishment timer will be set to the next period.
+	 */
+	if (!dl_se->dl_throttled && !dl_is_implicit(dl_se))
+		dl_check_constrained_dl(dl_se);
+
+	if (flags & (ENQUEUE_RESTORE|ENQUEUE_MIGRATING)) {
+		struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+
+		add_rq_bw(dl_se, dl_rq);
+		add_running_bw(dl_se, dl_rq);
+	}
+
+	/*
+	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
+	 * its budget it needs a replenishment and, since it now is on
+	 * its rq, the bandwidth timer callback (which clearly has not
+	 * run yet) will take care of this.
+	 * However, the active utilization does not depend on the fact
+	 * that the task is on the runqueue or not (but depends on the
+	 * task's state - in GRUB parlance, "inactive" vs "active contending").
+	 * In other words, even if a task is throttled its utilization must
+	 * be counted in the active utilization; hence, we need to call
+	 * add_running_bw().
+	 */
+	if (dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
+		if (flags & ENQUEUE_WAKEUP)
+			task_contending(dl_se, flags);
+
+		return;
+	}
+
 	/*
 	 * If this is a wakeup or a new instance, the scheduling
 	 * parameters of the task might need updating. Otherwise,
@@ -1620,9 +1656,28 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
 		__enqueue_dl_entity(dl_se);
 }

-static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
+static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags)
 {
 	__dequeue_dl_entity(dl_se);
+
+	if (flags & (DEQUEUE_SAVE|DEQUEUE_MIGRATING)) {
+		struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+
+		sub_running_bw(dl_se, dl_rq);
+		sub_rq_bw(dl_se, dl_rq);
+	}
+
+	/*
+	 * This check allows to start the inactive timer (or to immediately
+	 * decrease the active utilization, if needed) in two cases:
+	 * when the task blocks and when it is terminating
+	 * (p->state == TASK_DEAD). We can handle the two cases in the same
+	 * way, because from GRUB's point of view the same thing is happening
+	 * (the task moves from "active contending" to "active non contending"
+	 * or "inactive")
+	 */
+	if (flags & DEQUEUE_SLEEP)
+		task_non_contending(dl_se);
 }

 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
@@ -1667,76 +1722,35 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 		return;
 	}

-	/*
-	 * Check if a constrained deadline task was activated
-	 * after the deadline but before the next period.
-	 * If that is the case, the task will be throttled and
-	 * the replenishment timer will be set to the next period.
-	 */
-	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
-		dl_check_constrained_dl(&p->dl);
-
-	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
-		add_rq_bw(&p->dl, &rq->dl);
-		add_running_bw(&p->dl, &rq->dl);
-	}
-
-	/*
-	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
-	 * its budget it needs a replenishment and, since it now is on
-	 * its rq, the bandwidth timer callback (which clearly has not
-	 * run yet) will take care of this.
-	 * However, the active utilization does not depend on the fact
-	 * that the task is on the runqueue or not (but depends on the
-	 * task's state - in GRUB parlance, "inactive" vs "active contending").
-	 * In other words, even if a task is throttled its utilization must
-	 * be counted in the active utilization; hence, we need to call
-	 * add_running_bw().
-	 */
-	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
-		if (flags & ENQUEUE_WAKEUP)
-			task_contending(&p->dl, flags);
-
-		return;
-	}
-
 	check_schedstat_required();
 	update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);

+	if (p->on_rq == TASK_ON_RQ_MIGRATING)
+		flags |= ENQUEUE_MIGRATING;
+
 	enqueue_dl_entity(&p->dl, flags);

-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
 		enqueue_pushable_dl_task(rq, p);
 }

 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_stats_dequeue_dl(&rq->dl, &p->dl, flags);
-	dequeue_dl_entity(&p->dl);
-	dequeue_pushable_dl_task(rq, p);
+	dequeue_dl_entity(&p->dl, flags);
+
+	if (!p->dl.dl_throttled)
+		dequeue_pushable_dl_task(rq, p);
 }

 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_curr_dl(rq);
-	__dequeue_task_dl(rq, p, flags);

-	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
-		sub_running_bw(&p->dl, &rq->dl);
-		sub_rq_bw(&p->dl, &rq->dl);
-	}
+	if (p->on_rq == TASK_ON_RQ_MIGRATING)
+		flags |= DEQUEUE_MIGRATING;

-	/*
-	 * This check allows to start the inactive timer (or to immediately
-	 * decrease the active utilization, if needed) in two cases:
-	 * when the task blocks and when it is terminating
-	 * (p->state == TASK_DEAD). We can handle the two cases in the same
-	 * way, because from GRUB's point of view the same thing is happening
-	 * (the task moves from "active contending" to "active non contending"
-	 * or "inactive")
-	 */
-	if (flags & DEQUEUE_SLEEP)
-		task_non_contending(p);
+	__dequeue_task_dl(rq, p, flags);
 }

 /*
@@ -2551,7 +2565,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
 	 * will reset the task parameters.
 	 */
 	if (task_on_rq_queued(p) && p->dl.dl_runtime)
-		task_non_contending(p);
+		task_non_contending(&p->dl);

 	/*
 	 * In case a task is setscheduled out from SCHED_DEADLINE we need to
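The dequeue side is symmetric. Continuing the toy reduction from above (again hypothetical types; here task_non_contending() is collapsed to an immediate decrease, whereas the real code may instead arm the inactive timer at the zero-lag time):

#include <stdio.h>

/* Toy mirror of the dequeue side: the wrapper translates migration
 * state, the entity helper does all the bandwidth accounting. */
#define DEQUEUE_SLEEP        0x01
#define DEQUEUE_SAVE         0x02
#define DEQUEUE_MIGRATING    0x100
#define TASK_ON_RQ_MIGRATING 2

struct dl_rq { int running_bw; int rq_bw; };
struct sched_dl_entity { struct dl_rq *dl_rq; int bw; };
struct task { struct sched_dl_entity dl; int on_rq; };

static void sub_running_bw(struct sched_dl_entity *se) { se->dl_rq->running_bw -= se->bw; }
static void sub_rq_bw(struct sched_dl_entity *se)      { se->dl_rq->rq_bw      -= se->bw; }

/* Stand-in for the GRUB bookkeeping started on sleep; the kernel may
 * defer the decrease via the inactive timer instead. */
static void task_non_contending(struct sched_dl_entity *se)
{
	sub_running_bw(se);
}

static void dequeue_dl_entity(struct sched_dl_entity *se, int flags)
{
	/* ... rb-tree removal elided ... */
	if (flags & (DEQUEUE_SAVE | DEQUEUE_MIGRATING)) {
		sub_running_bw(se);
		sub_rq_bw(se);
	}
	if (flags & DEQUEUE_SLEEP)
		task_non_contending(se);
}

static void dequeue_task_dl(struct task *p, int flags)
{
	/* The wrapper only translates migration state into a flag. */
	if (p->on_rq == TASK_ON_RQ_MIGRATING)
		flags |= DEQUEUE_MIGRATING;
	dequeue_dl_entity(&p->dl, flags);
}

int main(void)
{
	struct dl_rq rq = { 5, 5 };
	struct task p = { { &rq, 5 }, TASK_ON_RQ_MIGRATING };

	dequeue_task_dl(&p, 0);
	printf("running_bw=%d rq_bw=%d\n", rq.running_bw, rq.rq_bw); /* 0 0 */
	return 0;
}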

kernel/sched/sched.h

Lines changed: 6 additions & 0 deletions
@@ -2177,6 +2177,10 @@ extern const u32 sched_prio_to_wmult[40];
  * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
  *        in the runqueue.
  *
+ * NOCLOCK - skip the update_rq_clock() (avoids double updates)
+ *
+ * MIGRATION - p->on_rq == TASK_ON_RQ_MIGRATING (used for DEADLINE)
+ *
  * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
  * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
  * ENQUEUE_MIGRATED  - the task was migrated during wakeup
@@ -2187,6 +2191,7 @@ extern const u32 sched_prio_to_wmult[40];
 #define DEQUEUE_SAVE		0x02 /* Matches ENQUEUE_RESTORE */
 #define DEQUEUE_MOVE		0x04 /* Matches ENQUEUE_MOVE */
 #define DEQUEUE_NOCLOCK		0x08 /* Matches ENQUEUE_NOCLOCK */
+#define DEQUEUE_MIGRATING	0x100 /* Matches ENQUEUE_MIGRATING */

 #define ENQUEUE_WAKEUP		0x01
 #define ENQUEUE_RESTORE		0x02
@@ -2201,6 +2206,7 @@ extern const u32 sched_prio_to_wmult[40];
 #define ENQUEUE_MIGRATED	0x00
 #endif
 #define ENQUEUE_INITIAL		0x80
+#define ENQUEUE_MIGRATING	0x100

 #define RETRY_TASK		((void *)-1UL)
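One detail worth noting in these definitions: DEQUEUE_MIGRATING is given the same bit value (0x100) as ENQUEUE_MIGRATING, following the existing convention that DEQUEUE_SAVE matches ENQUEUE_RESTORE and DEQUEUE_NOCLOCK matches ENQUEUE_NOCLOCK. A hypothetical standalone check (not part of the patch) that the pairs stay in sync:

/* Hypothetical compile-time check: paired enqueue/dequeue flags must
 * share a bit value so code can translate one into the other with a
 * plain OR. Values copied from the diff above; C11 required. */
#define DEQUEUE_SAVE		0x02	/* Matches ENQUEUE_RESTORE */
#define DEQUEUE_NOCLOCK		0x08	/* Matches ENQUEUE_NOCLOCK */
#define DEQUEUE_MIGRATING	0x100	/* Matches ENQUEUE_MIGRATING */

#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_NOCLOCK		0x08
#define ENQUEUE_MIGRATING	0x100

_Static_assert(DEQUEUE_SAVE == ENQUEUE_RESTORE, "SAVE/RESTORE pair");
_Static_assert(DEQUEUE_NOCLOCK == ENQUEUE_NOCLOCK, "NOCLOCK pair");
_Static_assert(DEQUEUE_MIGRATING == ENQUEUE_MIGRATING, "MIGRATING pair");

int main(void) { return 0; }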
