@@ -391,12 +391,12 @@ static void __dl_clear_params(struct sched_dl_entity *dl_se);
  * up, and checks if the task is still in the "ACTIVE non contending"
  * state or not (in the second case, it updates running_bw).
  */
-static void task_non_contending(struct task_struct *p)
+static void task_non_contending(struct sched_dl_entity *dl_se)
 {
-	struct sched_dl_entity *dl_se = &p->dl;
 	struct hrtimer *timer = &dl_se->inactive_timer;
 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 	struct rq *rq = rq_of_dl_rq(dl_rq);
+	struct task_struct *p = dl_task_of(dl_se);
 	s64 zerolag_time;
 
 	/*
@@ -428,13 +428,14 @@ static void task_non_contending(struct task_struct *p)
 	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
 		if (dl_task(p))
 			sub_running_bw(dl_se, dl_rq);
+
 		if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
 			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
 
 			if (READ_ONCE(p->__state) == TASK_DEAD)
-				sub_rq_bw(&p->dl, &rq->dl);
+				sub_rq_bw(dl_se, &rq->dl);
 			raw_spin_lock(&dl_b->lock);
-			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+			__dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
 			raw_spin_unlock(&dl_b->lock);
 			__dl_clear_params(dl_se);
 		}
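Note: task_non_contending() now takes the scheduling entity directly and recovers the owning task only where it still needs one. The dl_task_of() helper it uses for that is, to the best of my reading of kernel/sched/deadline.c at this point in the series, a thin container_of() wrapper, roughly:

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	/* The dl entity is embedded in task_struct as the 'dl' member,
	 * so the enclosing task can be recovered by pointer arithmetic. */
	return container_of(dl_se, struct task_struct, dl);
}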
@@ -1601,6 +1602,41 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
 
 	update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);
 
+	/*
+	 * Check if a constrained deadline task was activated
+	 * after the deadline but before the next period.
+	 * If that is the case, the task will be throttled and
+	 * the replenishment timer will be set to the next period.
+	 */
+	if (!dl_se->dl_throttled && !dl_is_implicit(dl_se))
+		dl_check_constrained_dl(dl_se);
+
+	if (flags & (ENQUEUE_RESTORE|ENQUEUE_MIGRATING)) {
+		struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+
+		add_rq_bw(dl_se, dl_rq);
+		add_running_bw(dl_se, dl_rq);
+	}
+
+	/*
+	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
+	 * its budget it needs a replenishment and, since it now is on
+	 * its rq, the bandwidth timer callback (which clearly has not
+	 * run yet) will take care of this.
+	 * However, the active utilization does not depend on the fact
+	 * that the task is on the runqueue or not (but depends on the
+	 * task's state - in GRUB parlance, "inactive" vs "active contending").
+	 * In other words, even if a task is throttled its utilization must
+	 * be counted in the active utilization; hence, we need to call
+	 * add_running_bw().
+	 */
+	if (dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
+		if (flags & ENQUEUE_WAKEUP)
+			task_contending(dl_se, flags);
+
+		return;
+	}
+
 	/*
 	 * If this is a wakeup or a new instance, the scheduling
 	 * parameters of the task might need updating. Otherwise,
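Note: the add_rq_bw()/add_running_bw() calls above maintain GRUB's per-runqueue utilization sums. A minimal sketch of that bookkeeping, simplified from the real helpers (which additionally carry over/underflow warnings and cpufreq update hooks):

static inline void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	/* dl_bw is the entity's reserved utilization, dl_runtime/dl_period
	 * in fixed point; running_bw sums it over all entities that are
	 * "active" in the GRUB sense (contending or non contending). */
	dl_rq->running_bw += dl_bw;
}

static inline void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	/* The entity left the "active" set (0-lag time reached, or it
	 * died/left SCHED_DEADLINE), so its share no longer counts. */
	dl_rq->running_bw -= dl_bw;
}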
@@ -1620,9 +1656,28 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
 	__enqueue_dl_entity(dl_se);
 }
 
-static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
+static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags)
 {
 	__dequeue_dl_entity(dl_se);
+
+	if (flags & (DEQUEUE_SAVE|DEQUEUE_MIGRATING)) {
+		struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+
+		sub_running_bw(dl_se, dl_rq);
+		sub_rq_bw(dl_se, dl_rq);
+	}
+
+	/*
+	 * This check allows to start the inactive timer (or to immediately
+	 * decrease the active utilization, if needed) in two cases:
+	 * when the task blocks and when it is terminating
+	 * (p->state == TASK_DEAD). We can handle the two cases in the same
+	 * way, because from GRUB's point of view the same thing is happening
+	 * (the task moves from "active contending" to "active non contending"
+	 * or "inactive")
+	 */
+	if (flags & DEQUEUE_SLEEP)
+		task_non_contending(dl_se);
 }
 
 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
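Note: the ENQUEUE_RESTORE|ENQUEUE_MIGRATING and DEQUEUE_SAVE|DEQUEUE_MIGRATING tests depend on matching flag bits on the two paths. The companion kernel/sched/sched.h hunk of this series (not shown here) defines them along these lines; the exact values below are illustrative assumptions, only the pairing matters:

#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02	/* Matches ENQUEUE_RESTORE */
#define DEQUEUE_MIGRATING	0x100	/* Matches ENQUEUE_MIGRATING */

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_MIGRATING	0x100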
@@ -1667,76 +1722,35 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 		return;
 	}
 
-	/*
-	 * Check if a constrained deadline task was activated
-	 * after the deadline but before the next period.
-	 * If that is the case, the task will be throttled and
-	 * the replenishment timer will be set to the next period.
-	 */
-	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
-		dl_check_constrained_dl(&p->dl);
-
-	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
-		add_rq_bw(&p->dl, &rq->dl);
-		add_running_bw(&p->dl, &rq->dl);
-	}
-
-	/*
-	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
-	 * its budget it needs a replenishment and, since it now is on
-	 * its rq, the bandwidth timer callback (which clearly has not
-	 * run yet) will take care of this.
-	 * However, the active utilization does not depend on the fact
-	 * that the task is on the runqueue or not (but depends on the
-	 * task's state - in GRUB parlance, "inactive" vs "active contending").
-	 * In other words, even if a task is throttled its utilization must
-	 * be counted in the active utilization; hence, we need to call
-	 * add_running_bw().
-	 */
-	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
-		if (flags & ENQUEUE_WAKEUP)
-			task_contending(&p->dl, flags);
-
-		return;
-	}
-
 	check_schedstat_required();
 	update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
 
+	if (p->on_rq == TASK_ON_RQ_MIGRATING)
+		flags |= ENQUEUE_MIGRATING;
+
 	enqueue_dl_entity(&p->dl, flags);
 
-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
 		enqueue_pushable_dl_task(rq, p);
 }
 
 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_stats_dequeue_dl(&rq->dl, &p->dl, flags);
-	dequeue_dl_entity(&p->dl);
-	dequeue_pushable_dl_task(rq, p);
+	dequeue_dl_entity(&p->dl, flags);
+
+	if (!p->dl.dl_throttled)
+		dequeue_pushable_dl_task(rq, p);
 }
 
 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_curr_dl(rq);
-	__dequeue_task_dl(rq, p, flags);
 
-	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
-		sub_running_bw(&p->dl, &rq->dl);
-		sub_rq_bw(&p->dl, &rq->dl);
-	}
+	if (p->on_rq == TASK_ON_RQ_MIGRATING)
+		flags |= DEQUEUE_MIGRATING;
 
-	/*
-	 * This check allows to start the inactive timer (or to immediately
-	 * decrease the active utilization, if needed) in two cases:
-	 * when the task blocks and when it is terminating
-	 * (p->state == TASK_DEAD). We can handle the two cases in the same
-	 * way, because from GRUB's point of view the same thing is happening
-	 * (the task moves from "active contending" to "active non contending"
-	 * or "inactive")
-	 */
-	if (flags & DEQUEUE_SLEEP)
-		task_non_contending(p);
+	__dequeue_task_dl(rq, p, flags);
 }
 
 /*
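Note: to see what the MIGRATING translation buys, consider a hypothetical migration path (the function below is illustrative only, not part of this patch or of the kernel): the bandwidth now follows the task through an ordinary dequeue/enqueue pair, with no open-coded sub_*_bw()/add_*_bw() left at the call site.

static void move_dl_task(struct rq *src_rq, struct rq *dst_rq,
			 struct task_struct *p)
{
	p->on_rq = TASK_ON_RQ_MIGRATING;
	dequeue_task_dl(src_rq, p, 0);	/* on_rq == MIGRATING sets
					 * DEQUEUE_MIGRATING, so
					 * dequeue_dl_entity() does
					 * sub_running_bw() + sub_rq_bw() */
	set_task_cpu(p, cpu_of(dst_rq));
	enqueue_task_dl(dst_rq, p, 0);	/* likewise ENQUEUE_MIGRATING, so
					 * enqueue_dl_entity() does
					 * add_rq_bw() + add_running_bw() */
	p->on_rq = TASK_ON_RQ_QUEUED;
}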
@@ -2551,7 +2565,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
 	 * will reset the task parameters.
 	 */
 	if (task_on_rq_queued(p) && p->dl.dl_runtime)
-		task_non_contending(p);
+		task_non_contending(&p->dl);
 
 	/*
 	 * In case a task is setscheduled out from SCHED_DEADLINE we need to
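Note: for context on what task_non_contending() (now also reached from here with &p->dl) waits for: the "0-lag time" is the instant at which the entity's remaining lag reaches zero, i.e. the deadline minus the time needed to consume the remaining runtime at the reserved bandwidth, d - r * P / Q. As far as I can tell it is computed earlier in that function, unchanged by this patch, roughly as:

	zerolag_time = dl_se->deadline -
		 div64_long((dl_se->runtime * dl_se->dl_period),
			    dl_se->dl_runtime);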