@@ -5901,7 +5901,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 	/* Assume the next prioritized class is idle_sched_class */
 	if (!p) {
-		p = pick_task_idle(rq);
+		p = pick_task_idle(rq, rf);
 		put_prev_set_next_task(rq, prev, p);
 	}
 
@@ -5913,11 +5913,15 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 	for_each_active_class(class) {
 		if (class->pick_next_task) {
-			p = class->pick_next_task(rq, prev);
+			p = class->pick_next_task(rq, prev, rf);
+			if (unlikely(p == RETRY_TASK))
+				goto restart;
 			if (p)
 				return p;
 		} else {
-			p = class->pick_task(rq);
+			p = class->pick_task(rq, rf);
+			if (unlikely(p == RETRY_TASK))
+				goto restart;
 			if (p) {
 				put_prev_set_next_task(rq, prev, p);
 				return p;
@@ -5947,15 +5951,19 @@ static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
 	return a->core_cookie == b->core_cookie;
 }
 
-static inline struct task_struct *pick_task(struct rq *rq)
+/*
+ * Careful; this can return RETRY_TASK, it does not include the retry-loop
+ * itself due to the whole SMT pick retry thing below.
+ */
+static inline struct task_struct *pick_task(struct rq *rq, struct rq_flags *rf)
 {
 	const struct sched_class *class;
 	struct task_struct *p;
 
 	rq->dl_server = NULL;
 
 	for_each_active_class(class) {
-		p = class->pick_task(rq);
+		p = class->pick_task(rq, rf);
 		if (p)
 			return p;
 	}
@@ -5970,7 +5978,7 @@ static void queue_core_balance(struct rq *rq);
 static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
-	struct task_struct *next, *p, *max = NULL;
+	struct task_struct *next, *p, *max;
 	const struct cpumask *smt_mask;
 	bool fi_before = false;
 	bool core_clock_updated = (rq == rq->core);
@@ -6055,7 +6063,10 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	 * and there are no cookied tasks running on siblings.
 	 */
 	if (!need_sync) {
-		next = pick_task(rq);
+restart_single:
+		next = pick_task(rq, rf);
+		if (unlikely(next == RETRY_TASK))
+			goto restart_single;
 		if (!next->core_cookie) {
 			rq->core_pick = NULL;
 			rq->core_dl_server = NULL;
@@ -6075,6 +6086,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	 *
 	 * Tie-break prio towards the current CPU
 	 */
+restart_multi:
+	max = NULL;
 	for_each_cpu_wrap(i, smt_mask, cpu) {
 		rq_i = cpu_rq(i);
 
@@ -6086,7 +6099,11 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		if (i != cpu && (rq_i != rq->core || !core_clock_updated))
 			update_rq_clock(rq_i);
 
-		rq_i->core_pick = p = pick_task(rq_i);
+		p = pick_task(rq_i, rf);
+		if (unlikely(p == RETRY_TASK))
+			goto restart_multi;
+
+		rq_i->core_pick = p;
 		rq_i->core_dl_server = rq_i->dl_server;
 
 		if (!max || prio_less(max, p, fi_before))
@@ -6108,7 +6125,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		if (cookie)
 			p = sched_core_find(rq_i, cookie);
 		if (!p)
-			p = idle_sched_class.pick_task(rq_i);
+			p = idle_sched_class.pick_task(rq_i, rf);
 	}
 
 	rq_i->core_pick = p;