
Commit f125607

vingu-linaro authored and ingomolnar committed

sched/cpufreq: Rework iowait boost

Use the max value that has already been computed inside sugov_get_util()
to cap the iowait boost, and remove the dependency on uclamp_rq_util_with(),
which is no longer used.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Rafael J. Wysocki <rafael@kernel.org>
Link: https://lore.kernel.org/r/20231122133904.446032-3-vincent.guittot@linaro.org

1 parent 9c0b4bb, commit f125607

2 files changed: 14 additions and 75 deletions
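Before diving into the hunks, a minimal userspace sketch of the flow this commit establishes: sugov_iowait_apply() now returns the iowait boost in capacity scale, and sugov_get_util() folds it in with max() ahead of the final clamping. All toy_* names are illustrative stand-ins for the kernel functions in the diff below, not real API:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* kernel fixed-point shift: scale = 1024 */

struct toy_sugov_cpu {
	unsigned long iowait_boost;	/* boost state, SCHED_CAPACITY_SCALE units */
	unsigned long util;		/* result of the util update */
};

/* Stands in for sugov_iowait_apply(): return the boost in capacity scale. */
static unsigned long toy_iowait_apply(struct toy_sugov_cpu *sg_cpu,
				      unsigned long max_cap)
{
	if (!sg_cpu->iowait_boost)
		return 0;
	return (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
}

/* Stands in for sugov_get_util(): the boost is merged via max(util, boost). */
static void toy_get_util(struct toy_sugov_cpu *sg_cpu, unsigned long util,
			 unsigned long boost)
{
	sg_cpu->util = util > boost ? util : boost;
}

int main(void)
{
	struct toy_sugov_cpu cpu = { .iowait_boost = 512 };
	unsigned long boost = toy_iowait_apply(&cpu, 1024);	/* -> 512 */

	toy_get_util(&cpu, 300, boost);	/* util 300 is outweighed by boost 512 */
	printf("boost=%lu util=%lu\n", boost, cpu.util);
	return 0;
}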

kernel/sched/cpufreq_schedutil.c (14 additions, 15 deletions)
@@ -169,11 +169,12 @@ unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
 	return max(min, max);
 }
 
-static void sugov_get_util(struct sugov_cpu *sg_cpu)
+static void sugov_get_util(struct sugov_cpu *sg_cpu, unsigned long boost)
 {
 	unsigned long min, max, util = cpu_util_cfs_boost(sg_cpu->cpu);
 
 	util = effective_cpu_util(sg_cpu->cpu, util, &min, &max);
+	util = max(util, boost);
 	sg_cpu->bw_min = min;
 	sg_cpu->util = sugov_effective_cpu_perf(sg_cpu->cpu, util, min, max);
 }
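Note the ordering: the boost is merged with max() before sugov_effective_cpu_perf() applies the min/max values that effective_cpu_util() just computed, so the boosted utilization is capped by the same max as regular utilization; this is what makes the separate uclamp_rq_util_with() call removed below unnecessary.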
@@ -266,18 +267,16 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
  * This mechanism is designed to boost high frequently IO waiting tasks, while
  * being more conservative on tasks which does sporadic IO operations.
  */
-static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
+static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
 			       unsigned long max_cap)
 {
-	unsigned long boost;
-
 	/* No boost currently required */
 	if (!sg_cpu->iowait_boost)
-		return;
+		return 0;
 
 	/* Reset boost if the CPU appears to have been idle enough */
 	if (sugov_iowait_reset(sg_cpu, time, false))
-		return;
+		return 0;
 
 	if (!sg_cpu->iowait_boost_pending) {
 		/*
@@ -286,7 +285,7 @@ static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
 		sg_cpu->iowait_boost >>= 1;
 		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
 			sg_cpu->iowait_boost = 0;
-			return;
+			return 0;
 		}
 	}
 
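A worked example of this decay path (assuming this file's definition IOWAIT_BOOST_MIN = SCHED_CAPACITY_SCALE / 8 = 128): a boost of 1024 halves across successive non-pending updates to 512, 256, and 128; the next halving yields 64, which falls below IOWAIT_BOOST_MIN, so the boost is zeroed and the function now reports that with return 0 rather than a bare return.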
@@ -296,10 +295,7 @@ static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
 	 * sg_cpu->util is already in capacity scale; convert iowait_boost
 	 * into the same scale so we can compare.
 	 */
-	boost = (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
-	boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
-	if (sg_cpu->util < boost)
-		sg_cpu->util = boost;
+	return (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
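Concretely, with SCHED_CAPACITY_SHIFT = 10 (a fixed-point scale of 1024): a full boost of iowait_boost = 1024 on a CPU whose max_cap is 800 returns (1024 * 800) >> 10 = 800, so the returned boost can at most request that CPU's full capacity.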
@@ -329,6 +325,8 @@ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
 					      u64 time, unsigned long max_cap,
 					      unsigned int flags)
 {
+	unsigned long boost;
+
 	sugov_iowait_boost(sg_cpu, time, flags);
 	sg_cpu->last_update = time;
 
@@ -337,8 +335,8 @@ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
 	if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
 		return false;
 
-	sugov_get_util(sg_cpu);
-	sugov_iowait_apply(sg_cpu, time, max_cap);
+	boost = sugov_iowait_apply(sg_cpu, time, max_cap);
+	sugov_get_util(sg_cpu, boost);
 
 	return true;
 }
@@ -439,9 +437,10 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 
 	for_each_cpu(j, policy->cpus) {
 		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
+		unsigned long boost;
 
-		sugov_get_util(j_sg_cpu);
-		sugov_iowait_apply(j_sg_cpu, time, max_cap);
+		boost = sugov_iowait_apply(j_sg_cpu, time, max_cap);
+		sugov_get_util(j_sg_cpu, boost);
 
 		util = max(j_sg_cpu->util, util);
 	}
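The shared-policy path above reduces to: compute each CPU's boost, fold it into that CPU's util, then take the max across the policy. A self-contained sketch under the same toy names as earlier (illustrative only, not kernel API):

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define NCPUS			4

struct toy_cpu {
	unsigned long iowait_boost;	/* capacity-scale boost state */
	unsigned long cfs_util;		/* raw utilization of this CPU */
	unsigned long util;		/* result of the per-CPU update */
};

int main(void)
{
	struct toy_cpu cpus[NCPUS] = {
		{ .iowait_boost = 0,   .cfs_util = 300 },
		{ .iowait_boost = 512, .cfs_util = 100 },	/* IO-boosted CPU */
		{ .iowait_boost = 0,   .cfs_util = 450 },
		{ .iowait_boost = 0,   .cfs_util = 50 },
	};
	unsigned long max_cap = 1024, util = 0;

	for (int j = 0; j < NCPUS; j++) {
		/* boost = sugov_iowait_apply(j_sg_cpu, time, max_cap); */
		unsigned long boost =
			(cpus[j].iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;

		/* sugov_get_util(j_sg_cpu, boost); */
		cpus[j].util = cpus[j].cfs_util > boost ? cpus[j].cfs_util : boost;

		/* util = max(j_sg_cpu->util, util); */
		if (cpus[j].util > util)
			util = cpus[j].util;
	}
	printf("policy util = %lu\n", util);	/* 512, from the boosted CPU */
	return 0;
}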

kernel/sched/sched.h (0 additions, 60 deletions)
@@ -3058,59 +3058,6 @@ static inline bool uclamp_rq_is_idle(struct rq *rq)
 	return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
 }
 
-/**
- * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
- * @rq: The rq to clamp against. Must not be NULL.
- * @util: The util value to clamp.
- * @p: The task to clamp against. Can be NULL if you want to clamp
- *     against @rq only.
- *
- * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
- *
- * If sched_uclamp_used static key is disabled, then just return the util
- * without any clamping since uclamp aggregation at the rq level in the fast
- * path is disabled, rendering this operation a NOP.
- *
- * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It
- * will return the correct effective uclamp value of the task even if the
- * static key is disabled.
- */
-static __always_inline
-unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
-				  struct task_struct *p)
-{
-	unsigned long min_util = 0;
-	unsigned long max_util = 0;
-
-	if (!static_branch_likely(&sched_uclamp_used))
-		return util;
-
-	if (p) {
-		min_util = uclamp_eff_value(p, UCLAMP_MIN);
-		max_util = uclamp_eff_value(p, UCLAMP_MAX);
-
-		/*
-		 * Ignore last runnable task's max clamp, as this task will
-		 * reset it. Similarly, no need to read the rq's min clamp.
-		 */
-		if (uclamp_rq_is_idle(rq))
-			goto out;
-	}
-
-	min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN));
-	max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX));
-out:
-	/*
-	 * Since CPU's {min,max}_util clamps are MAX aggregated considering
-	 * RUNNABLE tasks with _different_ clamps, we can end up with an
-	 * inversion. Fix it now when the clamps are applied.
-	 */
-	if (unlikely(min_util >= max_util))
-		return min_util;
-
-	return clamp(util, min_util, max_util);
-}
-
 /* Is the rq being capped/throttled by uclamp_max? */
 static inline bool uclamp_rq_is_capped(struct rq *rq)
 {
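For reference, the clamping rule implemented by the helper removed above distils to the following standalone function (a userspace sketch; the static-key fast path and rq/task plumbing are stubbed away):

#include <stdio.h>

/*
 * Clamp util into [min_util, max_util]; when max-aggregation across
 * runnable tasks produces an inverted range (min_util >= max_util),
 * the min wins, as in the removed uclamp_rq_util_with().
 */
static unsigned long clamp_with_inversion(unsigned long util,
					  unsigned long min_util,
					  unsigned long max_util)
{
	if (min_util >= max_util)
		return min_util;
	if (util < min_util)
		return min_util;
	if (util > max_util)
		return max_util;
	return util;
}

int main(void)
{
	printf("%lu\n", clamp_with_inversion(700, 200, 600));	/* 600: clamped */
	printf("%lu\n", clamp_with_inversion(300, 800, 500));	/* 800: inversion */
	return 0;
}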
@@ -3148,13 +3095,6 @@ static inline unsigned long uclamp_eff_value(struct task_struct *p,
 	return SCHED_CAPACITY_SCALE;
 }
 
-static inline
-unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
-				  struct task_struct *p)
-{
-	return util;
-}
-
 static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
 
 static inline bool uclamp_is_used(void)
