Skip to content

Commit 85db0c0

Browse files
committed
Merge tag 'pm-6.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management fixes from Rafael Wysocki: "These revert a cpuidle menu governor commit leading to a performance regression, fix an amd-pstate driver regression introduced recently, and fix new conditional guard definitions for runtime PM. - Add missing _RET == 0 condition to recently introduced conditional guard definitions for runtime PM (Rafael Wysocki) - Revert a cpuidle menu governor change that introduced a serious performance regression on Chromebooks with Intel Jasper Lake processors (Rafael Wysocki) - Fix an amd-pstate driver regression leading to EPP=0 after hibernation (Mario Limonciello)" * tag 'pm-6.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: PM: runtime: Fix conditional guard definitions Revert "cpuidle: menu: Avoid discarding useful information" cpufreq/amd-pstate: Fix a regression leading to EPP 0 after hibernate
2 parents 942048d + b62bd2c commit 85db0c0

3 files changed

Lines changed: 18 additions & 17 deletions

File tree

drivers/cpufreq/amd-pstate.c

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1614,7 +1614,11 @@ static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
16141614
* min_perf value across kexec reboots. If this CPU is just onlined normally after this, the
16151615
* limits, epp and desired perf will get reset to the cached values in cpudata struct
16161616
*/
1617-
return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
1617+
return amd_pstate_update_perf(policy, perf.bios_min_perf,
1618+
FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
1619+
FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
1620+
FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
1621+
false);
16181622
}
16191623

16201624
static int amd_pstate_suspend(struct cpufreq_policy *policy)

drivers/cpuidle/governors/menu.c

Lines changed: 9 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -188,20 +188,17 @@ static unsigned int get_typical_interval(struct menu_device *data)
188188
*
189189
* This can deal with workloads that have long pauses interspersed
190190
* with sporadic activity with a bunch of short pauses.
191+
*
192+
* However, if the number of remaining samples is too small to exclude
193+
* any more outliers, allow the deepest available idle state to be
194+
* selected because there are systems where the time spent by CPUs in
195+
* deep idle states is correlated to the maximum frequency the CPUs
196+
* can get to. On those systems, shallow idle states should be avoided
197+
* unless there is a clear indication that the given CPU is most likely
198+
* going to be woken up shortly.
191199
*/
192-
if (divisor * 4 <= INTERVALS * 3) {
193-
/*
194-
* If there are sufficiently many data points still under
195-
* consideration after the outliers have been eliminated,
196-
* returning without a prediction would be a mistake because it
197-
* is likely that the next interval will not exceed the current
198-
* maximum, so return the latter in that case.
199-
*/
200-
if (divisor >= INTERVALS / 2)
201-
return max;
202-
200+
if (divisor * 4 <= INTERVALS * 3)
203201
return UINT_MAX;
204-
}
205202

206203
/* Update the thresholds for the next round. */
207204
if (avg - min > max - avg)

include/linux/pm_runtime.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -629,13 +629,13 @@ DEFINE_GUARD(pm_runtime_active_auto, struct device *,
629629
* device.
630630
*/
631631
DEFINE_GUARD_COND(pm_runtime_active, _try,
632-
pm_runtime_get_active(_T, RPM_TRANSPARENT))
632+
pm_runtime_get_active(_T, RPM_TRANSPARENT), _RET == 0)
633633
DEFINE_GUARD_COND(pm_runtime_active, _try_enabled,
634-
pm_runtime_resume_and_get(_T))
634+
pm_runtime_resume_and_get(_T), _RET == 0)
635635
DEFINE_GUARD_COND(pm_runtime_active_auto, _try,
636-
pm_runtime_get_active(_T, RPM_TRANSPARENT))
636+
pm_runtime_get_active(_T, RPM_TRANSPARENT), _RET == 0)
637637
DEFINE_GUARD_COND(pm_runtime_active_auto, _try_enabled,
638-
pm_runtime_resume_and_get(_T))
638+
pm_runtime_resume_and_get(_T), _RET == 0)
639639

640640
/**
641641
* pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.

0 commit comments

Comments (0)