Skip to content

Commit bb6e89d

Browse files
committed
x86/aperfmperf: Make parts of the frequency invariance code unconditional
The frequency invariance support is currently limited to x86/64 and SMP, which is the vast majority of machines. arch_scale_freq_tick() is called every tick on all CPUs and reads the APERF and MPERF MSRs. The CPU frequency getter functions do the same via dedicated IPIs. While it could be argued that on systems where frequency invariance support is disabled (32bit, !SMP) the per tick read of the APERF and MPERF MSRs can be avoided, it does not make sense to keep the extra code and the resulting runtime issues of mass IPIs around. As a first step split out the non frequency invariance specific initialization code and the read MSR portion of arch_scale_freq_tick(). The rest of the code is still conditional and guarded with a static key. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Acked-by: Paul E. McKenney <paulmck@kernel.org> Link: https://lore.kernel.org/r/20220415161206.761988704@linutronix.de
1 parent 73a5fa7 commit bb6e89d

4 files changed

Lines changed: 41 additions & 31 deletions

File tree

arch/x86/include/asm/cpu.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,8 @@ extern int _debug_hotplug_cpu(int cpu, int action);
3636
#endif
3737
#endif
3838

39+
extern void ap_init_aperfmperf(void);
40+
3941
int mwait_usable(const struct cpuinfo_x86 *);
4042

4143
unsigned int x86_family(unsigned int sig);

arch/x86/include/asm/topology.h

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -217,13 +217,9 @@ extern void arch_scale_freq_tick(void);
217217

218218
extern void arch_set_max_freq_ratio(bool turbo_disabled);
219219
extern void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled);
220-
extern void bp_init_freq_invariance(void);
221-
extern void ap_init_freq_invariance(void);
222220
#else
223221
static inline void arch_set_max_freq_ratio(bool turbo_disabled) { }
224222
static inline void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled) { }
225-
static inline void bp_init_freq_invariance(void) { }
226-
static inline void ap_init_freq_invariance(void) { }
227223
#endif
228224

229225
#ifdef CONFIG_ACPI_CPPC_LIB

arch/x86/kernel/cpu/aperfmperf.c

Lines changed: 38 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
#include <linux/smp.h>
1818
#include <linux/syscore_ops.h>
1919

20+
#include <asm/cpu.h>
2021
#include <asm/cpu_device_id.h>
2122
#include <asm/intel-family.h>
2223

@@ -164,6 +165,17 @@ unsigned int arch_freq_get_on_cpu(int cpu)
164165
return per_cpu(samples.khz, cpu);
165166
}
166167

168+
static void init_counter_refs(void)
169+
{
170+
u64 aperf, mperf;
171+
172+
rdmsrl(MSR_IA32_APERF, aperf);
173+
rdmsrl(MSR_IA32_MPERF, mperf);
174+
175+
this_cpu_write(cpu_samples.aperf, aperf);
176+
this_cpu_write(cpu_samples.mperf, mperf);
177+
}
178+
167179
#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
168180
/*
169181
* APERF/MPERF frequency ratio computation.
@@ -405,17 +417,6 @@ static bool __init intel_set_max_freq_ratio(void)
405417
return true;
406418
}
407419

408-
static void init_counter_refs(void)
409-
{
410-
u64 aperf, mperf;
411-
412-
rdmsrl(MSR_IA32_APERF, aperf);
413-
rdmsrl(MSR_IA32_MPERF, mperf);
414-
415-
this_cpu_write(cpu_samples.aperf, aperf);
416-
this_cpu_write(cpu_samples.mperf, mperf);
417-
}
418-
419420
#ifdef CONFIG_PM_SLEEP
420421
static struct syscore_ops freq_invariance_syscore_ops = {
421422
.resume = init_counter_refs,
@@ -447,26 +448,15 @@ void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled)
447448
freq_invariance_enable();
448449
}
449450

450-
void __init bp_init_freq_invariance(void)
451+
static void __init bp_init_freq_invariance(void)
451452
{
452-
if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
453-
return;
454-
455-
init_counter_refs();
456-
457453
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
458454
return;
459455

460456
if (intel_set_max_freq_ratio())
461457
freq_invariance_enable();
462458
}
463459

464-
void ap_init_freq_invariance(void)
465-
{
466-
if (cpu_feature_enabled(X86_FEATURE_APERFMPERF))
467-
init_counter_refs();
468-
}
469-
470460
static void disable_freq_invariance_workfn(struct work_struct *work)
471461
{
472462
static_branch_disable(&arch_scale_freq_key);
@@ -481,6 +471,9 @@ static void scale_freq_tick(u64 acnt, u64 mcnt)
481471
{
482472
u64 freq_scale;
483473

474+
if (!arch_scale_freq_invariant())
475+
return;
476+
484477
if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
485478
goto error;
486479

@@ -501,13 +494,17 @@ static void scale_freq_tick(u64 acnt, u64 mcnt)
501494
pr_warn("Scheduler frequency invariance went wobbly, disabling!\n");
502495
schedule_work(&disable_freq_invariance_work);
503496
}
497+
#else
498+
static inline void bp_init_freq_invariance(void) { }
499+
static inline void scale_freq_tick(u64 acnt, u64 mcnt) { }
500+
#endif /* CONFIG_X86_64 && CONFIG_SMP */
504501

505502
void arch_scale_freq_tick(void)
506503
{
507504
struct aperfmperf *s = this_cpu_ptr(&cpu_samples);
508505
u64 acnt, mcnt, aperf, mperf;
509506

510-
if (!arch_scale_freq_invariant())
507+
if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
511508
return;
512509

513510
rdmsrl(MSR_IA32_APERF, aperf);
@@ -520,4 +517,20 @@ void arch_scale_freq_tick(void)
520517

521518
scale_freq_tick(acnt, mcnt);
522519
}
523-
#endif /* CONFIG_X86_64 && CONFIG_SMP */
520+
521+
static int __init bp_init_aperfmperf(void)
522+
{
523+
if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
524+
return 0;
525+
526+
init_counter_refs();
527+
bp_init_freq_invariance();
528+
return 0;
529+
}
530+
early_initcall(bp_init_aperfmperf);
531+
532+
void ap_init_aperfmperf(void)
533+
{
534+
if (cpu_feature_enabled(X86_FEATURE_APERFMPERF))
535+
init_counter_refs();
536+
}

arch/x86/kernel/smpboot.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -186,7 +186,7 @@ static void smp_callin(void)
186186
*/
187187
set_cpu_sibling_map(raw_smp_processor_id());
188188

189-
ap_init_freq_invariance();
189+
ap_init_aperfmperf();
190190

191191
/*
192192
* Get our bogomips.
@@ -1396,7 +1396,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
13961396
{
13971397
smp_prepare_cpus_common();
13981398

1399-
bp_init_freq_invariance();
14001399
smp_sanity_check();
14011400

14021401
switch (apic_intr_mode) {

0 commit comments

Comments (0)