@@ -369,7 +369,7 @@ static void intel_pstate_set_itmt_prio(int cpu)
369369 }
370370}
371371
372- static int intel_pstate_get_cppc_guranteed (int cpu )
372+ static int intel_pstate_get_cppc_guaranteed (int cpu )
373373{
374374 struct cppc_perf_caps cppc_perf ;
375375 int ret ;
@@ -385,7 +385,7 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
385385}
386386
387387#else /* CONFIG_ACPI_CPPC_LIB */
/*
 * Stub used when CONFIG_ACPI_CPPC_LIB is not set: ITMT priorities come
 * from CPPC data, so without the CPPC library there is nothing to do.
 *
 * Marked inline so that configurations which never call it do not trip
 * -Wunused-function on the empty definition.
 */
static inline void intel_pstate_set_itmt_prio(int cpu)
{
}
391391#endif /* CONFIG_ACPI_CPPC_LIB */
@@ -470,6 +470,20 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
470470
471471 acpi_processor_unregister_performance (policy -> cpu );
472472}
473+
474+ static bool intel_pstate_cppc_perf_valid (u32 perf , struct cppc_perf_caps * caps )
475+ {
476+ return perf && perf <= caps -> highest_perf && perf >= caps -> lowest_perf ;
477+ }
478+
479+ static bool intel_pstate_cppc_perf_caps (struct cpudata * cpu ,
480+ struct cppc_perf_caps * caps )
481+ {
482+ if (cppc_get_perf_caps (cpu -> cpu , caps ))
483+ return false;
484+
485+ return caps -> highest_perf && caps -> lowest_perf <= caps -> highest_perf ;
486+ }
473487#else /* CONFIG_ACPI */
474488static inline void intel_pstate_init_acpi_perf_limits (struct cpufreq_policy * policy )
475489{
@@ -486,26 +500,12 @@ static inline bool intel_pstate_acpi_pm_profile_server(void)
486500#endif /* CONFIG_ACPI */
487501
488502#ifndef CONFIG_ACPI_CPPC_LIB
489- static int intel_pstate_get_cppc_guranteed (int cpu )
503+ static inline int intel_pstate_get_cppc_guaranteed (int cpu )
490504{
491505 return - ENOTSUPP ;
492506}
493507#endif /* CONFIG_ACPI_CPPC_LIB */
494508
495- static bool intel_pstate_cppc_perf_valid (u32 perf , struct cppc_perf_caps * caps )
496- {
497- return perf && perf <= caps -> highest_perf && perf >= caps -> lowest_perf ;
498- }
499-
500- static bool intel_pstate_cppc_perf_caps (struct cpudata * cpu ,
501- struct cppc_perf_caps * caps )
502- {
503- if (cppc_get_perf_caps (cpu -> cpu , caps ))
504- return false;
505-
506- return caps -> highest_perf && caps -> lowest_perf <= caps -> highest_perf ;
507- }
508-
509509static void intel_pstate_hybrid_hwp_perf_ctl_parity (struct cpudata * cpu )
510510{
511511 pr_debug ("CPU%d: Using PERF_CTL scaling for HWP\n" , cpu -> cpu );
@@ -530,7 +530,6 @@ static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
530530 */
531531static void intel_pstate_hybrid_hwp_calibrate (struct cpudata * cpu )
532532{
533- struct cppc_perf_caps caps ;
534533 int perf_ctl_max_phys = cpu -> pstate .max_pstate_physical ;
535534 int perf_ctl_scaling = cpu -> pstate .perf_ctl_scaling ;
536535 int perf_ctl_turbo = pstate_funcs .get_turbo ();
@@ -548,33 +547,39 @@ static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu)
548547 pr_debug ("CPU%d: HWP_CAP guaranteed = %d\n" , cpu -> cpu , cpu -> pstate .max_pstate );
549548 pr_debug ("CPU%d: HWP_CAP highest = %d\n" , cpu -> cpu , cpu -> pstate .turbo_pstate );
550549
551- if (intel_pstate_cppc_perf_caps (cpu , & caps )) {
552- if (intel_pstate_cppc_perf_valid (caps .nominal_perf , & caps )) {
553- pr_debug ("CPU%d: Using CPPC nominal\n" , cpu -> cpu );
554-
555- /*
556- * If the CPPC nominal performance is valid, it can be
557- * assumed to correspond to cpu_khz.
558- */
559- if (caps .nominal_perf == perf_ctl_max_phys ) {
560- intel_pstate_hybrid_hwp_perf_ctl_parity (cpu );
561- return ;
562- }
563- scaling = DIV_ROUND_UP (cpu_khz , caps .nominal_perf );
564- } else if (intel_pstate_cppc_perf_valid (caps .guaranteed_perf , & caps )) {
565- pr_debug ("CPU%d: Using CPPC guaranteed\n" , cpu -> cpu );
566-
567- /*
568- * If the CPPC guaranteed performance is valid, it can
569- * be assumed to correspond to max_freq.
570- */
571- if (caps .guaranteed_perf == perf_ctl_max ) {
572- intel_pstate_hybrid_hwp_perf_ctl_parity (cpu );
573- return ;
550+ #ifdef CONFIG_ACPI
551+ if (IS_ENABLED (CONFIG_ACPI_CPPC_LIB )) {
552+ struct cppc_perf_caps caps ;
553+
554+ if (intel_pstate_cppc_perf_caps (cpu , & caps )) {
555+ if (intel_pstate_cppc_perf_valid (caps .nominal_perf , & caps )) {
556+ pr_debug ("CPU%d: Using CPPC nominal\n" , cpu -> cpu );
557+
558+ /*
559+ * If the CPPC nominal performance is valid, it
560+ * can be assumed to correspond to cpu_khz.
561+ */
562+ if (caps .nominal_perf == perf_ctl_max_phys ) {
563+ intel_pstate_hybrid_hwp_perf_ctl_parity (cpu );
564+ return ;
565+ }
566+ scaling = DIV_ROUND_UP (cpu_khz , caps .nominal_perf );
567+ } else if (intel_pstate_cppc_perf_valid (caps .guaranteed_perf , & caps )) {
568+ pr_debug ("CPU%d: Using CPPC guaranteed\n" , cpu -> cpu );
569+
570+ /*
571+ * If the CPPC guaranteed performance is valid,
572+ * it can be assumed to correspond to max_freq.
573+ */
574+ if (caps .guaranteed_perf == perf_ctl_max ) {
575+ intel_pstate_hybrid_hwp_perf_ctl_parity (cpu );
576+ return ;
577+ }
578+ scaling = DIV_ROUND_UP (max_freq , caps .guaranteed_perf );
574579 }
575- scaling = DIV_ROUND_UP (max_freq , caps .guaranteed_perf );
576580 }
577581 }
582+ #endif
578583 /*
579584 * If using the CPPC data to compute the HWP-to-frequency scaling factor
580585 * doesn't work, use the HWP_CAP guaranteed perf for this purpose with
@@ -944,7 +949,7 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
944949 struct cpudata * cpu = all_cpu_data [policy -> cpu ];
945950 int ratio , freq ;
946951
947- ratio = intel_pstate_get_cppc_guranteed (policy -> cpu );
952+ ratio = intel_pstate_get_cppc_guaranteed (policy -> cpu );
948953 if (ratio <= 0 ) {
949954 u64 cap ;
950955
0 commit comments