Skip to content

Commit b753c32

Browse files
committed
Merge tag 'cpufreq-arm-updates-7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm
Pull CPUFreq Arm updates for 7.0 from Viresh Kumar:

 - Update cpufreq-dt-platdev list for tegra, qcom, TI (Aaron Kling,
   Dhruva Gole, and Konrad Dybcio).

 - Minor improvements to the cpufreq / cpumask rust implementation
   (Alexandre Courbot, Alice Ryhl, Tamir Duberstein, and Yilin Chen).

 - Add support for AM62L3 SoC to ti-cpufreq driver (Dhruva Gole).

 - Update FIE arch_freq_scale in ticks for non-PCC regs (Jie Zhan).

 - Other minor cleanups / improvements (Felix Gu, Juan Martinez,
   Luca Weiss, and Sergey Shtylyov).

* tag 'cpufreq-arm-updates-7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm:
  cpufreq: scmi: Fix device_node reference leak in scmi_cpu_domain_id()
  cpufreq: ti-cpufreq: add support for AM62L3 SoC
  cpufreq: dt-platdev: Add ti,am62l3 to blocklist
  cpufreq/amd-pstate: Add comment explaining nominal_perf usage for performance policy
  cpufreq: scmi: correct SCMI explanation
  cpufreq: dt-platdev: Block the driver from probing on more QC platforms
  rust: cpumask: rename methods of Cpumask for clarity and consistency
  cpufreq: CPPC: Update FIE arch_freq_scale in ticks for non-PCC regs
  cpufreq: CPPC: Factor out cppc_fie_kworker_init()
  ACPI: CPPC: Factor out and export per-cpu cppc_perf_ctrs_in_pcc_cpu()
  rust: cpufreq: replace `kernel::c_str!` with C-Strings
  cpufreq: Add Tegra186 and Tegra194 to cpufreq-dt-platdev blocklist
  dt-bindings: cpufreq: qcom-hw: document Milos CPUFREQ Hardware
  rust: cpufreq: add __rust_helper to helpers
  rust: cpufreq: always inline functions using build_assert with arguments
2 parents 39385cb + 0b7fbf9 commit b753c32

12 files changed

Lines changed: 164 additions & 66 deletions

File tree

Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@ properties:
3535
- description: v2 of CPUFREQ HW (EPSS)
3636
items:
3737
- enum:
38+
- qcom,milos-cpufreq-epss
3839
- qcom,qcs8300-cpufreq-epss
3940
- qcom,qdu1000-cpufreq-epss
4041
- qcom,sa8255p-cpufreq-epss
@@ -169,6 +170,7 @@ allOf:
169170
compatible:
170171
contains:
171172
enum:
173+
- qcom,milos-cpufreq-epss
172174
- qcom,qcs8300-cpufreq-epss
173175
- qcom,sc7280-cpufreq-epss
174176
- qcom,sm8250-cpufreq-epss

drivers/acpi/cppc_acpi.c

Lines changed: 27 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -1423,6 +1423,32 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
14231423
}
14241424
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
14251425

1426+
/**
1427+
* cppc_perf_ctrs_in_pcc_cpu - Check if any perf counters of a CPU are in PCC.
1428+
* @cpu: CPU on which to check perf counters.
1429+
*
1430+
* Return: true if any of the counters are in PCC regions, false otherwise
1431+
*/
1432+
bool cppc_perf_ctrs_in_pcc_cpu(unsigned int cpu)
1433+
{
1434+
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1435+
struct cpc_register_resource *ref_perf_reg;
1436+
1437+
/*
1438+
* If reference perf register is not supported then we should use the
1439+
* nominal perf value
1440+
*/
1441+
ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1442+
if (!CPC_SUPPORTED(ref_perf_reg))
1443+
ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1444+
1445+
return CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
1446+
CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
1447+
CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]) ||
1448+
CPC_IN_PCC(ref_perf_reg);
1449+
}
1450+
EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc_cpu);
1451+
14261452
/**
14271453
* cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
14281454
*
@@ -1437,27 +1463,7 @@ bool cppc_perf_ctrs_in_pcc(void)
14371463
int cpu;
14381464

14391465
for_each_online_cpu(cpu) {
1440-
struct cpc_register_resource *ref_perf_reg;
1441-
struct cpc_desc *cpc_desc;
1442-
1443-
cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1444-
1445-
if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
1446-
CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
1447-
CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]))
1448-
return true;
1449-
1450-
1451-
ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1452-
1453-
/*
1454-
* If reference perf register is not supported then we should
1455-
* use the nominal perf value
1456-
*/
1457-
if (!CPC_SUPPORTED(ref_perf_reg))
1458-
ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1459-
1460-
if (CPC_IN_PCC(ref_perf_reg))
1466+
if (cppc_perf_ctrs_in_pcc_cpu(cpu))
14611467
return true;
14621468
}
14631469

drivers/cpufreq/amd-pstate.c

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -636,6 +636,19 @@ static void amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
636636
WRITE_ONCE(cpudata->max_limit_freq, policy->max);
637637

638638
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
639+
/*
640+
* For performance policy, set MinPerf to nominal_perf rather than
641+
* highest_perf or lowest_nonlinear_perf.
642+
*
643+
* Per commit 0c411b39e4f4c, using highest_perf was observed
644+
* to cause frequency throttling on power-limited platforms, leading to
645+
* performance regressions. Using lowest_nonlinear_perf would limit
646+
* performance too much for HPC workloads requiring high frequency
647+
* operation and minimal wakeup latency from idle states.
648+
*
649+
* nominal_perf therefore provides a balance by avoiding throttling
650+
* while still maintaining enough performance for HPC workloads.
651+
*/
639652
perf.min_limit_perf = min(perf.nominal_perf, perf.max_limit_perf);
640653
WRITE_ONCE(cpudata->min_limit_freq, min(cpudata->nominal_freq, cpudata->max_limit_freq));
641654
} else {

drivers/cpufreq/cppc_cpufreq.c

Lines changed: 64 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -54,31 +54,24 @@ static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
5454
struct cppc_perf_fb_ctrs *fb_ctrs_t1);
5555

5656
/**
57-
* cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
58-
* @work: The work item.
57+
* __cppc_scale_freq_tick - CPPC arch_freq_scale updater for frequency invariance
58+
* @cppc_fi: per-cpu CPPC FIE data.
5959
*
60-
* The CPPC driver register itself with the topology core to provide its own
60+
* The CPPC driver registers itself with the topology core to provide its own
6161
* implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
6262
* gets called by the scheduler on every tick.
6363
*
6464
* Note that the arch specific counters have higher priority than CPPC counters,
6565
* if available, though the CPPC driver doesn't need to have any special
6666
* handling for that.
67-
*
68-
* On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
69-
* reach here from hard-irq context), which then schedules a normal work item
70-
* and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
71-
* based on the counter updates since the last tick.
7267
*/
73-
static void cppc_scale_freq_workfn(struct kthread_work *work)
68+
static void __cppc_scale_freq_tick(struct cppc_freq_invariance *cppc_fi)
7469
{
75-
struct cppc_freq_invariance *cppc_fi;
7670
struct cppc_perf_fb_ctrs fb_ctrs = {0};
7771
struct cppc_cpudata *cpu_data;
7872
unsigned long local_freq_scale;
7973
u64 perf;
8074

81-
cppc_fi = container_of(work, struct cppc_freq_invariance, work);
8275
cpu_data = cppc_fi->cpu_data;
8376

8477
if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
@@ -102,6 +95,24 @@ static void cppc_scale_freq_workfn(struct kthread_work *work)
10295
per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
10396
}
10497

98+
static void cppc_scale_freq_tick(void)
99+
{
100+
__cppc_scale_freq_tick(&per_cpu(cppc_freq_inv, smp_processor_id()));
101+
}
102+
103+
/*
 * Default scale-freq source, used when a CPU's perf counters are NOT in PCC
 * (see cppc_cpufreq_cpu_fie_init()): arch_freq_scale is updated directly
 * from the tick.
 */
static struct scale_freq_data cppc_sftd = {
	.source = SCALE_FREQ_SOURCE_CPPC,
	.set_freq_scale = cppc_scale_freq_tick,
};
107+
108+
static void cppc_scale_freq_workfn(struct kthread_work *work)
109+
{
110+
struct cppc_freq_invariance *cppc_fi;
111+
112+
cppc_fi = container_of(work, struct cppc_freq_invariance, work);
113+
__cppc_scale_freq_tick(cppc_fi);
114+
}
115+
105116
static void cppc_irq_work(struct irq_work *irq_work)
106117
{
107118
struct cppc_freq_invariance *cppc_fi;
@@ -110,7 +121,14 @@ static void cppc_irq_work(struct irq_work *irq_work)
110121
kthread_queue_work(kworker_fie, &cppc_fi->work);
111122
}
112123

113-
static void cppc_scale_freq_tick(void)
124+
/*
125+
* Reading perf counters may sleep if the CPC regs are in PCC. Thus, we
126+
* schedule an irq work in scale_freq_tick (since we reach here from hard-irq
127+
* context), which then schedules a normal work item cppc_scale_freq_workfn()
128+
* that updates the per_cpu arch_freq_scale variable based on the counter
129+
* updates since the last tick.
130+
*/
131+
static void cppc_scale_freq_tick_pcc(void)
114132
{
115133
struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());
116134

@@ -121,13 +139,14 @@ static void cppc_scale_freq_tick(void)
121139
irq_work_queue(&cppc_fi->irq_work);
122140
}
123141

124-
static struct scale_freq_data cppc_sftd = {
142+
static struct scale_freq_data cppc_sftd_pcc = {
125143
.source = SCALE_FREQ_SOURCE_CPPC,
126-
.set_freq_scale = cppc_scale_freq_tick,
144+
.set_freq_scale = cppc_scale_freq_tick_pcc,
127145
};
128146

129147
static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
130148
{
149+
struct scale_freq_data *sftd = &cppc_sftd;
131150
struct cppc_freq_invariance *cppc_fi;
132151
int cpu, ret;
133152

@@ -138,8 +157,11 @@ static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
138157
cppc_fi = &per_cpu(cppc_freq_inv, cpu);
139158
cppc_fi->cpu = cpu;
140159
cppc_fi->cpu_data = policy->driver_data;
141-
kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
142-
init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
160+
if (cppc_perf_ctrs_in_pcc_cpu(cpu)) {
161+
kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
162+
init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
163+
sftd = &cppc_sftd_pcc;
164+
}
143165

144166
ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
145167

@@ -155,7 +177,7 @@ static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
155177
}
156178

157179
/* Register for freq-invariance */
158-
topology_set_scale_freq_source(&cppc_sftd, policy->cpus);
180+
topology_set_scale_freq_source(sftd, policy->cpus);
159181
}
160182

161183
/*
@@ -178,13 +200,15 @@ static void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
178200
topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, policy->related_cpus);
179201

180202
for_each_cpu(cpu, policy->related_cpus) {
203+
if (!cppc_perf_ctrs_in_pcc_cpu(cpu))
204+
continue;
181205
cppc_fi = &per_cpu(cppc_freq_inv, cpu);
182206
irq_work_sync(&cppc_fi->irq_work);
183207
kthread_cancel_work_sync(&cppc_fi->work);
184208
}
185209
}
186210

187-
static void __init cppc_freq_invariance_init(void)
211+
static void cppc_fie_kworker_init(void)
188212
{
189213
struct sched_attr attr = {
190214
.size = sizeof(struct sched_attr),
@@ -201,22 +225,12 @@ static void __init cppc_freq_invariance_init(void)
201225
};
202226
int ret;
203227

204-
if (fie_disabled != FIE_ENABLED && fie_disabled != FIE_DISABLED) {
205-
fie_disabled = FIE_ENABLED;
206-
if (cppc_perf_ctrs_in_pcc()) {
207-
pr_info("FIE not enabled on systems with registers in PCC\n");
208-
fie_disabled = FIE_DISABLED;
209-
}
210-
}
211-
212-
if (fie_disabled)
213-
return;
214-
215228
kworker_fie = kthread_run_worker(0, "cppc_fie");
216229
if (IS_ERR(kworker_fie)) {
217230
pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
218231
PTR_ERR(kworker_fie));
219232
fie_disabled = FIE_DISABLED;
233+
kworker_fie = NULL;
220234
return;
221235
}
222236

@@ -226,15 +240,33 @@ static void __init cppc_freq_invariance_init(void)
226240
ret);
227241
kthread_destroy_worker(kworker_fie);
228242
fie_disabled = FIE_DISABLED;
243+
kworker_fie = NULL;
229244
}
230245
}
231246

232-
static void cppc_freq_invariance_exit(void)
247+
static void __init cppc_freq_invariance_init(void)
{
	bool perf_ctrs_in_pcc = cppc_perf_ctrs_in_pcc();

	/*
	 * Pick a default FIE policy if none was set yet: reading counters in
	 * PCC regions may sleep (see cppc_scale_freq_tick_pcc()), so FIE
	 * defaults to disabled on such systems.
	 */
	if (fie_disabled == FIE_UNSET) {
		fie_disabled = FIE_ENABLED;
		if (perf_ctrs_in_pcc) {
			pr_info("FIE not enabled on systems with registers in PCC\n");
			fie_disabled = FIE_DISABLED;
		}
	}

	/* The kworker is only needed for the deferred (PCC) update path. */
	if (!fie_disabled && perf_ctrs_in_pcc)
		cppc_fie_kworker_init();
}
265+
266+
static void cppc_freq_invariance_exit(void)
267+
{
268+
if (kworker_fie)
269+
kthread_destroy_worker(kworker_fie);
238270
}
239271

240272
#else

drivers/cpufreq/cpufreq-dt-platdev.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -147,6 +147,8 @@ static const struct of_device_id blocklist[] __initconst = {
147147
{ .compatible = "nvidia,tegra30", },
148148
{ .compatible = "nvidia,tegra114", },
149149
{ .compatible = "nvidia,tegra124", },
150+
{ .compatible = "nvidia,tegra186", },
151+
{ .compatible = "nvidia,tegra194", },
150152
{ .compatible = "nvidia,tegra210", },
151153
{ .compatible = "nvidia,tegra234", },
152154

@@ -169,8 +171,11 @@ static const struct of_device_id blocklist[] __initconst = {
169171
{ .compatible = "qcom,sdm845", },
170172
{ .compatible = "qcom,sdx75", },
171173
{ .compatible = "qcom,sm6115", },
174+
{ .compatible = "qcom,sm6125", },
175+
{ .compatible = "qcom,sm6150", },
172176
{ .compatible = "qcom,sm6350", },
173177
{ .compatible = "qcom,sm6375", },
178+
{ .compatible = "qcom,sm7125", },
174179
{ .compatible = "qcom,sm7225", },
175180
{ .compatible = "qcom,sm7325", },
176181
{ .compatible = "qcom,sm8150", },
@@ -191,6 +196,7 @@ static const struct of_device_id blocklist[] __initconst = {
191196
{ .compatible = "ti,am625", },
192197
{ .compatible = "ti,am62a7", },
193198
{ .compatible = "ti,am62d2", },
199+
{ .compatible = "ti,am62l3", },
194200
{ .compatible = "ti,am62p5", },
195201

196202
{ .compatible = "qcom,ipq5332", },

drivers/cpufreq/rcpufreq_dt.rs

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
//! Rust based implementation of the cpufreq-dt driver.
44
55
use kernel::{
6-
c_str,
76
clk::Clk,
87
cpu, cpufreq,
98
cpumask::CpumaskVar,
@@ -52,7 +51,7 @@ impl opp::ConfigOps for CPUFreqDTDriver {}
5251

5352
#[vtable]
5453
impl cpufreq::Driver for CPUFreqDTDriver {
55-
const NAME: &'static CStr = c_str!("cpufreq-dt");
54+
const NAME: &'static CStr = c"cpufreq-dt";
5655
const FLAGS: u16 = cpufreq::flags::NEED_INITIAL_FREQ_CHECK | cpufreq::flags::IS_COOLING_DEV;
5756
const BOOST_ENABLED: bool = true;
5857

@@ -197,7 +196,7 @@ kernel::of_device_table!(
197196
OF_TABLE,
198197
MODULE_OF_TABLE,
199198
<CPUFreqDTDriver as platform::Driver>::IdInfo,
200-
[(of::DeviceId::new(c_str!("operating-points-v2")), ())]
199+
[(of::DeviceId::new(c"operating-points-v2"), ())]
201200
);
202201

203202
impl platform::Driver for CPUFreqDTDriver {

drivers/cpufreq/scmi-cpufreq.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
// SPDX-License-Identifier: GPL-2.0
22
/*
3-
* System Control and Power Interface (SCMI) based CPUFreq Interface driver
3+
* System Control and Management Interface (SCMI) based CPUFreq Interface driver
44
*
55
* Copyright (C) 2018-2021 ARM Ltd.
66
* Sudeep Holla <sudeep.holla@arm.com>
@@ -101,6 +101,7 @@ static int scmi_cpu_domain_id(struct device *cpu_dev)
101101
return -EINVAL;
102102
}
103103

104+
of_node_put(domain_id.np);
104105
return domain_id.args[0];
105106
}
106107

0 commit comments

Comments
 (0)