Skip to content

Commit 997c021

Browse files
jiezhan0214 authored and vireshk committed
cpufreq: CPPC: Update FIE arch_freq_scale in ticks for non-PCC regs
Currently, the CPPC Frequency Invariance Engine (FIE) is invoked from the scheduler tick but defers the update of arch_freq_scale to a separate thread because cppc_get_perf_ctrs() would sleep if the CPC regs are in PCC. However, this deferred update mechanism is unnecessary and introduces extra overhead for non-PCC register spaces (e.g. System Memory or FFH), where accessing the regs won't sleep and can be safely performed from the tick context. Furthermore, with the CPPC FIE registered, it throws repeated warnings of "cppc_scale_freq_workfn: failed to read perf counters" on our platform with the CPC regs in System Memory and a power-down idle state enabled. That's because the remote CPU can be in a power-down idle state, and reading its perf counters returns 0. Moving the FIE handling back to the scheduler tick process makes the CPU handle its own perf counters, so it won't be idle and the issue would be inherently solved. To address the above issues, update arch_freq_scale directly in ticks for non-PCC regs and keep the deferred update mechanism for PCC regs. Reviewed-by: Lifeng Zheng <zhenglifeng1@huawei.com> Reviewed-by: Pierre Gondois <pierre.gondois@arm.com> Signed-off-by: Jie Zhan <zhanjie9@hisilicon.com> Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
1 parent 206b661 commit 997c021

1 file changed

Lines changed: 52 additions & 25 deletions

File tree

drivers/cpufreq/cppc_cpufreq.c

Lines changed: 52 additions & 25 deletions
Original file line number | Diff line number | Diff line change
@@ -54,31 +54,24 @@ static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
5454
struct cppc_perf_fb_ctrs *fb_ctrs_t1);
5555

5656
/**
57-
* cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
58-
* @work: The work item.
57+
* __cppc_scale_freq_tick - CPPC arch_freq_scale updater for frequency invariance
58+
* @cppc_fi: per-cpu CPPC FIE data.
5959
*
60-
* The CPPC driver register itself with the topology core to provide its own
60+
* The CPPC driver registers itself with the topology core to provide its own
6161
* implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
6262
* gets called by the scheduler on every tick.
6363
*
6464
* Note that the arch specific counters have higher priority than CPPC counters,
6565
* if available, though the CPPC driver doesn't need to have any special
6666
* handling for that.
67-
*
68-
* On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
69-
* reach here from hard-irq context), which then schedules a normal work item
70-
* and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
71-
* based on the counter updates since the last tick.
7267
*/
73-
static void cppc_scale_freq_workfn(struct kthread_work *work)
68+
static void __cppc_scale_freq_tick(struct cppc_freq_invariance *cppc_fi)
7469
{
75-
struct cppc_freq_invariance *cppc_fi;
7670
struct cppc_perf_fb_ctrs fb_ctrs = {0};
7771
struct cppc_cpudata *cpu_data;
7872
unsigned long local_freq_scale;
7973
u64 perf;
8074

81-
cppc_fi = container_of(work, struct cppc_freq_invariance, work);
8275
cpu_data = cppc_fi->cpu_data;
8376

8477
if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
@@ -102,6 +95,24 @@ static void cppc_scale_freq_workfn(struct kthread_work *work)
10295
per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
10396
}
10497

98+
static void cppc_scale_freq_tick(void)
99+
{
100+
__cppc_scale_freq_tick(&per_cpu(cppc_freq_inv, smp_processor_id()));
101+
}
102+
103+
static struct scale_freq_data cppc_sftd = {
104+
.source = SCALE_FREQ_SOURCE_CPPC,
105+
.set_freq_scale = cppc_scale_freq_tick,
106+
};
107+
108+
static void cppc_scale_freq_workfn(struct kthread_work *work)
109+
{
110+
struct cppc_freq_invariance *cppc_fi;
111+
112+
cppc_fi = container_of(work, struct cppc_freq_invariance, work);
113+
__cppc_scale_freq_tick(cppc_fi);
114+
}
115+
105116
static void cppc_irq_work(struct irq_work *irq_work)
106117
{
107118
struct cppc_freq_invariance *cppc_fi;
@@ -110,7 +121,14 @@ static void cppc_irq_work(struct irq_work *irq_work)
110121
kthread_queue_work(kworker_fie, &cppc_fi->work);
111122
}
112123

113-
static void cppc_scale_freq_tick(void)
124+
/*
125+
* Reading perf counters may sleep if the CPC regs are in PCC. Thus, we
126+
* schedule an irq work in scale_freq_tick (since we reach here from hard-irq
127+
* context), which then schedules a normal work item cppc_scale_freq_workfn()
128+
* that updates the per_cpu arch_freq_scale variable based on the counter
129+
* updates since the last tick.
130+
*/
131+
static void cppc_scale_freq_tick_pcc(void)
114132
{
115133
struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());
116134

@@ -121,13 +139,14 @@ static void cppc_scale_freq_tick(void)
121139
irq_work_queue(&cppc_fi->irq_work);
122140
}
123141

124-
static struct scale_freq_data cppc_sftd = {
142+
static struct scale_freq_data cppc_sftd_pcc = {
125143
.source = SCALE_FREQ_SOURCE_CPPC,
126-
.set_freq_scale = cppc_scale_freq_tick,
144+
.set_freq_scale = cppc_scale_freq_tick_pcc,
127145
};
128146

129147
static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
130148
{
149+
struct scale_freq_data *sftd = &cppc_sftd;
131150
struct cppc_freq_invariance *cppc_fi;
132151
int cpu, ret;
133152

@@ -138,8 +157,11 @@ static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
138157
cppc_fi = &per_cpu(cppc_freq_inv, cpu);
139158
cppc_fi->cpu = cpu;
140159
cppc_fi->cpu_data = policy->driver_data;
141-
kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
142-
init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
160+
if (cppc_perf_ctrs_in_pcc_cpu(cpu)) {
161+
kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
162+
init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
163+
sftd = &cppc_sftd_pcc;
164+
}
143165

144166
ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
145167

@@ -155,7 +177,7 @@ static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
155177
}
156178

157179
/* Register for freq-invariance */
158-
topology_set_scale_freq_source(&cppc_sftd, policy->cpus);
180+
topology_set_scale_freq_source(sftd, policy->cpus);
159181
}
160182

161183
/*
@@ -178,6 +200,8 @@ static void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
178200
topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, policy->related_cpus);
179201

180202
for_each_cpu(cpu, policy->related_cpus) {
203+
if (!cppc_perf_ctrs_in_pcc_cpu(cpu))
204+
continue;
181205
cppc_fi = &per_cpu(cppc_freq_inv, cpu);
182206
irq_work_sync(&cppc_fi->irq_work);
183207
kthread_cancel_work_sync(&cppc_fi->work);
@@ -206,6 +230,7 @@ static void cppc_fie_kworker_init(void)
206230
pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
207231
PTR_ERR(kworker_fie));
208232
fie_disabled = FIE_DISABLED;
233+
kworker_fie = NULL;
209234
return;
210235
}
211236

@@ -215,31 +240,33 @@ static void cppc_fie_kworker_init(void)
215240
ret);
216241
kthread_destroy_worker(kworker_fie);
217242
fie_disabled = FIE_DISABLED;
243+
kworker_fie = NULL;
218244
}
219245
}
220246

221247
static void __init cppc_freq_invariance_init(void)
222248
{
223-
if (fie_disabled != FIE_ENABLED && fie_disabled != FIE_DISABLED) {
224-
fie_disabled = FIE_ENABLED;
225-
if (cppc_perf_ctrs_in_pcc()) {
249+
bool perf_ctrs_in_pcc = cppc_perf_ctrs_in_pcc();
250+
251+
if (fie_disabled == FIE_UNSET) {
252+
if (perf_ctrs_in_pcc) {
226253
pr_info("FIE not enabled on systems with registers in PCC\n");
227254
fie_disabled = FIE_DISABLED;
255+
} else {
256+
fie_disabled = FIE_ENABLED;
228257
}
229258
}
230259

231-
if (fie_disabled)
260+
if (fie_disabled || !perf_ctrs_in_pcc)
232261
return;
233262

234263
cppc_fie_kworker_init();
235264
}
236265

237266
static void cppc_freq_invariance_exit(void)
238267
{
239-
if (fie_disabled)
240-
return;
241-
242-
kthread_destroy_worker(kworker_fie);
268+
if (kworker_fie)
269+
kthread_destroy_worker(kworker_fie);
243270
}
244271

245272
#else

0 commit comments

Comments (0)