|
8 | 8 | */ |
9 | 9 |
|
10 | 10 | #include <linux/module.h> |
| 11 | +#include <linux/cpu.h> |
| 12 | +#include <linux/cpufreq.h> |
| 13 | +#include <linux/cpumask.h> |
| 14 | +#include <linux/slab.h> |
11 | 15 | #include <linux/device.h> |
12 | 16 | #include <linux/devfreq.h> |
13 | 17 | #include "governor.h" |
14 | 18 |
|
15 | | -static int devfreq_passive_get_target_freq(struct devfreq *devfreq, |
| 19 | +#define HZ_PER_KHZ 1000 |
| 20 | + |
/*
 * get_target_freq_by_required_opp - translate a parent device frequency
 * into this device's frequency via the "required-opps" DT linkage.
 * @p_dev:       parent device whose OPP table is searched
 * @p_opp_table: OPP table of the parent device
 * @opp_table:   OPP table of the passive (child) device
 * @freq:        in: requested parent frequency; may be adjusted by
 *               devfreq_recommended_opp()
 *
 * Return: the translated child frequency, or 0 when any argument is
 * missing or no matching OPP pair exists.
 */
static unsigned long get_target_freq_by_required_opp(struct device *p_dev,
						struct opp_table *p_opp_table,
						struct opp_table *opp_table,
						unsigned long *freq)
{
	struct dev_pm_opp *parent_opp, *child_opp;
	unsigned long ret_freq = 0;

	if (!p_dev || !p_opp_table || !opp_table || !freq)
		return 0;

	/* Pick the parent OPP that best matches the requested frequency. */
	parent_opp = devfreq_recommended_opp(p_dev, freq, 0);
	if (IS_ERR(parent_opp))
		return 0;

	/* Map the parent OPP onto the child's table via required-opps. */
	child_opp = dev_pm_opp_xlate_required_opp(p_opp_table, opp_table,
						  parent_opp);
	dev_pm_opp_put(parent_opp);

	if (!IS_ERR(child_opp)) {
		ret_freq = dev_pm_opp_get_freq(child_opp);
		dev_pm_opp_put(child_opp);
	}

	return ret_freq;
}
| 47 | + |
| 48 | +static int get_target_freq_with_cpufreq(struct devfreq *devfreq, |
| 49 | + unsigned long *target_freq) |
| 50 | +{ |
| 51 | + struct devfreq_passive_data *p_data = |
| 52 | + (struct devfreq_passive_data *)devfreq->data; |
| 53 | + struct devfreq_cpu_data *parent_cpu_data; |
| 54 | + unsigned long cpu, cpu_cur, cpu_min, cpu_max, cpu_percent; |
| 55 | + unsigned long dev_min, dev_max; |
| 56 | + unsigned long freq = 0; |
| 57 | + |
| 58 | + for_each_online_cpu(cpu) { |
| 59 | + parent_cpu_data = p_data->parent_cpu_data[cpu]; |
| 60 | + if (!parent_cpu_data || parent_cpu_data->first_cpu != cpu) |
| 61 | + continue; |
| 62 | + |
| 63 | + /* Get target freq via required opps */ |
| 64 | + cpu_cur = parent_cpu_data->cur_freq * HZ_PER_KHZ; |
| 65 | + freq = get_target_freq_by_required_opp(parent_cpu_data->dev, |
| 66 | + parent_cpu_data->opp_table, |
| 67 | + devfreq->opp_table, &cpu_cur); |
| 68 | + if (freq) { |
| 69 | + *target_freq = max(freq, *target_freq); |
| 70 | + continue; |
| 71 | + } |
| 72 | + |
| 73 | + /* Use interpolation if required opps is not available */ |
| 74 | + devfreq_get_freq_range(devfreq, &dev_min, &dev_max); |
| 75 | + |
| 76 | + cpu_min = parent_cpu_data->min_freq; |
| 77 | + cpu_max = parent_cpu_data->max_freq; |
| 78 | + cpu_cur = parent_cpu_data->cur_freq; |
| 79 | + |
| 80 | + cpu_percent = ((cpu_cur - cpu_min) * 100) / (cpu_max - cpu_min); |
| 81 | + freq = dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100); |
| 82 | + |
| 83 | + *target_freq = max(freq, *target_freq); |
| 84 | + } |
| 85 | + |
| 86 | + return 0; |
| 87 | +} |
| 88 | + |
| 89 | +static int get_target_freq_with_devfreq(struct devfreq *devfreq, |
16 | 90 | unsigned long *freq) |
17 | 91 | { |
18 | 92 | struct devfreq_passive_data *p_data |
@@ -99,6 +173,181 @@ static int devfreq_passive_get_target_freq(struct devfreq *devfreq, |
99 | 173 | return 0; |
100 | 174 | } |
101 | 175 |
|
| 176 | +static int devfreq_passive_get_target_freq(struct devfreq *devfreq, |
| 177 | + unsigned long *freq) |
| 178 | +{ |
| 179 | + struct devfreq_passive_data *p_data = |
| 180 | + (struct devfreq_passive_data *)devfreq->data; |
| 181 | + int ret; |
| 182 | + |
| 183 | + if (!p_data) |
| 184 | + return -EINVAL; |
| 185 | + |
| 186 | + /* |
| 187 | + * If the devfreq device with passive governor has the specific method |
| 188 | + * to determine the next frequency, should use the get_target_freq() |
| 189 | + * of struct devfreq_passive_data. |
| 190 | + */ |
| 191 | + if (p_data->get_target_freq) |
| 192 | + return p_data->get_target_freq(devfreq, freq); |
| 193 | + |
| 194 | + switch (p_data->parent_type) { |
| 195 | + case DEVFREQ_PARENT_DEV: |
| 196 | + ret = get_target_freq_with_devfreq(devfreq, freq); |
| 197 | + break; |
| 198 | + case CPUFREQ_PARENT_DEV: |
| 199 | + ret = get_target_freq_with_cpufreq(devfreq, freq); |
| 200 | + break; |
| 201 | + default: |
| 202 | + ret = -EINVAL; |
| 203 | + dev_err(&devfreq->dev, "Invalid parent type\n"); |
| 204 | + break; |
| 205 | + } |
| 206 | + |
| 207 | + return ret; |
| 208 | +} |
| 209 | + |
| 210 | +static int cpufreq_passive_notifier_call(struct notifier_block *nb, |
| 211 | + unsigned long event, void *ptr) |
| 212 | +{ |
| 213 | + struct devfreq_passive_data *p_data = |
| 214 | + container_of(nb, struct devfreq_passive_data, nb); |
| 215 | + struct devfreq *devfreq = (struct devfreq *)p_data->this; |
| 216 | + struct devfreq_cpu_data *parent_cpu_data; |
| 217 | + struct cpufreq_freqs *freqs = ptr; |
| 218 | + unsigned int cur_freq; |
| 219 | + int ret; |
| 220 | + |
| 221 | + if (event != CPUFREQ_POSTCHANGE || !freqs || |
| 222 | + !p_data->parent_cpu_data[freqs->policy->cpu]) |
| 223 | + return 0; |
| 224 | + |
| 225 | + parent_cpu_data = p_data->parent_cpu_data[freqs->policy->cpu]; |
| 226 | + if (parent_cpu_data->cur_freq == freqs->new) |
| 227 | + return 0; |
| 228 | + |
| 229 | + cur_freq = parent_cpu_data->cur_freq; |
| 230 | + parent_cpu_data->cur_freq = freqs->new; |
| 231 | + |
| 232 | + mutex_lock(&devfreq->lock); |
| 233 | + ret = devfreq_update_target(devfreq, freqs->new); |
| 234 | + mutex_unlock(&devfreq->lock); |
| 235 | + if (ret) { |
| 236 | + parent_cpu_data->cur_freq = cur_freq; |
| 237 | + dev_err(&devfreq->dev, "failed to update the frequency.\n"); |
| 238 | + return ret; |
| 239 | + } |
| 240 | + |
| 241 | + return 0; |
| 242 | +} |
| 243 | + |
| 244 | +static int cpufreq_passive_unregister_notifier(struct devfreq *devfreq) |
| 245 | +{ |
| 246 | + struct devfreq_passive_data *p_data |
| 247 | + = (struct devfreq_passive_data *)devfreq->data; |
| 248 | + struct devfreq_cpu_data *parent_cpu_data; |
| 249 | + int cpu, ret; |
| 250 | + |
| 251 | + if (p_data->nb.notifier_call) { |
| 252 | + ret = cpufreq_unregister_notifier(&p_data->nb, |
| 253 | + CPUFREQ_TRANSITION_NOTIFIER); |
| 254 | + if (ret < 0) |
| 255 | + return ret; |
| 256 | + } |
| 257 | + |
| 258 | + for_each_possible_cpu(cpu) { |
| 259 | + parent_cpu_data = p_data->parent_cpu_data[cpu]; |
| 260 | + if (!parent_cpu_data) |
| 261 | + continue; |
| 262 | + |
| 263 | + if (parent_cpu_data->opp_table) |
| 264 | + dev_pm_opp_put_opp_table(parent_cpu_data->opp_table); |
| 265 | + kfree(parent_cpu_data); |
| 266 | + } |
| 267 | + |
| 268 | + return 0; |
| 269 | +} |
| 270 | + |
| 271 | +static int cpufreq_passive_register_notifier(struct devfreq *devfreq) |
| 272 | +{ |
| 273 | + struct devfreq_passive_data *p_data |
| 274 | + = (struct devfreq_passive_data *)devfreq->data; |
| 275 | + struct device *dev = devfreq->dev.parent; |
| 276 | + struct opp_table *opp_table = NULL; |
| 277 | + struct devfreq_cpu_data *parent_cpu_data; |
| 278 | + struct cpufreq_policy *policy; |
| 279 | + struct device *cpu_dev; |
| 280 | + unsigned int cpu; |
| 281 | + int ret; |
| 282 | + |
| 283 | + p_data->nb.notifier_call = cpufreq_passive_notifier_call; |
| 284 | + ret = cpufreq_register_notifier(&p_data->nb, CPUFREQ_TRANSITION_NOTIFIER); |
| 285 | + if (ret) { |
| 286 | + dev_err(dev, "failed to register cpufreq notifier\n"); |
| 287 | + p_data->nb.notifier_call = NULL; |
| 288 | + goto err; |
| 289 | + } |
| 290 | + |
| 291 | + for_each_possible_cpu(cpu) { |
| 292 | + if (p_data->parent_cpu_data[cpu]) |
| 293 | + continue; |
| 294 | + |
| 295 | + policy = cpufreq_cpu_get(cpu); |
| 296 | + if (!policy) { |
| 297 | + ret = -EPROBE_DEFER; |
| 298 | + goto err; |
| 299 | + } |
| 300 | + |
| 301 | + parent_cpu_data = kzalloc(sizeof(*parent_cpu_data), |
| 302 | + GFP_KERNEL); |
| 303 | + if (!parent_cpu_data) { |
| 304 | + ret = -ENOMEM; |
| 305 | + goto err_put_policy; |
| 306 | + } |
| 307 | + |
| 308 | + cpu_dev = get_cpu_device(cpu); |
| 309 | + if (!cpu_dev) { |
| 310 | + dev_err(dev, "failed to get cpu device\n"); |
| 311 | + ret = -ENODEV; |
| 312 | + goto err_free_cpu_data; |
| 313 | + } |
| 314 | + |
| 315 | + opp_table = dev_pm_opp_get_opp_table(cpu_dev); |
| 316 | + if (IS_ERR(opp_table)) { |
| 317 | + dev_err(dev, "failed to get opp_table of cpu%d\n", cpu); |
| 318 | + ret = PTR_ERR(opp_table); |
| 319 | + goto err_free_cpu_data; |
| 320 | + } |
| 321 | + |
| 322 | + parent_cpu_data->dev = cpu_dev; |
| 323 | + parent_cpu_data->opp_table = opp_table; |
| 324 | + parent_cpu_data->first_cpu = cpumask_first(policy->related_cpus); |
| 325 | + parent_cpu_data->cur_freq = policy->cur; |
| 326 | + parent_cpu_data->min_freq = policy->cpuinfo.min_freq; |
| 327 | + parent_cpu_data->max_freq = policy->cpuinfo.max_freq; |
| 328 | + |
| 329 | + p_data->parent_cpu_data[cpu] = parent_cpu_data; |
| 330 | + cpufreq_cpu_put(policy); |
| 331 | + } |
| 332 | + |
| 333 | + mutex_lock(&devfreq->lock); |
| 334 | + ret = devfreq_update_target(devfreq, 0L); |
| 335 | + mutex_unlock(&devfreq->lock); |
| 336 | + if (ret) |
| 337 | + dev_err(dev, "failed to update the frequency\n"); |
| 338 | + |
| 339 | + return ret; |
| 340 | + |
| 341 | +err_free_cpu_data: |
| 342 | + kfree(parent_cpu_data); |
| 343 | +err_put_policy: |
| 344 | + cpufreq_cpu_put(policy); |
| 345 | +err: |
| 346 | + WARN_ON(cpufreq_passive_unregister_notifier(devfreq)); |
| 347 | + |
| 348 | + return ret; |
| 349 | +} |
| 350 | + |
102 | 351 | static int devfreq_passive_notifier_call(struct notifier_block *nb, |
103 | 352 | unsigned long event, void *ptr) |
104 | 353 | { |
@@ -131,30 +380,55 @@ static int devfreq_passive_notifier_call(struct notifier_block *nb, |
131 | 380 | return NOTIFY_DONE; |
132 | 381 | } |
133 | 382 |
|
134 | | -static int devfreq_passive_event_handler(struct devfreq *devfreq, |
135 | | - unsigned int event, void *data) |
| 383 | +static int devfreq_passive_unregister_notifier(struct devfreq *devfreq) |
| 384 | +{ |
| 385 | + struct devfreq_passive_data *p_data |
| 386 | + = (struct devfreq_passive_data *)devfreq->data; |
| 387 | + struct devfreq *parent = (struct devfreq *)p_data->parent; |
| 388 | + struct notifier_block *nb = &p_data->nb; |
| 389 | + |
| 390 | + return devfreq_unregister_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER); |
| 391 | +} |
| 392 | + |
| 393 | +static int devfreq_passive_register_notifier(struct devfreq *devfreq) |
136 | 394 | { |
137 | 395 | struct devfreq_passive_data *p_data |
138 | 396 | = (struct devfreq_passive_data *)devfreq->data; |
139 | 397 | struct devfreq *parent = (struct devfreq *)p_data->parent; |
140 | 398 | struct notifier_block *nb = &p_data->nb; |
141 | | - int ret = 0; |
142 | 399 |
|
143 | 400 | if (!parent) |
144 | 401 | return -EPROBE_DEFER; |
145 | 402 |
|
| 403 | + nb->notifier_call = devfreq_passive_notifier_call; |
| 404 | + return devfreq_register_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER); |
| 405 | +} |
| 406 | + |
| 407 | +static int devfreq_passive_event_handler(struct devfreq *devfreq, |
| 408 | + unsigned int event, void *data) |
| 409 | +{ |
| 410 | + struct devfreq_passive_data *p_data |
| 411 | + = (struct devfreq_passive_data *)devfreq->data; |
| 412 | + int ret = -EINVAL; |
| 413 | + |
| 414 | + if (!p_data) |
| 415 | + return -EINVAL; |
| 416 | + |
| 417 | + if (!p_data->this) |
| 418 | + p_data->this = devfreq; |
| 419 | + |
146 | 420 | switch (event) { |
147 | 421 | case DEVFREQ_GOV_START: |
148 | | - if (!p_data->this) |
149 | | - p_data->this = devfreq; |
150 | | - |
151 | | - nb->notifier_call = devfreq_passive_notifier_call; |
152 | | - ret = devfreq_register_notifier(parent, nb, |
153 | | - DEVFREQ_TRANSITION_NOTIFIER); |
| 422 | + if (p_data->parent_type == DEVFREQ_PARENT_DEV) |
| 423 | + ret = devfreq_passive_register_notifier(devfreq); |
| 424 | + else if (p_data->parent_type == CPUFREQ_PARENT_DEV) |
| 425 | + ret = cpufreq_passive_register_notifier(devfreq); |
154 | 426 | break; |
155 | 427 | case DEVFREQ_GOV_STOP: |
156 | | - WARN_ON(devfreq_unregister_notifier(parent, nb, |
157 | | - DEVFREQ_TRANSITION_NOTIFIER)); |
| 428 | + if (p_data->parent_type == DEVFREQ_PARENT_DEV) |
| 429 | + WARN_ON(devfreq_passive_unregister_notifier(devfreq)); |
| 430 | + else if (p_data->parent_type == CPUFREQ_PARENT_DEV) |
| 431 | + WARN_ON(cpufreq_passive_unregister_notifier(devfreq)); |
158 | 432 | break; |
159 | 433 | default: |
160 | 434 | break; |
|
0 commit comments