// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/devfreq/governor_passive.c
 *
 * Copyright (C) 2016 Samsung Electronics
 * Author: Chanwoo Choi <cw00.choi@samsung.com>
 * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
 */

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
#include "governor.h"

#define HZ_PER_KHZ	1000

static struct devfreq_cpu_data *
get_parent_cpu_data(struct devfreq_passive_data *p_data,
		    struct cpufreq_policy *policy)
{
	struct devfreq_cpu_data *parent_cpu_data;

	if (!p_data || !policy)
		return NULL;

	list_for_each_entry(parent_cpu_data, &p_data->cpu_data_list, node)
		if (parent_cpu_data->first_cpu == cpumask_first(policy->related_cpus))
			return parent_cpu_data;

	return NULL;
}

static unsigned long get_target_freq_by_required_opp(struct device *p_dev,
						struct opp_table *p_opp_table,
						struct opp_table *opp_table,
						unsigned long *freq)
{
	struct dev_pm_opp *opp = NULL, *p_opp = NULL;
	unsigned long target_freq;

	if (!p_dev || !p_opp_table || !opp_table || !freq)
		return 0;

	p_opp = devfreq_recommended_opp(p_dev, freq, 0);
	if (IS_ERR(p_opp))
		return 0;

	opp = dev_pm_opp_xlate_required_opp(p_opp_table, opp_table, p_opp);
	dev_pm_opp_put(p_opp);

	if (IS_ERR(opp))
		return 0;

	target_freq = dev_pm_opp_get_freq(opp);
	dev_pm_opp_put(opp);

	return target_freq;
}

static int get_target_freq_with_cpufreq(struct devfreq *devfreq,
					unsigned long *target_freq)
{
	struct devfreq_passive_data *p_data =
				(struct devfreq_passive_data *)devfreq->data;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_policy *policy;
	unsigned long cpu, cpu_cur, cpu_min, cpu_max, cpu_percent;
	unsigned long dev_min, dev_max;
	unsigned long freq = 0;
	int ret = 0;

	for_each_online_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy) {
			ret = -EINVAL;
			continue;
		}

		parent_cpu_data = get_parent_cpu_data(p_data, policy);
		if (!parent_cpu_data) {
			cpufreq_cpu_put(policy);
			continue;
		}

		/* Get target freq via required opps */
		cpu_cur = parent_cpu_data->cur_freq * HZ_PER_KHZ;
		freq = get_target_freq_by_required_opp(parent_cpu_data->dev,
					parent_cpu_data->opp_table,
					devfreq->opp_table, &cpu_cur);
		if (freq) {
			*target_freq = max(freq, *target_freq);
			cpufreq_cpu_put(policy);
			continue;
		}

		/* Use interpolation if required opps is not available */
		devfreq_get_freq_range(devfreq, &dev_min, &dev_max);

		cpu_min = parent_cpu_data->min_freq;
		cpu_max = parent_cpu_data->max_freq;
		cpu_cur = parent_cpu_data->cur_freq;

		cpu_percent = ((cpu_cur - cpu_min) * 100) / (cpu_max - cpu_min);
		freq = dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);

		*target_freq = max(freq, *target_freq);
		cpufreq_cpu_put(policy);
	}

	return ret;
}
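/*
 * Derive this device's target frequency from the parent devfreq's new
 * frequency: try the required-opps translation first and, if the OPP
 * tables are not linked, fall back to the entry at the same index in
 * this device's freq_table, clamped to its last entry.
 */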
static int get_target_freq_with_devfreq(struct devfreq *devfreq,
					unsigned long *freq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent;
	unsigned long child_freq = ULONG_MAX;
	int i, count;

	/* Get target freq via required opps */
	child_freq = get_target_freq_by_required_opp(parent_devfreq->dev.parent,
						parent_devfreq->opp_table,
						devfreq->opp_table, freq);
	if (child_freq)
		goto out;

	/* Use interpolation if required opps is not available */
	for (i = 0; i < parent_devfreq->profile->max_state; i++)
		if (parent_devfreq->profile->freq_table[i] == *freq)
			break;

	if (i == parent_devfreq->profile->max_state)
		return -EINVAL;

	if (i < devfreq->profile->max_state) {
		child_freq = devfreq->profile->freq_table[i];
	} else {
		count = devfreq->profile->max_state;
		child_freq = devfreq->profile->freq_table[count - 1];
	}

out:
	*freq = child_freq;

	return 0;
}

static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
					   unsigned long *freq)
{
	struct devfreq_passive_data *p_data =
				(struct devfreq_passive_data *)devfreq->data;
	int ret;

	if (!p_data)
		return -EINVAL;

	/*
	 * If the devfreq device with passive governor has the specific method
	 * to determine the next frequency, should use the get_target_freq()
	 * of struct devfreq_passive_data.
	 */
	if (p_data->get_target_freq)
		return p_data->get_target_freq(devfreq, freq);

	switch (p_data->parent_type) {
	case DEVFREQ_PARENT_DEV:
		ret = get_target_freq_with_devfreq(devfreq, freq);
		break;
	case CPUFREQ_PARENT_DEV:
		ret = get_target_freq_with_cpufreq(devfreq, freq);
		break;
	default:
		ret = -EINVAL;
		dev_err(&devfreq->dev, "Invalid parent type\n");
		break;
	}

	return ret;
}

static int cpufreq_passive_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct devfreq_passive_data *p_data =
			container_of(nb, struct devfreq_passive_data, nb);
	struct devfreq *devfreq = (struct devfreq *)p_data->this;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_freqs *freqs = ptr;
	unsigned int cur_freq;
	int ret;

	if (event != CPUFREQ_POSTCHANGE || !freqs)
		return 0;

	parent_cpu_data = get_parent_cpu_data(p_data, freqs->policy);
	if (!parent_cpu_data || parent_cpu_data->cur_freq == freqs->new)
		return 0;

	cur_freq = parent_cpu_data->cur_freq;
	parent_cpu_data->cur_freq = freqs->new;

	mutex_lock(&devfreq->lock);
	ret = devfreq_update_target(devfreq, freqs->new);
	mutex_unlock(&devfreq->lock);
	if (ret) {
		parent_cpu_data->cur_freq = cur_freq;
		dev_err(&devfreq->dev, "failed to update the frequency.\n");
		return ret;
	}

	return 0;
}

static int cpufreq_passive_unregister_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq_cpu_data *parent_cpu_data;
	int cpu, ret = 0;

	if (p_data->nb.notifier_call) {
		ret = cpufreq_unregister_notifier(&p_data->nb,
					CPUFREQ_TRANSITION_NOTIFIER);
		if (ret < 0)
			return ret;
	}

	for_each_possible_cpu(cpu) {
		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
		if (!policy) {
			ret = -EINVAL;
			continue;
		}

		parent_cpu_data = get_parent_cpu_data(p_data, policy);
		if (!parent_cpu_data) {
			cpufreq_cpu_put(policy);
			continue;
		}

		list_del(&parent_cpu_data->node);
		if (parent_cpu_data->opp_table)
			dev_pm_opp_put_opp_table(parent_cpu_data->opp_table);
		kfree(parent_cpu_data);
		cpufreq_cpu_put(policy);
	}

	return ret;
}
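/*
 * Register a cpufreq transition notifier and build one devfreq_cpu_data
 * entry per cpufreq policy (keyed by the first CPU of related_cpus),
 * recording the policy's current/min/max frequencies and the CPU's OPP
 * table, then trigger an initial frequency update.
 */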
static int cpufreq_passive_register_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct device *dev = devfreq->dev.parent;
	struct opp_table *opp_table = NULL;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_policy *policy;
	struct device *cpu_dev;
	unsigned int cpu;
	int ret;

	p_data->cpu_data_list
		= (struct list_head)LIST_HEAD_INIT(p_data->cpu_data_list);

	p_data->nb.notifier_call = cpufreq_passive_notifier_call;
	ret = cpufreq_register_notifier(&p_data->nb, CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		dev_err(dev, "failed to register cpufreq notifier\n");
		p_data->nb.notifier_call = NULL;
		goto err;
	}

	for_each_possible_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy) {
			ret = -EPROBE_DEFER;
			goto err;
		}

		parent_cpu_data = get_parent_cpu_data(p_data, policy);
		if (parent_cpu_data) {
			cpufreq_cpu_put(policy);
			continue;
		}

		parent_cpu_data = kzalloc(sizeof(*parent_cpu_data),
					  GFP_KERNEL);
		if (!parent_cpu_data) {
			ret = -ENOMEM;
			goto err_put_policy;
		}

		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			dev_err(dev, "failed to get cpu device\n");
			ret = -ENODEV;
			goto err_free_cpu_data;
		}

		opp_table = dev_pm_opp_get_opp_table(cpu_dev);
		if (IS_ERR(opp_table)) {
			dev_err(dev, "failed to get opp_table of cpu%d\n", cpu);
			ret = PTR_ERR(opp_table);
			goto err_free_cpu_data;
		}

		parent_cpu_data->dev = cpu_dev;
		parent_cpu_data->opp_table = opp_table;
		parent_cpu_data->first_cpu = cpumask_first(policy->related_cpus);
		parent_cpu_data->cur_freq = policy->cur;
		parent_cpu_data->min_freq = policy->cpuinfo.min_freq;
		parent_cpu_data->max_freq = policy->cpuinfo.max_freq;

		list_add_tail(&parent_cpu_data->node, &p_data->cpu_data_list);
		cpufreq_cpu_put(policy);
	}

	mutex_lock(&devfreq->lock);
	ret = devfreq_update_target(devfreq, 0L);
	mutex_unlock(&devfreq->lock);
	if (ret)
		dev_err(dev, "failed to update the frequency\n");

	return ret;

err_free_cpu_data:
	kfree(parent_cpu_data);
err_put_policy:
	cpufreq_cpu_put(policy);
err:
	WARN_ON(cpufreq_passive_unregister_notifier(devfreq));

	return ret;
}
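/*
 * Follow the parent devfreq's transitions: update this device before the
 * parent when the parent frequency is dropping (DEVFREQ_PRECHANGE) and
 * after the parent when it is rising (DEVFREQ_POSTCHANGE).
 */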
static int devfreq_passive_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct devfreq_passive_data *data
			= container_of(nb, struct devfreq_passive_data, nb);
	struct devfreq *devfreq = (struct devfreq *)data->this;
	struct devfreq *parent = (struct devfreq *)data->parent;
	struct devfreq_freqs *freqs = (struct devfreq_freqs *)ptr;
	unsigned long freq = freqs->new;
	int ret = 0;

	mutex_lock_nested(&devfreq->lock, SINGLE_DEPTH_NESTING);
	switch (event) {
	case DEVFREQ_PRECHANGE:
		if (parent->previous_freq > freq)
			ret = devfreq_update_target(devfreq, freq);

		break;
	case DEVFREQ_POSTCHANGE:
		if (parent->previous_freq < freq)
			ret = devfreq_update_target(devfreq, freq);
		break;
	}
	mutex_unlock(&devfreq->lock);

	if (ret < 0)
		dev_warn(&devfreq->dev,
			"failed to update devfreq using passive governor\n");

	return NOTIFY_DONE;
}

static int devfreq_passive_unregister_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent = (struct devfreq *)p_data->parent;
	struct notifier_block *nb = &p_data->nb;

	return devfreq_unregister_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
}

static int devfreq_passive_register_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent = (struct devfreq *)p_data->parent;
	struct notifier_block *nb = &p_data->nb;

	if (!parent)
		return -EPROBE_DEFER;

	nb->notifier_call = devfreq_passive_notifier_call;
	return devfreq_register_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
}

static int devfreq_passive_event_handler(struct devfreq *devfreq,
				unsigned int event, void *data)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	int ret = 0;

	if (!p_data)
		return -EINVAL;

	if (!p_data->this)
		p_data->this = devfreq;

	switch (event) {
	case DEVFREQ_GOV_START:
		if (p_data->parent_type == DEVFREQ_PARENT_DEV)
			ret = devfreq_passive_register_notifier(devfreq);
		else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
			ret = cpufreq_passive_register_notifier(devfreq);
		break;
	case DEVFREQ_GOV_STOP:
		if (p_data->parent_type == DEVFREQ_PARENT_DEV)
			WARN_ON(devfreq_passive_unregister_notifier(devfreq));
		else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
			WARN_ON(cpufreq_passive_unregister_notifier(devfreq));
		break;
	default:
		break;
	}

	return ret;
}

static struct devfreq_governor devfreq_passive = {
	.name = DEVFREQ_GOV_PASSIVE,
	.flags = DEVFREQ_GOV_FLAG_IMMUTABLE,
	.get_target_freq = devfreq_passive_get_target_freq,
	.event_handler = devfreq_passive_event_handler,
};

static int __init devfreq_passive_init(void)
{
	return devfreq_add_governor(&devfreq_passive);
}
subsys_initcall(devfreq_passive_init);

static void __exit devfreq_passive_exit(void)
{
	int ret;

	ret = devfreq_remove_governor(&devfreq_passive);
	if (ret)
		pr_err("%s: failed remove governor %d\n", __func__, ret);
}
module_exit(devfreq_passive_exit);

MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("DEVFREQ Passive governor");
MODULE_LICENSE("GPL v2");