1 /* 2 * linux/drivers/cpufreq/cpufreq.c 3 * 4 * Copyright (C) 2001 Russell King 5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org> 7 * 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com> 9 * Added handling for CPU hotplug 10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com> 11 * Fix handling for CPU hotplug -- affected CPUs 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License version 2 as 15 * published by the Free Software Foundation. 16 */ 17 18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 19 20 #include <linux/cpu.h> 21 #include <linux/cpufreq.h> 22 #include <linux/cpu_cooling.h> 23 #include <linux/delay.h> 24 #include <linux/device.h> 25 #include <linux/init.h> 26 #include <linux/kernel_stat.h> 27 #include <linux/module.h> 28 #include <linux/mutex.h> 29 #include <linux/slab.h> 30 #include <linux/suspend.h> 31 #include <linux/syscore_ops.h> 32 #include <linux/tick.h> 33 #include <trace/events/power.h> 34 35 static LIST_HEAD(cpufreq_policy_list); 36 37 static inline bool policy_is_inactive(struct cpufreq_policy *policy) 38 { 39 return cpumask_empty(policy->cpus); 40 } 41 42 /* Macros to iterate over CPU policies */ 43 #define for_each_suitable_policy(__policy, __active) \ 44 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \ 45 if ((__active) == !policy_is_inactive(__policy)) 46 47 #define for_each_active_policy(__policy) \ 48 for_each_suitable_policy(__policy, true) 49 #define for_each_inactive_policy(__policy) \ 50 for_each_suitable_policy(__policy, false) 51 52 #define for_each_policy(__policy) \ 53 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) 54 55 /* Iterate over governors */ 56 static LIST_HEAD(cpufreq_governor_list); 57 #define for_each_governor(__governor) \ 58 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list) 59 60 /** 61 * The "cpufreq driver" - the arch- or hardware-dependent low 62 * level driver of CPUFreq support, and its spinlock. This lock 63 * also protects the cpufreq_cpu_data array. 64 */ 65 static struct cpufreq_driver *cpufreq_driver; 66 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); 67 static DEFINE_RWLOCK(cpufreq_driver_lock); 68 69 /* Flag to suspend/resume CPUFreq governors */ 70 static bool cpufreq_suspended; 71 72 static inline bool has_target(void) 73 { 74 return cpufreq_driver->target_index || cpufreq_driver->target; 75 } 76 77 /* internal prototypes */ 78 static unsigned int __cpufreq_get(struct cpufreq_policy *policy); 79 static int cpufreq_init_governor(struct cpufreq_policy *policy); 80 static void cpufreq_exit_governor(struct cpufreq_policy *policy); 81 static int cpufreq_start_governor(struct cpufreq_policy *policy); 82 static void cpufreq_stop_governor(struct cpufreq_policy *policy); 83 static void cpufreq_governor_limits(struct cpufreq_policy *policy); 84 85 /** 86 * Two notifier lists: the "policy" list is involved in the 87 * validation process for a new CPU frequency policy; the 88 * "transition" list for kernel code that needs to handle 89 * changes to devices when the CPU clock speed changes. 90 * The mutex locks both lists. 
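 * (The "policy" list below is a blocking notifier head and the "transition"
 * list an SRCU notifier head; both declarations follow this comment.)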
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = div_u64(cur_wall_time, NSEC_PER_USEC);

        return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);

__weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
                unsigned long max_freq)
{
}
EXPORT_SYMBOL_GPL(arch_set_freq_scale);

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the frequency table passed in
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency)
{
        policy->freq_table = table;
        policy->cpuinfo.transition_latency = transition_latency;

        /*
         * The driver only supports the SMP configuration where all processors
         * share the clock and voltage.
         */
        cpumask_setall(policy->cpus);

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

        if (!policy || IS_ERR(policy->clk)) {
                pr_err("%s: No %s associated to cpu: %d\n",
                       __func__, policy ? "clk" : "policy", cpu);
                return 0;
        }

        return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns the policy for 'cpu', or NULL if it doesn't exist.
 * It also increments the kobject reference count to mark the policy busy, so a
 * corresponding call to cpufreq_cpu_put() is required to decrement it again.
 * If that call to cpufreq_cpu_put() isn't made, the policy will never be
 * freed, as freeing depends on the kobj count.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = NULL;
        unsigned long flags;

        if (WARN_ON(cpu >= nr_cpu_ids))
                return NULL;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver) {
                /* get the CPU */
                policy = cpufreq_cpu_get_raw(cpu);
                if (policy)
                        kobject_get(&policy->kobj);
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
        kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
        static unsigned long l_p_j_ref;
        static unsigned int l_p_j_ref_freq;

        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
                         l_p_j_ref, l_p_j_ref_freq);
        }
        if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
                         loops_per_jiffy, ci->new);
        }
#endif
}

/**
 * cpufreq_notify_transition - Notify frequency transition and adjust_jiffies.
 * @policy: cpufreq policy the frequency transition applies to.
 * @freqs: details of the frequency update.
 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
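 *
 * On a failed transition, cpufreq_notify_post_transition() below replays the
 * PRECHANGE/POSTCHANGE pair with "old" and "new" swapped, so that notifier
 * users end up seeing the frequency the CPU actually runs at.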
304 */ 305 static void cpufreq_notify_transition(struct cpufreq_policy *policy, 306 struct cpufreq_freqs *freqs, 307 unsigned int state) 308 { 309 BUG_ON(irqs_disabled()); 310 311 if (cpufreq_disabled()) 312 return; 313 314 freqs->flags = cpufreq_driver->flags; 315 pr_debug("notification %u of frequency transition to %u kHz\n", 316 state, freqs->new); 317 318 switch (state) { 319 case CPUFREQ_PRECHANGE: 320 /* 321 * Detect if the driver reported a value as "old frequency" 322 * which is not equal to what the cpufreq core thinks is 323 * "old frequency". 324 */ 325 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 326 if (policy->cur && (policy->cur != freqs->old)) { 327 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n", 328 freqs->old, policy->cur); 329 freqs->old = policy->cur; 330 } 331 } 332 333 for_each_cpu(freqs->cpu, policy->cpus) { 334 srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 335 CPUFREQ_PRECHANGE, freqs); 336 } 337 338 adjust_jiffies(CPUFREQ_PRECHANGE, freqs); 339 break; 340 341 case CPUFREQ_POSTCHANGE: 342 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); 343 pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new, 344 cpumask_pr_args(policy->cpus)); 345 346 for_each_cpu(freqs->cpu, policy->cpus) { 347 trace_cpu_frequency(freqs->new, freqs->cpu); 348 srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 349 CPUFREQ_POSTCHANGE, freqs); 350 } 351 352 cpufreq_stats_record_transition(policy, freqs->new); 353 policy->cur = freqs->new; 354 } 355 } 356 357 /* Do post notifications when there are chances that transition has failed */ 358 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy, 359 struct cpufreq_freqs *freqs, int transition_failed) 360 { 361 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); 362 if (!transition_failed) 363 return; 364 365 swap(freqs->old, freqs->new); 366 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); 367 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); 368 } 369 370 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy, 371 struct cpufreq_freqs *freqs) 372 { 373 374 /* 375 * Catch double invocations of _begin() which lead to self-deadlock. 376 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core 377 * doesn't invoke _begin() on their behalf, and hence the chances of 378 * double invocations are very low. Moreover, there are scenarios 379 * where these checks can emit false-positive warnings in these 380 * drivers; so we avoid that by skipping them altogether. 
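         *
         * The wait_event()/transition_lock sequence below ensures that only
         * one transition can be in flight per policy at any given time.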
381 */ 382 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION) 383 && current == policy->transition_task); 384 385 wait: 386 wait_event(policy->transition_wait, !policy->transition_ongoing); 387 388 spin_lock(&policy->transition_lock); 389 390 if (unlikely(policy->transition_ongoing)) { 391 spin_unlock(&policy->transition_lock); 392 goto wait; 393 } 394 395 policy->transition_ongoing = true; 396 policy->transition_task = current; 397 398 spin_unlock(&policy->transition_lock); 399 400 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); 401 } 402 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin); 403 404 void cpufreq_freq_transition_end(struct cpufreq_policy *policy, 405 struct cpufreq_freqs *freqs, int transition_failed) 406 { 407 if (WARN_ON(!policy->transition_ongoing)) 408 return; 409 410 cpufreq_notify_post_transition(policy, freqs, transition_failed); 411 412 policy->transition_ongoing = false; 413 policy->transition_task = NULL; 414 415 wake_up(&policy->transition_wait); 416 } 417 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end); 418 419 /* 420 * Fast frequency switching status count. Positive means "enabled", negative 421 * means "disabled" and 0 means "not decided yet". 422 */ 423 static int cpufreq_fast_switch_count; 424 static DEFINE_MUTEX(cpufreq_fast_switch_lock); 425 426 static void cpufreq_list_transition_notifiers(void) 427 { 428 struct notifier_block *nb; 429 430 pr_info("Registered transition notifiers:\n"); 431 432 mutex_lock(&cpufreq_transition_notifier_list.mutex); 433 434 for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next) 435 pr_info("%pF\n", nb->notifier_call); 436 437 mutex_unlock(&cpufreq_transition_notifier_list.mutex); 438 } 439 440 /** 441 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy. 442 * @policy: cpufreq policy to enable fast frequency switching for. 443 * 444 * Try to enable fast frequency switching for @policy. 445 * 446 * The attempt will fail if there is at least one transition notifier registered 447 * at this point, as fast frequency switching is quite fundamentally at odds 448 * with transition notifiers. Thus if successful, it will make registration of 449 * transition notifiers fail going forward. 450 */ 451 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy) 452 { 453 lockdep_assert_held(&policy->rwsem); 454 455 if (!policy->fast_switch_possible) 456 return; 457 458 mutex_lock(&cpufreq_fast_switch_lock); 459 if (cpufreq_fast_switch_count >= 0) { 460 cpufreq_fast_switch_count++; 461 policy->fast_switch_enabled = true; 462 } else { 463 pr_warn("CPU%u: Fast frequency switching not enabled\n", 464 policy->cpu); 465 cpufreq_list_transition_notifiers(); 466 } 467 mutex_unlock(&cpufreq_fast_switch_lock); 468 } 469 EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch); 470 471 /** 472 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy. 473 * @policy: cpufreq policy to disable fast frequency switching for. 474 */ 475 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy) 476 { 477 mutex_lock(&cpufreq_fast_switch_lock); 478 if (policy->fast_switch_enabled) { 479 policy->fast_switch_enabled = false; 480 if (!WARN_ON(cpufreq_fast_switch_count <= 0)) 481 cpufreq_fast_switch_count--; 482 } 483 mutex_unlock(&cpufreq_fast_switch_lock); 484 } 485 EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch); 486 487 /** 488 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported 489 * one. 490 * @target_freq: target frequency to resolve. 
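 * @policy: policy the frequency is to be resolved for.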
491 * 492 * The target to driver frequency mapping is cached in the policy. 493 * 494 * Return: Lowest driver-supported frequency greater than or equal to the 495 * given target_freq, subject to policy (min/max) and driver limitations. 496 */ 497 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, 498 unsigned int target_freq) 499 { 500 target_freq = clamp_val(target_freq, policy->min, policy->max); 501 policy->cached_target_freq = target_freq; 502 503 if (cpufreq_driver->target_index) { 504 int idx; 505 506 idx = cpufreq_frequency_table_target(policy, target_freq, 507 CPUFREQ_RELATION_L); 508 policy->cached_resolved_idx = idx; 509 return policy->freq_table[idx].frequency; 510 } 511 512 if (cpufreq_driver->resolve_freq) 513 return cpufreq_driver->resolve_freq(policy, target_freq); 514 515 return target_freq; 516 } 517 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); 518 519 unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy) 520 { 521 unsigned int latency; 522 523 if (policy->transition_delay_us) 524 return policy->transition_delay_us; 525 526 latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC; 527 if (latency) { 528 /* 529 * For platforms that can change the frequency very fast (< 10 530 * us), the above formula gives a decent transition delay. But 531 * for platforms where transition_latency is in milliseconds, it 532 * ends up giving unrealistic values. 533 * 534 * Cap the default transition delay to 10 ms, which seems to be 535 * a reasonable amount of time after which we should reevaluate 536 * the frequency. 537 */ 538 return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000); 539 } 540 541 return LATENCY_MULTIPLIER; 542 } 543 EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us); 544 545 /********************************************************************* 546 * SYSFS INTERFACE * 547 *********************************************************************/ 548 static ssize_t show_boost(struct kobject *kobj, 549 struct kobj_attribute *attr, char *buf) 550 { 551 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); 552 } 553 554 static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr, 555 const char *buf, size_t count) 556 { 557 int ret, enable; 558 559 ret = sscanf(buf, "%d", &enable); 560 if (ret != 1 || enable < 0 || enable > 1) 561 return -EINVAL; 562 563 if (cpufreq_boost_trigger_state(enable)) { 564 pr_err("%s: Cannot %s BOOST!\n", 565 __func__, enable ? "enable" : "disable"); 566 return -EINVAL; 567 } 568 569 pr_debug("%s: cpufreq BOOST %s\n", 570 __func__, enable ? 
"enabled" : "disabled"); 571 572 return count; 573 } 574 define_one_global_rw(boost); 575 576 static struct cpufreq_governor *find_governor(const char *str_governor) 577 { 578 struct cpufreq_governor *t; 579 580 for_each_governor(t) 581 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN)) 582 return t; 583 584 return NULL; 585 } 586 587 /** 588 * cpufreq_parse_governor - parse a governor string 589 */ 590 static int cpufreq_parse_governor(char *str_governor, 591 struct cpufreq_policy *policy) 592 { 593 if (cpufreq_driver->setpolicy) { 594 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { 595 policy->policy = CPUFREQ_POLICY_PERFORMANCE; 596 return 0; 597 } 598 599 if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) { 600 policy->policy = CPUFREQ_POLICY_POWERSAVE; 601 return 0; 602 } 603 } else { 604 struct cpufreq_governor *t; 605 606 mutex_lock(&cpufreq_governor_mutex); 607 608 t = find_governor(str_governor); 609 if (!t) { 610 int ret; 611 612 mutex_unlock(&cpufreq_governor_mutex); 613 614 ret = request_module("cpufreq_%s", str_governor); 615 if (ret) 616 return -EINVAL; 617 618 mutex_lock(&cpufreq_governor_mutex); 619 620 t = find_governor(str_governor); 621 } 622 if (t && !try_module_get(t->owner)) 623 t = NULL; 624 625 mutex_unlock(&cpufreq_governor_mutex); 626 627 if (t) { 628 policy->governor = t; 629 return 0; 630 } 631 } 632 633 return -EINVAL; 634 } 635 636 /** 637 * cpufreq_per_cpu_attr_read() / show_##file_name() - 638 * print out cpufreq information 639 * 640 * Write out information from cpufreq_driver->policy[cpu]; object must be 641 * "unsigned int". 642 */ 643 644 #define show_one(file_name, object) \ 645 static ssize_t show_##file_name \ 646 (struct cpufreq_policy *policy, char *buf) \ 647 { \ 648 return sprintf(buf, "%u\n", policy->object); \ 649 } 650 651 show_one(cpuinfo_min_freq, cpuinfo.min_freq); 652 show_one(cpuinfo_max_freq, cpuinfo.max_freq); 653 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); 654 show_one(scaling_min_freq, min); 655 show_one(scaling_max_freq, max); 656 657 __weak unsigned int arch_freq_get_on_cpu(int cpu) 658 { 659 return 0; 660 } 661 662 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf) 663 { 664 ssize_t ret; 665 unsigned int freq; 666 667 freq = arch_freq_get_on_cpu(policy->cpu); 668 if (freq) 669 ret = sprintf(buf, "%u\n", freq); 670 else if (cpufreq_driver && cpufreq_driver->setpolicy && 671 cpufreq_driver->get) 672 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu)); 673 else 674 ret = sprintf(buf, "%u\n", policy->cur); 675 return ret; 676 } 677 678 static int cpufreq_set_policy(struct cpufreq_policy *policy, 679 struct cpufreq_policy *new_policy); 680 681 /** 682 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access 683 */ 684 #define store_one(file_name, object) \ 685 static ssize_t store_##file_name \ 686 (struct cpufreq_policy *policy, const char *buf, size_t count) \ 687 { \ 688 int ret, temp; \ 689 struct cpufreq_policy new_policy; \ 690 \ 691 memcpy(&new_policy, policy, sizeof(*policy)); \ 692 new_policy.min = policy->user_policy.min; \ 693 new_policy.max = policy->user_policy.max; \ 694 \ 695 ret = sscanf(buf, "%u", &new_policy.object); \ 696 if (ret != 1) \ 697 return -EINVAL; \ 698 \ 699 temp = new_policy.object; \ 700 ret = cpufreq_set_policy(policy, &new_policy); \ 701 if (!ret) \ 702 policy->user_policy.object = temp; \ 703 \ 704 return ret ? 
ret : count; \ 705 } 706 707 store_one(scaling_min_freq, min); 708 store_one(scaling_max_freq, max); 709 710 /** 711 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware 712 */ 713 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, 714 char *buf) 715 { 716 unsigned int cur_freq = __cpufreq_get(policy); 717 718 if (cur_freq) 719 return sprintf(buf, "%u\n", cur_freq); 720 721 return sprintf(buf, "<unknown>\n"); 722 } 723 724 /** 725 * show_scaling_governor - show the current policy for the specified CPU 726 */ 727 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) 728 { 729 if (policy->policy == CPUFREQ_POLICY_POWERSAVE) 730 return sprintf(buf, "powersave\n"); 731 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) 732 return sprintf(buf, "performance\n"); 733 else if (policy->governor) 734 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", 735 policy->governor->name); 736 return -EINVAL; 737 } 738 739 /** 740 * store_scaling_governor - store policy for the specified CPU 741 */ 742 static ssize_t store_scaling_governor(struct cpufreq_policy *policy, 743 const char *buf, size_t count) 744 { 745 int ret; 746 char str_governor[16]; 747 struct cpufreq_policy new_policy; 748 749 memcpy(&new_policy, policy, sizeof(*policy)); 750 751 ret = sscanf(buf, "%15s", str_governor); 752 if (ret != 1) 753 return -EINVAL; 754 755 if (cpufreq_parse_governor(str_governor, &new_policy)) 756 return -EINVAL; 757 758 ret = cpufreq_set_policy(policy, &new_policy); 759 760 if (new_policy.governor) 761 module_put(new_policy.governor->owner); 762 763 return ret ? ret : count; 764 } 765 766 /** 767 * show_scaling_driver - show the cpufreq driver currently loaded 768 */ 769 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) 770 { 771 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name); 772 } 773 774 /** 775 * show_scaling_available_governors - show the available CPUfreq governors 776 */ 777 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, 778 char *buf) 779 { 780 ssize_t i = 0; 781 struct cpufreq_governor *t; 782 783 if (!has_target()) { 784 i += sprintf(buf, "performance powersave"); 785 goto out; 786 } 787 788 for_each_governor(t) { 789 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) 790 - (CPUFREQ_NAME_LEN + 2))) 791 goto out; 792 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name); 793 } 794 out: 795 i += sprintf(&buf[i], "\n"); 796 return i; 797 } 798 799 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf) 800 { 801 ssize_t i = 0; 802 unsigned int cpu; 803 804 for_each_cpu(cpu, mask) { 805 if (i) 806 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); 807 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); 808 if (i >= (PAGE_SIZE - 5)) 809 break; 810 } 811 i += sprintf(&buf[i], "\n"); 812 return i; 813 } 814 EXPORT_SYMBOL_GPL(cpufreq_show_cpus); 815 816 /** 817 * show_related_cpus - show the CPUs affected by each transition even if 818 * hw coordination is in use 819 */ 820 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) 821 { 822 return cpufreq_show_cpus(policy->related_cpus, buf); 823 } 824 825 /** 826 * show_affected_cpus - show the CPUs affected by each transition 827 */ 828 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf) 829 { 830 return cpufreq_show_cpus(policy->cpus, buf); 831 } 832 833 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, 834 const char *buf, size_t count) 835 { 
836 unsigned int freq = 0; 837 unsigned int ret; 838 839 if (!policy->governor || !policy->governor->store_setspeed) 840 return -EINVAL; 841 842 ret = sscanf(buf, "%u", &freq); 843 if (ret != 1) 844 return -EINVAL; 845 846 policy->governor->store_setspeed(policy, freq); 847 848 return count; 849 } 850 851 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) 852 { 853 if (!policy->governor || !policy->governor->show_setspeed) 854 return sprintf(buf, "<unsupported>\n"); 855 856 return policy->governor->show_setspeed(policy, buf); 857 } 858 859 /** 860 * show_bios_limit - show the current cpufreq HW/BIOS limitation 861 */ 862 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) 863 { 864 unsigned int limit; 865 int ret; 866 if (cpufreq_driver->bios_limit) { 867 ret = cpufreq_driver->bios_limit(policy->cpu, &limit); 868 if (!ret) 869 return sprintf(buf, "%u\n", limit); 870 } 871 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); 872 } 873 874 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400); 875 cpufreq_freq_attr_ro(cpuinfo_min_freq); 876 cpufreq_freq_attr_ro(cpuinfo_max_freq); 877 cpufreq_freq_attr_ro(cpuinfo_transition_latency); 878 cpufreq_freq_attr_ro(scaling_available_governors); 879 cpufreq_freq_attr_ro(scaling_driver); 880 cpufreq_freq_attr_ro(scaling_cur_freq); 881 cpufreq_freq_attr_ro(bios_limit); 882 cpufreq_freq_attr_ro(related_cpus); 883 cpufreq_freq_attr_ro(affected_cpus); 884 cpufreq_freq_attr_rw(scaling_min_freq); 885 cpufreq_freq_attr_rw(scaling_max_freq); 886 cpufreq_freq_attr_rw(scaling_governor); 887 cpufreq_freq_attr_rw(scaling_setspeed); 888 889 static struct attribute *default_attrs[] = { 890 &cpuinfo_min_freq.attr, 891 &cpuinfo_max_freq.attr, 892 &cpuinfo_transition_latency.attr, 893 &scaling_min_freq.attr, 894 &scaling_max_freq.attr, 895 &affected_cpus.attr, 896 &related_cpus.attr, 897 &scaling_governor.attr, 898 &scaling_driver.attr, 899 &scaling_available_governors.attr, 900 &scaling_setspeed.attr, 901 NULL 902 }; 903 904 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj) 905 #define to_attr(a) container_of(a, struct freq_attr, attr) 906 907 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) 908 { 909 struct cpufreq_policy *policy = to_policy(kobj); 910 struct freq_attr *fattr = to_attr(attr); 911 ssize_t ret; 912 913 down_read(&policy->rwsem); 914 ret = fattr->show(policy, buf); 915 up_read(&policy->rwsem); 916 917 return ret; 918 } 919 920 static ssize_t store(struct kobject *kobj, struct attribute *attr, 921 const char *buf, size_t count) 922 { 923 struct cpufreq_policy *policy = to_policy(kobj); 924 struct freq_attr *fattr = to_attr(attr); 925 ssize_t ret = -EINVAL; 926 927 /* 928 * cpus_read_trylock() is used here to work around a circular lock 929 * dependency problem with respect to the cpufreq_register_driver(). 
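         * The driver is registered and unregistered with the CPU hotplug lock
         * held, and removing these sysfs attributes waits for pending store()
         * callers to finish; blocking on cpus_read_lock() here could therefore
         * deadlock, so back off with -EBUSY instead.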
930 */ 931 if (!cpus_read_trylock()) 932 return -EBUSY; 933 934 if (cpu_online(policy->cpu)) { 935 down_write(&policy->rwsem); 936 ret = fattr->store(policy, buf, count); 937 up_write(&policy->rwsem); 938 } 939 940 cpus_read_unlock(); 941 942 return ret; 943 } 944 945 static void cpufreq_sysfs_release(struct kobject *kobj) 946 { 947 struct cpufreq_policy *policy = to_policy(kobj); 948 pr_debug("last reference is dropped\n"); 949 complete(&policy->kobj_unregister); 950 } 951 952 static const struct sysfs_ops sysfs_ops = { 953 .show = show, 954 .store = store, 955 }; 956 957 static struct kobj_type ktype_cpufreq = { 958 .sysfs_ops = &sysfs_ops, 959 .default_attrs = default_attrs, 960 .release = cpufreq_sysfs_release, 961 }; 962 963 static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu) 964 { 965 struct device *dev = get_cpu_device(cpu); 966 967 if (!dev) 968 return; 969 970 if (cpumask_test_and_set_cpu(cpu, policy->real_cpus)) 971 return; 972 973 dev_dbg(dev, "%s: Adding symlink\n", __func__); 974 if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq")) 975 dev_err(dev, "cpufreq symlink creation failed\n"); 976 } 977 978 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, 979 struct device *dev) 980 { 981 dev_dbg(dev, "%s: Removing symlink\n", __func__); 982 sysfs_remove_link(&dev->kobj, "cpufreq"); 983 } 984 985 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy) 986 { 987 struct freq_attr **drv_attr; 988 int ret = 0; 989 990 /* set up files for this cpu device */ 991 drv_attr = cpufreq_driver->attr; 992 while (drv_attr && *drv_attr) { 993 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); 994 if (ret) 995 return ret; 996 drv_attr++; 997 } 998 if (cpufreq_driver->get) { 999 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); 1000 if (ret) 1001 return ret; 1002 } 1003 1004 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); 1005 if (ret) 1006 return ret; 1007 1008 if (cpufreq_driver->bios_limit) { 1009 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); 1010 if (ret) 1011 return ret; 1012 } 1013 1014 return 0; 1015 } 1016 1017 __weak struct cpufreq_governor *cpufreq_default_governor(void) 1018 { 1019 return NULL; 1020 } 1021 1022 static int cpufreq_init_policy(struct cpufreq_policy *policy) 1023 { 1024 struct cpufreq_governor *gov = NULL; 1025 struct cpufreq_policy new_policy; 1026 1027 memcpy(&new_policy, policy, sizeof(*policy)); 1028 1029 /* Update governor of new_policy to the governor used before hotplug */ 1030 gov = find_governor(policy->last_governor); 1031 if (gov) { 1032 pr_debug("Restoring governor %s for cpu %d\n", 1033 policy->governor->name, policy->cpu); 1034 } else { 1035 gov = cpufreq_default_governor(); 1036 if (!gov) 1037 return -ENODATA; 1038 } 1039 1040 new_policy.governor = gov; 1041 1042 /* Use the default policy if there is no last_policy. */ 1043 if (cpufreq_driver->setpolicy) { 1044 if (policy->last_policy) 1045 new_policy.policy = policy->last_policy; 1046 else 1047 cpufreq_parse_governor(gov->name, &new_policy); 1048 } 1049 /* set default policy */ 1050 return cpufreq_set_policy(policy, &new_policy); 1051 } 1052 1053 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) 1054 { 1055 int ret = 0; 1056 1057 /* Has this CPU been taken care of already? 
*/ 1058 if (cpumask_test_cpu(cpu, policy->cpus)) 1059 return 0; 1060 1061 down_write(&policy->rwsem); 1062 if (has_target()) 1063 cpufreq_stop_governor(policy); 1064 1065 cpumask_set_cpu(cpu, policy->cpus); 1066 1067 if (has_target()) { 1068 ret = cpufreq_start_governor(policy); 1069 if (ret) 1070 pr_err("%s: Failed to start governor\n", __func__); 1071 } 1072 up_write(&policy->rwsem); 1073 return ret; 1074 } 1075 1076 static void handle_update(struct work_struct *work) 1077 { 1078 struct cpufreq_policy *policy = 1079 container_of(work, struct cpufreq_policy, update); 1080 unsigned int cpu = policy->cpu; 1081 pr_debug("handle_update for cpu %u called\n", cpu); 1082 cpufreq_update_policy(cpu); 1083 } 1084 1085 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) 1086 { 1087 struct cpufreq_policy *policy; 1088 int ret; 1089 1090 policy = kzalloc(sizeof(*policy), GFP_KERNEL); 1091 if (!policy) 1092 return NULL; 1093 1094 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) 1095 goto err_free_policy; 1096 1097 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) 1098 goto err_free_cpumask; 1099 1100 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL)) 1101 goto err_free_rcpumask; 1102 1103 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, 1104 cpufreq_global_kobject, "policy%u", cpu); 1105 if (ret) { 1106 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret); 1107 goto err_free_real_cpus; 1108 } 1109 1110 INIT_LIST_HEAD(&policy->policy_list); 1111 init_rwsem(&policy->rwsem); 1112 spin_lock_init(&policy->transition_lock); 1113 init_waitqueue_head(&policy->transition_wait); 1114 init_completion(&policy->kobj_unregister); 1115 INIT_WORK(&policy->update, handle_update); 1116 1117 policy->cpu = cpu; 1118 return policy; 1119 1120 err_free_real_cpus: 1121 free_cpumask_var(policy->real_cpus); 1122 err_free_rcpumask: 1123 free_cpumask_var(policy->related_cpus); 1124 err_free_cpumask: 1125 free_cpumask_var(policy->cpus); 1126 err_free_policy: 1127 kfree(policy); 1128 1129 return NULL; 1130 } 1131 1132 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) 1133 { 1134 struct kobject *kobj; 1135 struct completion *cmp; 1136 1137 down_write(&policy->rwsem); 1138 cpufreq_stats_free_table(policy); 1139 kobj = &policy->kobj; 1140 cmp = &policy->kobj_unregister; 1141 up_write(&policy->rwsem); 1142 kobject_put(kobj); 1143 1144 /* 1145 * We need to make sure that the underlying kobj is 1146 * actually not referenced anymore by anybody before we 1147 * proceed with unloading. 
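         * cpufreq_sysfs_release() completes policy->kobj_unregister when the
         * last reference is dropped, which is what the wait below blocks on.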
1148 */ 1149 pr_debug("waiting for dropping of refcount\n"); 1150 wait_for_completion(cmp); 1151 pr_debug("wait complete\n"); 1152 } 1153 1154 static void cpufreq_policy_free(struct cpufreq_policy *policy) 1155 { 1156 unsigned long flags; 1157 int cpu; 1158 1159 /* Remove policy from list */ 1160 write_lock_irqsave(&cpufreq_driver_lock, flags); 1161 list_del(&policy->policy_list); 1162 1163 for_each_cpu(cpu, policy->related_cpus) 1164 per_cpu(cpufreq_cpu_data, cpu) = NULL; 1165 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1166 1167 cpufreq_policy_put_kobj(policy); 1168 free_cpumask_var(policy->real_cpus); 1169 free_cpumask_var(policy->related_cpus); 1170 free_cpumask_var(policy->cpus); 1171 kfree(policy); 1172 } 1173 1174 static int cpufreq_online(unsigned int cpu) 1175 { 1176 struct cpufreq_policy *policy; 1177 bool new_policy; 1178 unsigned long flags; 1179 unsigned int j; 1180 int ret; 1181 1182 pr_debug("%s: bringing CPU%u online\n", __func__, cpu); 1183 1184 /* Check if this CPU already has a policy to manage it */ 1185 policy = per_cpu(cpufreq_cpu_data, cpu); 1186 if (policy) { 1187 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); 1188 if (!policy_is_inactive(policy)) 1189 return cpufreq_add_policy_cpu(policy, cpu); 1190 1191 /* This is the only online CPU for the policy. Start over. */ 1192 new_policy = false; 1193 down_write(&policy->rwsem); 1194 policy->cpu = cpu; 1195 policy->governor = NULL; 1196 up_write(&policy->rwsem); 1197 } else { 1198 new_policy = true; 1199 policy = cpufreq_policy_alloc(cpu); 1200 if (!policy) 1201 return -ENOMEM; 1202 } 1203 1204 if (!new_policy && cpufreq_driver->online) { 1205 ret = cpufreq_driver->online(policy); 1206 if (ret) { 1207 pr_debug("%s: %d: initialization failed\n", __func__, 1208 __LINE__); 1209 goto out_exit_policy; 1210 } 1211 1212 /* Recover policy->cpus using related_cpus */ 1213 cpumask_copy(policy->cpus, policy->related_cpus); 1214 } else { 1215 cpumask_copy(policy->cpus, cpumask_of(cpu)); 1216 1217 /* 1218 * Call driver. From then on the cpufreq must be able 1219 * to accept all calls to ->verify and ->setpolicy for this CPU. 1220 */ 1221 ret = cpufreq_driver->init(policy); 1222 if (ret) { 1223 pr_debug("%s: %d: initialization failed\n", __func__, 1224 __LINE__); 1225 goto out_free_policy; 1226 } 1227 1228 ret = cpufreq_table_validate_and_sort(policy); 1229 if (ret) 1230 goto out_exit_policy; 1231 1232 /* related_cpus should at least include policy->cpus. */ 1233 cpumask_copy(policy->related_cpus, policy->cpus); 1234 } 1235 1236 down_write(&policy->rwsem); 1237 /* 1238 * affected cpus must always be the one, which are online. We aren't 1239 * managing offline cpus here. 1240 */ 1241 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 1242 1243 if (new_policy) { 1244 policy->user_policy.min = policy->min; 1245 policy->user_policy.max = policy->max; 1246 1247 for_each_cpu(j, policy->related_cpus) { 1248 per_cpu(cpufreq_cpu_data, j) = policy; 1249 add_cpu_dev_symlink(policy, j); 1250 } 1251 } else { 1252 policy->min = policy->user_policy.min; 1253 policy->max = policy->user_policy.max; 1254 } 1255 1256 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 1257 policy->cur = cpufreq_driver->get(policy->cpu); 1258 if (!policy->cur) { 1259 pr_err("%s: ->get() failed\n", __func__); 1260 goto out_destroy_policy; 1261 } 1262 } 1263 1264 /* 1265 * Sometimes boot loaders set CPU frequency to a value outside of 1266 * frequency table present with cpufreq core. 
         * In such cases the CPU might be unstable if it has to run at that
         * frequency for a long duration of time, so it is better to set it to
         * a frequency which is specified in the freq-table. This also makes
         * cpufreq stats inconsistent, as cpufreq-stats would fail to register
         * because the current frequency of the CPU isn't found in the
         * freq-table.
         *
         * Because we don't want this change to affect the boot process badly,
         * we go for the next freq which is >= policy->cur ('cur' must be set
         * by now, otherwise we will end up setting freq to the lowest entry of
         * the table as 'cur' is initialized to zero).
         *
         * We are passing target-freq as "policy->cur - 1" otherwise
         * __cpufreq_driver_target() would simply fail, as policy->cur will be
         * equal to target-freq.
         */
        if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
            && has_target()) {
                /* Are we running at unknown frequency ? */
                ret = cpufreq_frequency_table_get_index(policy, policy->cur);
                if (ret == -EINVAL) {
                        /* Warn user and fix it */
                        pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                        ret = __cpufreq_driver_target(policy, policy->cur - 1,
                                CPUFREQ_RELATION_L);

                        /*
                         * Reaching here after boot in a few seconds may not
                         * mean that system will remain stable at "unknown"
                         * frequency for longer duration. Hence, a BUG_ON().
                         */
                        BUG_ON(ret);
                        pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                }
        }

        if (new_policy) {
                ret = cpufreq_add_dev_interface(policy);
                if (ret)
                        goto out_destroy_policy;

                cpufreq_stats_create_table(policy);

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                list_add(&policy->policy_list, &cpufreq_policy_list);
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }

        ret = cpufreq_init_policy(policy);
        if (ret) {
                pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
                       __func__, cpu, ret);
                goto out_destroy_policy;
        }

        up_write(&policy->rwsem);

        kobject_uevent(&policy->kobj, KOBJ_ADD);

        /* Callback for handling stuff after policy is ready */
        if (cpufreq_driver->ready)
                cpufreq_driver->ready(policy);

        if (IS_ENABLED(CONFIG_CPU_THERMAL) &&
            cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV)
                policy->cdev = of_cpufreq_cooling_register(policy);

        pr_debug("initialization complete\n");

        return 0;

out_destroy_policy:
        for_each_cpu(j, policy->real_cpus)
                remove_cpu_dev_symlink(policy, get_cpu_device(j));

        up_write(&policy->rwsem);

out_exit_policy:
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);

out_free_policy:
        cpufreq_policy_free(policy);
        return ret;
}

/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
1357 * @sif: Subsystem interface structure pointer (not used) 1358 */ 1359 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) 1360 { 1361 struct cpufreq_policy *policy; 1362 unsigned cpu = dev->id; 1363 int ret; 1364 1365 dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu); 1366 1367 if (cpu_online(cpu)) { 1368 ret = cpufreq_online(cpu); 1369 if (ret) 1370 return ret; 1371 } 1372 1373 /* Create sysfs link on CPU registration */ 1374 policy = per_cpu(cpufreq_cpu_data, cpu); 1375 if (policy) 1376 add_cpu_dev_symlink(policy, cpu); 1377 1378 return 0; 1379 } 1380 1381 static int cpufreq_offline(unsigned int cpu) 1382 { 1383 struct cpufreq_policy *policy; 1384 int ret; 1385 1386 pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 1387 1388 policy = cpufreq_cpu_get_raw(cpu); 1389 if (!policy) { 1390 pr_debug("%s: No cpu_data found\n", __func__); 1391 return 0; 1392 } 1393 1394 down_write(&policy->rwsem); 1395 if (has_target()) 1396 cpufreq_stop_governor(policy); 1397 1398 cpumask_clear_cpu(cpu, policy->cpus); 1399 1400 if (policy_is_inactive(policy)) { 1401 if (has_target()) 1402 strncpy(policy->last_governor, policy->governor->name, 1403 CPUFREQ_NAME_LEN); 1404 else 1405 policy->last_policy = policy->policy; 1406 } else if (cpu == policy->cpu) { 1407 /* Nominate new CPU */ 1408 policy->cpu = cpumask_any(policy->cpus); 1409 } 1410 1411 /* Start governor again for active policy */ 1412 if (!policy_is_inactive(policy)) { 1413 if (has_target()) { 1414 ret = cpufreq_start_governor(policy); 1415 if (ret) 1416 pr_err("%s: Failed to start governor\n", __func__); 1417 } 1418 1419 goto unlock; 1420 } 1421 1422 if (IS_ENABLED(CONFIG_CPU_THERMAL) && 1423 cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV) { 1424 cpufreq_cooling_unregister(policy->cdev); 1425 policy->cdev = NULL; 1426 } 1427 1428 if (cpufreq_driver->stop_cpu) 1429 cpufreq_driver->stop_cpu(policy); 1430 1431 if (has_target()) 1432 cpufreq_exit_governor(policy); 1433 1434 /* 1435 * Perform the ->offline() during light-weight tear-down, as 1436 * that allows fast recovery when the CPU comes back. 1437 */ 1438 if (cpufreq_driver->offline) { 1439 cpufreq_driver->offline(policy); 1440 } else if (cpufreq_driver->exit) { 1441 cpufreq_driver->exit(policy); 1442 policy->freq_table = NULL; 1443 } 1444 1445 unlock: 1446 up_write(&policy->rwsem); 1447 return 0; 1448 } 1449 1450 /** 1451 * cpufreq_remove_dev - remove a CPU device 1452 * 1453 * Removes the cpufreq interface for a CPU device. 1454 */ 1455 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 1456 { 1457 unsigned int cpu = dev->id; 1458 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 1459 1460 if (!policy) 1461 return; 1462 1463 if (cpu_online(cpu)) 1464 cpufreq_offline(cpu); 1465 1466 cpumask_clear_cpu(cpu, policy->real_cpus); 1467 remove_cpu_dev_symlink(policy, dev); 1468 1469 if (cpumask_empty(policy->real_cpus)) { 1470 /* We did light-weight exit earlier, do full tear down now */ 1471 if (cpufreq_driver->offline) 1472 cpufreq_driver->exit(policy); 1473 1474 cpufreq_policy_free(policy); 1475 } 1476 } 1477 1478 /** 1479 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're 1480 * in deep trouble. 1481 * @policy: policy managing CPUs 1482 * @new_freq: CPU frequency the CPU actually runs at 1483 * 1484 * We adjust to current frequency first, and need to clean up later. 1485 * So either call to cpufreq_update_policy() or schedule handle_update()). 
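 *
 * The fixup itself is done by running a regular PRECHANGE/POSTCHANGE notifier
 * pair via cpufreq_freq_transition_begin()/_end(), which also keeps
 * loops_per_jiffy and the transition statistics consistent.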
1486 */ 1487 static void cpufreq_out_of_sync(struct cpufreq_policy *policy, 1488 unsigned int new_freq) 1489 { 1490 struct cpufreq_freqs freqs; 1491 1492 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", 1493 policy->cur, new_freq); 1494 1495 freqs.old = policy->cur; 1496 freqs.new = new_freq; 1497 1498 cpufreq_freq_transition_begin(policy, &freqs); 1499 cpufreq_freq_transition_end(policy, &freqs, 0); 1500 } 1501 1502 /** 1503 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur 1504 * @cpu: CPU number 1505 * 1506 * This is the last known freq, without actually getting it from the driver. 1507 * Return value will be same as what is shown in scaling_cur_freq in sysfs. 1508 */ 1509 unsigned int cpufreq_quick_get(unsigned int cpu) 1510 { 1511 struct cpufreq_policy *policy; 1512 unsigned int ret_freq = 0; 1513 unsigned long flags; 1514 1515 read_lock_irqsave(&cpufreq_driver_lock, flags); 1516 1517 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) { 1518 ret_freq = cpufreq_driver->get(cpu); 1519 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1520 return ret_freq; 1521 } 1522 1523 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1524 1525 policy = cpufreq_cpu_get(cpu); 1526 if (policy) { 1527 ret_freq = policy->cur; 1528 cpufreq_cpu_put(policy); 1529 } 1530 1531 return ret_freq; 1532 } 1533 EXPORT_SYMBOL(cpufreq_quick_get); 1534 1535 /** 1536 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU 1537 * @cpu: CPU number 1538 * 1539 * Just return the max possible frequency for a given CPU. 1540 */ 1541 unsigned int cpufreq_quick_get_max(unsigned int cpu) 1542 { 1543 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 1544 unsigned int ret_freq = 0; 1545 1546 if (policy) { 1547 ret_freq = policy->max; 1548 cpufreq_cpu_put(policy); 1549 } 1550 1551 return ret_freq; 1552 } 1553 EXPORT_SYMBOL(cpufreq_quick_get_max); 1554 1555 static unsigned int __cpufreq_get(struct cpufreq_policy *policy) 1556 { 1557 unsigned int ret_freq = 0; 1558 1559 if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get) 1560 return ret_freq; 1561 1562 ret_freq = cpufreq_driver->get(policy->cpu); 1563 1564 /* 1565 * If fast frequency switching is used with the given policy, the check 1566 * against policy->cur is pointless, so skip it in that case too. 
         */
        if (policy->fast_switch_enabled)
                return ret_freq;

        if (ret_freq && policy->cur &&
            !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                /*
                 * Verify that no discrepancy between the actual and the
                 * saved value exists.
                 */
                if (unlikely(ret_freq != policy->cur)) {
                        cpufreq_out_of_sync(policy, ret_freq);
                        schedule_work(&policy->update);
                }
        }

        return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current frequency of the CPU as reported by the driver.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                down_read(&policy->rwsem);
                ret_freq = __cpufreq_get(policy);
                up_read(&policy->rwsem);

                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
{
        unsigned int new_freq;

        new_freq = cpufreq_driver->get(policy->cpu);
        if (!new_freq)
                return 0;

        if (!policy->cur) {
                pr_debug("cpufreq: Driver did not initialize current freq\n");
                policy->cur = new_freq;
        } else if (policy->cur != new_freq && has_target()) {
                cpufreq_out_of_sync(policy, new_freq);
        }

        return new_freq;
}

static struct subsys_interface cpufreq_interface = {
        .name           = "cpufreq",
        .subsys         = &cpu_subsys,
        .add_dev        = cpufreq_add_dev,
        .remove_dev     = cpufreq_remove_dev,
};

/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
        int ret;

        if (!policy->suspend_freq) {
                pr_debug("%s: suspend_freq not defined\n", __func__);
                return 0;
        }

        pr_debug("%s: Setting suspend-freq: %u\n", __func__,
                 policy->suspend_freq);

        ret = __cpufreq_driver_target(policy, policy->suspend_freq,
                                      CPUFREQ_RELATION_H);
        if (ret)
                pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
                       __func__, policy->suspend_freq, ret);

        return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);

/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system-wide Suspend/Hibernate cycles to suspend governors,
 * as some platforms can't change the frequency after this point in the
 * suspend cycle: some of the devices (e.g. i2c, regulators) they use for
 * changing the frequency are suspended quickly after this point.
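 *
 * The governors are started again by cpufreq_resume() during the resume path.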
1665 */ 1666 void cpufreq_suspend(void) 1667 { 1668 struct cpufreq_policy *policy; 1669 1670 if (!cpufreq_driver) 1671 return; 1672 1673 if (!has_target() && !cpufreq_driver->suspend) 1674 goto suspend; 1675 1676 pr_debug("%s: Suspending Governors\n", __func__); 1677 1678 for_each_active_policy(policy) { 1679 if (has_target()) { 1680 down_write(&policy->rwsem); 1681 cpufreq_stop_governor(policy); 1682 up_write(&policy->rwsem); 1683 } 1684 1685 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy)) 1686 pr_err("%s: Failed to suspend driver: %p\n", __func__, 1687 policy); 1688 } 1689 1690 suspend: 1691 cpufreq_suspended = true; 1692 } 1693 1694 /** 1695 * cpufreq_resume() - Resume CPUFreq governors 1696 * 1697 * Called during system wide Suspend/Hibernate cycle for resuming governors that 1698 * are suspended with cpufreq_suspend(). 1699 */ 1700 void cpufreq_resume(void) 1701 { 1702 struct cpufreq_policy *policy; 1703 int ret; 1704 1705 if (!cpufreq_driver) 1706 return; 1707 1708 if (unlikely(!cpufreq_suspended)) 1709 return; 1710 1711 cpufreq_suspended = false; 1712 1713 if (!has_target() && !cpufreq_driver->resume) 1714 return; 1715 1716 pr_debug("%s: Resuming Governors\n", __func__); 1717 1718 for_each_active_policy(policy) { 1719 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) { 1720 pr_err("%s: Failed to resume driver: %p\n", __func__, 1721 policy); 1722 } else if (has_target()) { 1723 down_write(&policy->rwsem); 1724 ret = cpufreq_start_governor(policy); 1725 up_write(&policy->rwsem); 1726 1727 if (ret) 1728 pr_err("%s: Failed to start governor for policy: %p\n", 1729 __func__, policy); 1730 } 1731 } 1732 } 1733 1734 /** 1735 * cpufreq_get_current_driver - return current driver's name 1736 * 1737 * Return the name string of the currently loaded cpufreq driver 1738 * or NULL, if none. 1739 */ 1740 const char *cpufreq_get_current_driver(void) 1741 { 1742 if (cpufreq_driver) 1743 return cpufreq_driver->name; 1744 1745 return NULL; 1746 } 1747 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); 1748 1749 /** 1750 * cpufreq_get_driver_data - return current driver data 1751 * 1752 * Return the private data of the currently loaded cpufreq 1753 * driver, or NULL if no cpufreq driver is loaded. 1754 */ 1755 void *cpufreq_get_driver_data(void) 1756 { 1757 if (cpufreq_driver) 1758 return cpufreq_driver->driver_data; 1759 1760 return NULL; 1761 } 1762 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data); 1763 1764 /********************************************************************* 1765 * NOTIFIER LISTS INTERFACE * 1766 *********************************************************************/ 1767 1768 /** 1769 * cpufreq_register_notifier - register a driver with cpufreq 1770 * @nb: notifier function to register 1771 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 1772 * 1773 * Add a driver to one of two lists: either a list of drivers that 1774 * are notified about clock rate changes (once before and once after 1775 * the transition), or a list of drivers that are notified about 1776 * changes in cpufreq policy. 1777 * 1778 * This function may sleep, and has the same return conditions as 1779 * blocking_notifier_chain_register. 
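 *
 * A minimal usage sketch (hypothetical caller, shown only for illustration):
 *
 *	static int my_freq_notifier(struct notifier_block *nb,
 *				    unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_info("cpu%u: now %u kHz\n", freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_freq_nb = {
 *		.notifier_call = my_freq_notifier,
 *	};
 *
 *	cpufreq_register_notifier(&my_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);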
1780 */ 1781 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) 1782 { 1783 int ret; 1784 1785 if (cpufreq_disabled()) 1786 return -EINVAL; 1787 1788 switch (list) { 1789 case CPUFREQ_TRANSITION_NOTIFIER: 1790 mutex_lock(&cpufreq_fast_switch_lock); 1791 1792 if (cpufreq_fast_switch_count > 0) { 1793 mutex_unlock(&cpufreq_fast_switch_lock); 1794 return -EBUSY; 1795 } 1796 ret = srcu_notifier_chain_register( 1797 &cpufreq_transition_notifier_list, nb); 1798 if (!ret) 1799 cpufreq_fast_switch_count--; 1800 1801 mutex_unlock(&cpufreq_fast_switch_lock); 1802 break; 1803 case CPUFREQ_POLICY_NOTIFIER: 1804 ret = blocking_notifier_chain_register( 1805 &cpufreq_policy_notifier_list, nb); 1806 break; 1807 default: 1808 ret = -EINVAL; 1809 } 1810 1811 return ret; 1812 } 1813 EXPORT_SYMBOL(cpufreq_register_notifier); 1814 1815 /** 1816 * cpufreq_unregister_notifier - unregister a driver with cpufreq 1817 * @nb: notifier block to be unregistered 1818 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 1819 * 1820 * Remove a driver from the CPU frequency notifier list. 1821 * 1822 * This function may sleep, and has the same return conditions as 1823 * blocking_notifier_chain_unregister. 1824 */ 1825 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) 1826 { 1827 int ret; 1828 1829 if (cpufreq_disabled()) 1830 return -EINVAL; 1831 1832 switch (list) { 1833 case CPUFREQ_TRANSITION_NOTIFIER: 1834 mutex_lock(&cpufreq_fast_switch_lock); 1835 1836 ret = srcu_notifier_chain_unregister( 1837 &cpufreq_transition_notifier_list, nb); 1838 if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0)) 1839 cpufreq_fast_switch_count++; 1840 1841 mutex_unlock(&cpufreq_fast_switch_lock); 1842 break; 1843 case CPUFREQ_POLICY_NOTIFIER: 1844 ret = blocking_notifier_chain_unregister( 1845 &cpufreq_policy_notifier_list, nb); 1846 break; 1847 default: 1848 ret = -EINVAL; 1849 } 1850 1851 return ret; 1852 } 1853 EXPORT_SYMBOL(cpufreq_unregister_notifier); 1854 1855 1856 /********************************************************************* 1857 * GOVERNORS * 1858 *********************************************************************/ 1859 1860 /** 1861 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch. 1862 * @policy: cpufreq policy to switch the frequency for. 1863 * @target_freq: New frequency to set (may be approximate). 1864 * 1865 * Carry out a fast frequency switch without sleeping. 1866 * 1867 * The driver's ->fast_switch() callback invoked by this function must be 1868 * suitable for being called from within RCU-sched read-side critical sections 1869 * and it is expected to select the minimum available frequency greater than or 1870 * equal to @target_freq (CPUFREQ_RELATION_L). 1871 * 1872 * This function must not be called if policy->fast_switch_enabled is unset. 1873 * 1874 * Governors calling this function must guarantee that it will never be invoked 1875 * twice in parallel for the same policy and that it will never be called in 1876 * parallel with either ->target() or ->target_index() for the same policy. 1877 * 1878 * Returns the actual frequency set for the CPU. 1879 * 1880 * If 0 is returned by the driver's ->fast_switch() callback to indicate an 1881 * error condition, the hardware configuration must be preserved. 
1882 */ 1883 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, 1884 unsigned int target_freq) 1885 { 1886 target_freq = clamp_val(target_freq, policy->min, policy->max); 1887 1888 return cpufreq_driver->fast_switch(policy, target_freq); 1889 } 1890 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch); 1891 1892 /* Must set freqs->new to intermediate frequency */ 1893 static int __target_intermediate(struct cpufreq_policy *policy, 1894 struct cpufreq_freqs *freqs, int index) 1895 { 1896 int ret; 1897 1898 freqs->new = cpufreq_driver->get_intermediate(policy, index); 1899 1900 /* We don't need to switch to intermediate freq */ 1901 if (!freqs->new) 1902 return 0; 1903 1904 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n", 1905 __func__, policy->cpu, freqs->old, freqs->new); 1906 1907 cpufreq_freq_transition_begin(policy, freqs); 1908 ret = cpufreq_driver->target_intermediate(policy, index); 1909 cpufreq_freq_transition_end(policy, freqs, ret); 1910 1911 if (ret) 1912 pr_err("%s: Failed to change to intermediate frequency: %d\n", 1913 __func__, ret); 1914 1915 return ret; 1916 } 1917 1918 static int __target_index(struct cpufreq_policy *policy, int index) 1919 { 1920 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; 1921 unsigned int intermediate_freq = 0; 1922 unsigned int newfreq = policy->freq_table[index].frequency; 1923 int retval = -EINVAL; 1924 bool notify; 1925 1926 if (newfreq == policy->cur) 1927 return 0; 1928 1929 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION); 1930 if (notify) { 1931 /* Handle switching to intermediate frequency */ 1932 if (cpufreq_driver->get_intermediate) { 1933 retval = __target_intermediate(policy, &freqs, index); 1934 if (retval) 1935 return retval; 1936 1937 intermediate_freq = freqs.new; 1938 /* Set old freq to intermediate */ 1939 if (intermediate_freq) 1940 freqs.old = freqs.new; 1941 } 1942 1943 freqs.new = newfreq; 1944 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n", 1945 __func__, policy->cpu, freqs.old, freqs.new); 1946 1947 cpufreq_freq_transition_begin(policy, &freqs); 1948 } 1949 1950 retval = cpufreq_driver->target_index(policy, index); 1951 if (retval) 1952 pr_err("%s: Failed to change cpu frequency: %d\n", __func__, 1953 retval); 1954 1955 if (notify) { 1956 cpufreq_freq_transition_end(policy, &freqs, retval); 1957 1958 /* 1959 * Failed after setting to intermediate freq? Driver should have 1960 * reverted back to initial frequency and so should we. Check 1961 * here for intermediate_freq instead of get_intermediate, in 1962 * case we haven't switched to intermediate freq at all. 
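                 *
                 * policy->restore_freq was saved by __cpufreq_driver_target()
                 * before the change was started, so the revert below is just
                 * another notified transition back to that frequency.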
1963 */
1964 if (unlikely(retval && intermediate_freq)) {
1965 freqs.old = intermediate_freq;
1966 freqs.new = policy->restore_freq;
1967 cpufreq_freq_transition_begin(policy, &freqs);
1968 cpufreq_freq_transition_end(policy, &freqs, 0);
1969 }
1970 }
1971
1972 return retval;
1973 }
1974
1975 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1976 unsigned int target_freq,
1977 unsigned int relation)
1978 {
1979 unsigned int old_target_freq = target_freq;
1980 int index;
1981
1982 if (cpufreq_disabled())
1983 return -ENODEV;
1984
1985 /* Make sure that target_freq is within supported range */
1986 target_freq = clamp_val(target_freq, policy->min, policy->max);
1987
1988 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1989 policy->cpu, target_freq, relation, old_target_freq);
1990
1991 /*
1992 * This might look like a redundant call, as we check the frequency again
1993 * after finding the index. It is kept intentionally so that, when exactly
1994 * the same frequency is requested again, we can return early here and
1995 * save a few function calls.
1996 */
1997 if (target_freq == policy->cur)
1998 return 0;
1999
2000 /* Save last value to restore later on errors */
2001 policy->restore_freq = policy->cur;
2002
2003 if (cpufreq_driver->target)
2004 return cpufreq_driver->target(policy, target_freq, relation);
2005
2006 if (!cpufreq_driver->target_index)
2007 return -EINVAL;
2008
2009 index = cpufreq_frequency_table_target(policy, target_freq, relation);
2010
2011 return __target_index(policy, index);
2012 }
2013 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2014
2015 int cpufreq_driver_target(struct cpufreq_policy *policy,
2016 unsigned int target_freq,
2017 unsigned int relation)
2018 {
2019 int ret = -EINVAL;
2020
2021 down_write(&policy->rwsem);
2022
2023 ret = __cpufreq_driver_target(policy, target_freq, relation);
2024
2025 up_write(&policy->rwsem);
2026
2027 return ret;
2028 }
2029 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2030
2031 __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2032 {
2033 return NULL;
2034 }
2035
2036 static int cpufreq_init_governor(struct cpufreq_policy *policy)
2037 {
2038 int ret;
2039
2040 /* Don't start any governor operations if we are entering suspend */
2041 if (cpufreq_suspended)
2042 return 0;
2043 /*
2044 * The governor might not have been initialized here if an ACPI _PPC
2045 * change notification happened, so check it.
2046 */
2047 if (!policy->governor)
2048 return -EINVAL;
2049
2050 /* Does the platform disallow dynamic frequency switching? */
2051 if (policy->governor->dynamic_switching &&
2052 cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
2053 struct cpufreq_governor *gov = cpufreq_fallback_governor();
2054
2055 if (gov) {
2056 pr_warn("Can't use %s governor as dynamic switching is disallowed.
Fallback to %s governor\n", 2057 policy->governor->name, gov->name); 2058 policy->governor = gov; 2059 } else { 2060 return -EINVAL; 2061 } 2062 } 2063 2064 if (!try_module_get(policy->governor->owner)) 2065 return -EINVAL; 2066 2067 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); 2068 2069 if (policy->governor->init) { 2070 ret = policy->governor->init(policy); 2071 if (ret) { 2072 module_put(policy->governor->owner); 2073 return ret; 2074 } 2075 } 2076 2077 return 0; 2078 } 2079 2080 static void cpufreq_exit_governor(struct cpufreq_policy *policy) 2081 { 2082 if (cpufreq_suspended || !policy->governor) 2083 return; 2084 2085 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); 2086 2087 if (policy->governor->exit) 2088 policy->governor->exit(policy); 2089 2090 module_put(policy->governor->owner); 2091 } 2092 2093 static int cpufreq_start_governor(struct cpufreq_policy *policy) 2094 { 2095 int ret; 2096 2097 if (cpufreq_suspended) 2098 return 0; 2099 2100 if (!policy->governor) 2101 return -EINVAL; 2102 2103 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); 2104 2105 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) 2106 cpufreq_update_current_freq(policy); 2107 2108 if (policy->governor->start) { 2109 ret = policy->governor->start(policy); 2110 if (ret) 2111 return ret; 2112 } 2113 2114 if (policy->governor->limits) 2115 policy->governor->limits(policy); 2116 2117 return 0; 2118 } 2119 2120 static void cpufreq_stop_governor(struct cpufreq_policy *policy) 2121 { 2122 if (cpufreq_suspended || !policy->governor) 2123 return; 2124 2125 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); 2126 2127 if (policy->governor->stop) 2128 policy->governor->stop(policy); 2129 } 2130 2131 static void cpufreq_governor_limits(struct cpufreq_policy *policy) 2132 { 2133 if (cpufreq_suspended || !policy->governor) 2134 return; 2135 2136 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); 2137 2138 if (policy->governor->limits) 2139 policy->governor->limits(policy); 2140 } 2141 2142 int cpufreq_register_governor(struct cpufreq_governor *governor) 2143 { 2144 int err; 2145 2146 if (!governor) 2147 return -EINVAL; 2148 2149 if (cpufreq_disabled()) 2150 return -ENODEV; 2151 2152 mutex_lock(&cpufreq_governor_mutex); 2153 2154 err = -EBUSY; 2155 if (!find_governor(governor->name)) { 2156 err = 0; 2157 list_add(&governor->governor_list, &cpufreq_governor_list); 2158 } 2159 2160 mutex_unlock(&cpufreq_governor_mutex); 2161 return err; 2162 } 2163 EXPORT_SYMBOL_GPL(cpufreq_register_governor); 2164 2165 void cpufreq_unregister_governor(struct cpufreq_governor *governor) 2166 { 2167 struct cpufreq_policy *policy; 2168 unsigned long flags; 2169 2170 if (!governor) 2171 return; 2172 2173 if (cpufreq_disabled()) 2174 return; 2175 2176 /* clear last_governor for all inactive policies */ 2177 read_lock_irqsave(&cpufreq_driver_lock, flags); 2178 for_each_inactive_policy(policy) { 2179 if (!strcmp(policy->last_governor, governor->name)) { 2180 policy->governor = NULL; 2181 strcpy(policy->last_governor, "\0"); 2182 } 2183 } 2184 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 2185 2186 mutex_lock(&cpufreq_governor_mutex); 2187 list_del(&governor->governor_list); 2188 mutex_unlock(&cpufreq_governor_mutex); 2189 } 2190 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); 2191 2192 2193 /********************************************************************* 2194 * POLICY INTERFACE * 2195 *********************************************************************/ 2196 2197 /** 2198 * cpufreq_get_policy - get the current 
cpufreq_policy
2199 * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
2200 * @cpu: CPU whose current policy is read
2201 *
2202 * Reads the current cpufreq policy of @cpu into @policy.
2203 */
2204 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2205 {
2206 struct cpufreq_policy *cpu_policy;
2207 if (!policy)
2208 return -EINVAL;
2209
2210 cpu_policy = cpufreq_cpu_get(cpu);
2211 if (!cpu_policy)
2212 return -EINVAL;
2213
2214 memcpy(policy, cpu_policy, sizeof(*policy));
2215
2216 cpufreq_cpu_put(cpu_policy);
2217 return 0;
2218 }
2219 EXPORT_SYMBOL(cpufreq_get_policy);
2220
2221 /**
2222 * cpufreq_set_policy - Modify cpufreq policy parameters.
2223 * @policy: Policy object to modify.
2224 * @new_policy: New policy data.
2225 *
2226 * Pass @new_policy to the cpufreq driver's ->verify() callback, run the
2227 * installed policy notifiers for it with the CPUFREQ_ADJUST value, pass it to
2228 * the driver's ->verify() callback again and run the notifiers for it again
2229 * with the CPUFREQ_NOTIFY value. Next, copy the min and max parameters
2230 * of @new_policy to @policy and either invoke the driver's ->setpolicy()
2231 * callback (if present) or carry out a governor update for @policy. That is,
2232 * run the current governor's ->limits() callback (if the governor field in
2233 * @new_policy points to the same object as the one in @policy) or replace the
2234 * governor for @policy with the new one stored in @new_policy.
2235 *
2236 * The cpuinfo part of @policy is not updated by this function.
2237 */
2238 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2239 struct cpufreq_policy *new_policy)
2240 {
2241 struct cpufreq_governor *old_gov;
2242 int ret;
2243
2244 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2245 new_policy->cpu, new_policy->min, new_policy->max);
2246
2247 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2248
2249 /*
2250 * This check works when a new min/max frequency attribute is stored,
2251 * because new_policy is a copy of policy with only one field updated.
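 * For example (hypothetical sysfs write, values in kHz): with current limits
 * 800000..2000000, writing 2400000 to scaling_min_freq yields a new_policy
 * whose min (2400000) exceeds its max (2000000), so the check below rejects
 * the update with -EINVAL and leaves the original policy untouched.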
2252 */ 2253 if (new_policy->min > new_policy->max) 2254 return -EINVAL; 2255 2256 /* verify the cpu speed can be set within this limit */ 2257 ret = cpufreq_driver->verify(new_policy); 2258 if (ret) 2259 return ret; 2260 2261 /* adjust if necessary - all reasons */ 2262 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 2263 CPUFREQ_ADJUST, new_policy); 2264 2265 /* 2266 * verify the cpu speed can be set within this limit, which might be 2267 * different to the first one 2268 */ 2269 ret = cpufreq_driver->verify(new_policy); 2270 if (ret) 2271 return ret; 2272 2273 /* notification of the new policy */ 2274 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 2275 CPUFREQ_NOTIFY, new_policy); 2276 2277 policy->min = new_policy->min; 2278 policy->max = new_policy->max; 2279 trace_cpu_frequency_limits(policy); 2280 2281 policy->cached_target_freq = UINT_MAX; 2282 2283 pr_debug("new min and max freqs are %u - %u kHz\n", 2284 policy->min, policy->max); 2285 2286 if (cpufreq_driver->setpolicy) { 2287 policy->policy = new_policy->policy; 2288 pr_debug("setting range\n"); 2289 return cpufreq_driver->setpolicy(new_policy); 2290 } 2291 2292 if (new_policy->governor == policy->governor) { 2293 pr_debug("governor limits update\n"); 2294 cpufreq_governor_limits(policy); 2295 return 0; 2296 } 2297 2298 pr_debug("governor switch\n"); 2299 2300 /* save old, working values */ 2301 old_gov = policy->governor; 2302 /* end old governor */ 2303 if (old_gov) { 2304 cpufreq_stop_governor(policy); 2305 cpufreq_exit_governor(policy); 2306 } 2307 2308 /* start new governor */ 2309 policy->governor = new_policy->governor; 2310 ret = cpufreq_init_governor(policy); 2311 if (!ret) { 2312 ret = cpufreq_start_governor(policy); 2313 if (!ret) { 2314 pr_debug("governor change\n"); 2315 sched_cpufreq_governor_change(policy, old_gov); 2316 return 0; 2317 } 2318 cpufreq_exit_governor(policy); 2319 } 2320 2321 /* new governor failed, so re-start old one */ 2322 pr_debug("starting governor %s failed\n", policy->governor->name); 2323 if (old_gov) { 2324 policy->governor = old_gov; 2325 if (cpufreq_init_governor(policy)) 2326 policy->governor = NULL; 2327 else 2328 cpufreq_start_governor(policy); 2329 } 2330 2331 return ret; 2332 } 2333 2334 /** 2335 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy. 2336 * @cpu: CPU to re-evaluate the policy for. 2337 * 2338 * Update the current frequency for the cpufreq policy of @cpu and use 2339 * cpufreq_set_policy() to re-apply the min and max limits saved in the 2340 * user_policy sub-structure of that policy, which triggers the evaluation 2341 * of policy notifiers and the cpufreq driver's ->verify() callback for the 2342 * policy in question, among other things. 
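 *
 * A minimal caller sketch (hypothetical platform code, for illustration only;
 * the function name is an assumption). Code that learns from firmware that
 * the permitted frequency range may have changed can simply do:
 *
 *	static void example_limits_changed(unsigned int cpu)
 *	{
 *		// Re-evaluates the limits and re-runs ->verify() plus the
 *		// policy notifier chain for this CPU's policy.
 *		cpufreq_update_policy(cpu);
 *	}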
2343 */ 2344 void cpufreq_update_policy(unsigned int cpu) 2345 { 2346 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 2347 struct cpufreq_policy new_policy; 2348 2349 if (!policy) 2350 return; 2351 2352 down_write(&policy->rwsem); 2353 2354 if (policy_is_inactive(policy)) 2355 goto unlock; 2356 2357 /* 2358 * BIOS might change freq behind our back 2359 * -> ask driver for current freq and notify governors about a change 2360 */ 2361 if (cpufreq_driver->get && !cpufreq_driver->setpolicy && 2362 (cpufreq_suspended || WARN_ON(!cpufreq_update_current_freq(policy)))) 2363 goto unlock; 2364 2365 pr_debug("updating policy for CPU %u\n", cpu); 2366 memcpy(&new_policy, policy, sizeof(*policy)); 2367 new_policy.min = policy->user_policy.min; 2368 new_policy.max = policy->user_policy.max; 2369 2370 cpufreq_set_policy(policy, &new_policy); 2371 2372 unlock: 2373 up_write(&policy->rwsem); 2374 2375 cpufreq_cpu_put(policy); 2376 } 2377 EXPORT_SYMBOL(cpufreq_update_policy); 2378 2379 /********************************************************************* 2380 * BOOST * 2381 *********************************************************************/ 2382 static int cpufreq_boost_set_sw(int state) 2383 { 2384 struct cpufreq_policy *policy; 2385 int ret = -EINVAL; 2386 2387 for_each_active_policy(policy) { 2388 if (!policy->freq_table) 2389 continue; 2390 2391 ret = cpufreq_frequency_table_cpuinfo(policy, 2392 policy->freq_table); 2393 if (ret) { 2394 pr_err("%s: Policy frequency update failed\n", 2395 __func__); 2396 break; 2397 } 2398 2399 down_write(&policy->rwsem); 2400 policy->user_policy.max = policy->max; 2401 cpufreq_governor_limits(policy); 2402 up_write(&policy->rwsem); 2403 } 2404 2405 return ret; 2406 } 2407 2408 int cpufreq_boost_trigger_state(int state) 2409 { 2410 unsigned long flags; 2411 int ret = 0; 2412 2413 if (cpufreq_driver->boost_enabled == state) 2414 return 0; 2415 2416 write_lock_irqsave(&cpufreq_driver_lock, flags); 2417 cpufreq_driver->boost_enabled = state; 2418 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2419 2420 ret = cpufreq_driver->set_boost(state); 2421 if (ret) { 2422 write_lock_irqsave(&cpufreq_driver_lock, flags); 2423 cpufreq_driver->boost_enabled = !state; 2424 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2425 2426 pr_err("%s: Cannot %s BOOST\n", 2427 __func__, state ? 
"enable" : "disable"); 2428 } 2429 2430 return ret; 2431 } 2432 2433 static bool cpufreq_boost_supported(void) 2434 { 2435 return likely(cpufreq_driver) && cpufreq_driver->set_boost; 2436 } 2437 2438 static int create_boost_sysfs_file(void) 2439 { 2440 int ret; 2441 2442 ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr); 2443 if (ret) 2444 pr_err("%s: cannot register global BOOST sysfs file\n", 2445 __func__); 2446 2447 return ret; 2448 } 2449 2450 static void remove_boost_sysfs_file(void) 2451 { 2452 if (cpufreq_boost_supported()) 2453 sysfs_remove_file(cpufreq_global_kobject, &boost.attr); 2454 } 2455 2456 int cpufreq_enable_boost_support(void) 2457 { 2458 if (!cpufreq_driver) 2459 return -EINVAL; 2460 2461 if (cpufreq_boost_supported()) 2462 return 0; 2463 2464 cpufreq_driver->set_boost = cpufreq_boost_set_sw; 2465 2466 /* This will get removed on driver unregister */ 2467 return create_boost_sysfs_file(); 2468 } 2469 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support); 2470 2471 int cpufreq_boost_enabled(void) 2472 { 2473 return cpufreq_driver->boost_enabled; 2474 } 2475 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled); 2476 2477 /********************************************************************* 2478 * REGISTER / UNREGISTER CPUFREQ DRIVER * 2479 *********************************************************************/ 2480 static enum cpuhp_state hp_online; 2481 2482 static int cpuhp_cpufreq_online(unsigned int cpu) 2483 { 2484 cpufreq_online(cpu); 2485 2486 return 0; 2487 } 2488 2489 static int cpuhp_cpufreq_offline(unsigned int cpu) 2490 { 2491 cpufreq_offline(cpu); 2492 2493 return 0; 2494 } 2495 2496 /** 2497 * cpufreq_register_driver - register a CPU Frequency driver 2498 * @driver_data: A struct cpufreq_driver containing the values# 2499 * submitted by the CPU Frequency driver. 2500 * 2501 * Registers a CPU Frequency driver to this core code. This code 2502 * returns zero on success, -EEXIST when another driver got here first 2503 * (and isn't unregistered in the meantime). 2504 * 2505 */ 2506 int cpufreq_register_driver(struct cpufreq_driver *driver_data) 2507 { 2508 unsigned long flags; 2509 int ret; 2510 2511 if (cpufreq_disabled()) 2512 return -ENODEV; 2513 2514 if (!driver_data || !driver_data->verify || !driver_data->init || 2515 !(driver_data->setpolicy || driver_data->target_index || 2516 driver_data->target) || 2517 (driver_data->setpolicy && (driver_data->target_index || 2518 driver_data->target)) || 2519 (!driver_data->get_intermediate != !driver_data->target_intermediate) || 2520 (!driver_data->online != !driver_data->offline)) 2521 return -EINVAL; 2522 2523 pr_debug("trying to register driver %s\n", driver_data->name); 2524 2525 /* Protect against concurrent CPU online/offline. 
*/ 2526 cpus_read_lock(); 2527 2528 write_lock_irqsave(&cpufreq_driver_lock, flags); 2529 if (cpufreq_driver) { 2530 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2531 ret = -EEXIST; 2532 goto out; 2533 } 2534 cpufreq_driver = driver_data; 2535 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2536 2537 if (driver_data->setpolicy) 2538 driver_data->flags |= CPUFREQ_CONST_LOOPS; 2539 2540 if (cpufreq_boost_supported()) { 2541 ret = create_boost_sysfs_file(); 2542 if (ret) 2543 goto err_null_driver; 2544 } 2545 2546 ret = subsys_interface_register(&cpufreq_interface); 2547 if (ret) 2548 goto err_boost_unreg; 2549 2550 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && 2551 list_empty(&cpufreq_policy_list)) { 2552 /* if all ->init() calls failed, unregister */ 2553 ret = -ENODEV; 2554 pr_debug("%s: No CPU initialized for driver %s\n", __func__, 2555 driver_data->name); 2556 goto err_if_unreg; 2557 } 2558 2559 ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, 2560 "cpufreq:online", 2561 cpuhp_cpufreq_online, 2562 cpuhp_cpufreq_offline); 2563 if (ret < 0) 2564 goto err_if_unreg; 2565 hp_online = ret; 2566 ret = 0; 2567 2568 pr_debug("driver %s up and running\n", driver_data->name); 2569 goto out; 2570 2571 err_if_unreg: 2572 subsys_interface_unregister(&cpufreq_interface); 2573 err_boost_unreg: 2574 remove_boost_sysfs_file(); 2575 err_null_driver: 2576 write_lock_irqsave(&cpufreq_driver_lock, flags); 2577 cpufreq_driver = NULL; 2578 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2579 out: 2580 cpus_read_unlock(); 2581 return ret; 2582 } 2583 EXPORT_SYMBOL_GPL(cpufreq_register_driver); 2584 2585 /** 2586 * cpufreq_unregister_driver - unregister the current CPUFreq driver 2587 * 2588 * Unregister the current CPUFreq driver. Only call this if you have 2589 * the right to do so, i.e. if you have succeeded in initialising before! 2590 * Returns zero if successful, and -EINVAL if the cpufreq_driver is 2591 * currently not initialised. 2592 */ 2593 int cpufreq_unregister_driver(struct cpufreq_driver *driver) 2594 { 2595 unsigned long flags; 2596 2597 if (!cpufreq_driver || (driver != cpufreq_driver)) 2598 return -EINVAL; 2599 2600 pr_debug("unregistering driver %s\n", driver->name); 2601 2602 /* Protect against concurrent cpu hotplug */ 2603 cpus_read_lock(); 2604 subsys_interface_unregister(&cpufreq_interface); 2605 remove_boost_sysfs_file(); 2606 cpuhp_remove_state_nocalls_cpuslocked(hp_online); 2607 2608 write_lock_irqsave(&cpufreq_driver_lock, flags); 2609 2610 cpufreq_driver = NULL; 2611 2612 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2613 cpus_read_unlock(); 2614 2615 return 0; 2616 } 2617 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); 2618 2619 /* 2620 * Stop cpufreq at shutdown to make sure it isn't holding any locks 2621 * or mutexes when secondary CPUs are halted. 2622 */ 2623 static struct syscore_ops cpufreq_syscore_ops = { 2624 .shutdown = cpufreq_suspend, 2625 }; 2626 2627 struct kobject *cpufreq_global_kobject; 2628 EXPORT_SYMBOL(cpufreq_global_kobject); 2629 2630 static int __init cpufreq_core_init(void) 2631 { 2632 if (cpufreq_disabled()) 2633 return -ENODEV; 2634 2635 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj); 2636 BUG_ON(!cpufreq_global_kobject); 2637 2638 register_syscore_ops(&cpufreq_syscore_ops); 2639 2640 return 0; 2641 } 2642 module_param(off, int, 0444); 2643 core_initcall(cpufreq_core_init); 2644
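/*
 * A minimal registration sketch (hypothetical driver, for illustration only;
 * the "example" name and the example_* callbacks are assumptions, not part of
 * this file). A frequency-table based driver typically fills in a
 * struct cpufreq_driver and hands it to cpufreq_register_driver() from its
 * module init, undoing that with cpufreq_unregister_driver() on exit:
 *
 *	static struct cpufreq_driver example_cpufreq_driver = {
 *		.name		= "example",
 *		.flags		= CPUFREQ_STICKY,
 *		.init		= example_cpu_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= example_target_index,
 *		.get		= cpufreq_generic_get,
 *	};
 *
 *	static int __init example_cpufreq_init(void)
 *	{
 *		return cpufreq_register_driver(&example_cpufreq_driver);
 *	}
 *	module_init(example_cpufreq_init);
 *
 *	static void __exit example_cpufreq_exit(void)
 *	{
 *		cpufreq_unregister_driver(&example_cpufreq_driver);
 *	}
 *	module_exit(example_cpufreq_exit);
 */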