/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>

#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
static DEFINE_SPINLOCK(cpufreq_driver_lock);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list is for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * Each list is protected by its notifier head's own locking
 * (an rwsem for the blocking "policy" chain, SRCU for the
 * "transition" chain).
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= NR_CPUS)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = cpufreq_cpu_data[cpu];

	if (!data)
		goto err_out_put_module;

	if (!kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);


void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
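
/*
 * Typical usage of the pair above (illustrative sketch only; the
 * do_something_with() helper is hypothetical): every successful
 * cpufreq_cpu_get() must be balanced by cpufreq_cpu_put(), which drops
 * both the kobject reference and the driver module reference taken here.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *	if (policy) {
 *		do_something_with(policy);
 *		cpufreq_cpu_put(policy);
 *	}
 */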


/*********************************************************************
 *                     UNIFIED DEBUG HELPERS                         *
 *********************************************************************/
#ifdef CONFIG_CPU_FREQ_DEBUG

/* what part(s) of the CPUfreq subsystem are debugged? */
static unsigned int debug;

/* is the debug output ratelimit'ed using printk_ratelimit? User can
 * set or modify this value.
 */
static unsigned int debug_ratelimit = 1;

/* is the printk_ratelimit'ing enabled? It's enabled after a successful
 * loading of a cpufreq driver, temporarily disabled when a new policy
 * is set, and disabled upon cpufreq driver removal
 */
static unsigned int disable_ratelimit = 1;
static DEFINE_SPINLOCK(disable_ratelimit_lock);

static void cpufreq_debug_enable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	if (disable_ratelimit)
		disable_ratelimit--;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

static void cpufreq_debug_disable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	disable_ratelimit++;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt, ...)
{
	char s[256];
	va_list args;
	unsigned int len;
	unsigned long flags;

	WARN_ON(!prefix);
	if (type & debug) {
		spin_lock_irqsave(&disable_ratelimit_lock, flags);
		if (!disable_ratelimit && debug_ratelimit && !printk_ratelimit()) {
			spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&disable_ratelimit_lock, flags);

		len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);

		va_start(args, fmt);
		len += vsnprintf(&s[len], (256 - len), fmt, args);
		va_end(args);

		/* the buffer is fully formatted already; don't let printk
		 * re-interpret any '%' the expanded text may contain */
		printk("%s", s);

		WARN_ON(len < 5);
	}
}
EXPORT_SYMBOL(cpufreq_debug_printk);


module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core, 2 to debug drivers, and 4 to debug governors.");

module_param(debug_ratelimit, uint, 0644);
MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging: set to 0 to disable ratelimiting.");

#else /* !CONFIG_CPU_FREQ_DEBUG */

static inline void cpufreq_debug_enable_ratelimit(void) { return; }
static inline void cpufreq_debug_disable_ratelimit(void) { return; }

#endif /* CONFIG_CPU_FREQ_DEBUG */

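/*
 * A minimal sketch of how the knobs above are driven from user space;
 * the paths assume the usual sysfs layout for module parameters and a
 * core named "cpufreq" (adjust as needed):
 *
 *	# echo 7 > /sys/module/cpufreq/parameters/debug
 *	#	(bitmask: 1 = core, 2 = drivers, 4 = governors)
 *	# echo 0 > /sys/module/cpufreq/parameters/debug_ratelimit
 *
 * In-kernel callers normally go through the dprintk() wrapper defined
 * at the top of this file rather than calling cpufreq_debug_printk()
 * directly.
 */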

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		dprintk("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new);
		dprintk("scaling loops_per_jiffy to %lu for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { return; }
#endif
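
/*
 * The scaling above is plain linear interpolation; cpufreq_scale() is
 * defined in <linux/cpufreq.h>, and the call above computes
 *
 *	loops_per_jiffy = l_p_j_ref * ci->new / l_p_j_ref_freq
 *
 * so, for example, a reference of 4,000,000 loops saved at
 * 2,000,000 kHz becomes 2,000,000 loops when the CPU drops to
 * 1,000,000 kHz.
 */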


/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;
	dprintk("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = cpufreq_cpu_data[freqs->cpu];
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				dprintk("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
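
/*
 * A scaling driver is expected to bracket the actual hardware switch
 * with the two notifications, roughly like this (sketch; error paths
 * omitted and my_write_pstate() is a hypothetical driver helper):
 *
 *	freqs.cpu = policy->cpu;
 *	freqs.old = current_freq;
 *	freqs.new = target_freq;
 *	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 *	my_write_pstate(target_freq);
 *	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 */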



/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				  struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			/* try to autoload the governor module, then look
			 * it up a second time */
			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", str_governor);

			if (name) {
				int ret;

				mutex_unlock(&cpufreq_governor_mutex);
				ret = request_module(name);
				mutex_lock(&cpufreq_governor_mutex);

				if (ret == 0)
					t = __find_governor(str_governor);
			}

			kfree(name);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}


/* drivers/base/cpu.c */
extern struct sysdev_class cpu_sysdev_class;


/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() - print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret;	/* must be signed: error codes are negative */	\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	lock_cpu_hotplug();						\
	mutex_lock(&policy->lock);					\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
	mutex_unlock(&policy->lock);					\
	unlock_cpu_hotplug();						\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
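
/*
 * For reference, show_one(scaling_max_freq, max) above expands to
 * (whitespace rearranged):
 *
 *	static ssize_t show_scaling_max_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->max);
 *	}
 *
 * which the define_one_rw() macro further down wires to the
 * "scaling_max_freq" sysfs attribute.
 */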

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	unsigned int cur_freq = cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}


/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", policy->governor->name);
	return -EINVAL;
}


/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	int ret;	/* must be signed: error codes are negative */
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor))
		return -EINVAL;

	lock_cpu_hotplug();

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	mutex_lock(&policy->lock);
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;
	mutex_unlock(&policy->lock);

	unlock_cpu_hotplug();

	return ret ? ret : count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu_mask(cpu, policy->cpus) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}


#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_ro0400(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0400, show_##_name, NULL)

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_ro0400(cpuinfo_cur_freq);
define_one_ro(cpuinfo_min_freq);
define_one_ro(cpuinfo_max_freq);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	NULL
};
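
/*
 * With the attributes above registered, each policy's kobject shows up
 * as a per-CPU sysfs directory, typically:
 *
 *	/sys/devices/system/cpu/cpu0/cpufreq/
 *		affected_cpus
 *		cpuinfo_cur_freq	(0400; only if the driver has ->get)
 *		cpuinfo_max_freq
 *		cpuinfo_min_freq
 *		scaling_available_governors
 *		scaling_cur_freq	(only if the driver has ->target)
 *		scaling_driver
 *		scaling_governor
 *		scaling_max_freq
 *		scaling_min_freq
 *
 * The two conditional files are added in cpufreq_add_dev() below.
 */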

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;
	ret = fattr->show ? fattr->show(policy, buf) : -EIO;
	cpufreq_cpu_put(policy);
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;
	ret = fattr->store ? fattr->store(policy, buf, count) : -EIO;
	cpufreq_cpu_put(policy);
	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	dprintk("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};


/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 */
static int cpufreq_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int ret = 0;
	struct cpufreq_policy new_policy;
	struct cpufreq_policy *policy;
	struct freq_attr **drv_attr;
	struct sys_device *cpu_sys_dev;
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_SMP
	struct cpufreq_policy *managed_policy;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy) {
		ret = -ENOMEM;
		goto nomem_out;
	}

	policy->cpu = cpu;
	policy->cpus = cpumask_of_cpu(cpu);

	mutex_init(&policy->lock);
	mutex_lock(&policy->lock);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		dprintk("initialization failed\n");
		mutex_unlock(&policy->lock);
		goto err_out;
	}

#ifdef CONFIG_SMP
	for_each_cpu_mask(j, policy->cpus) {
		if (cpu == j)
			continue;

		/* check for existing affected CPUs. They may not be aware
		 * of it due to CPU Hotplug.
		 */
		managed_policy = cpufreq_cpu_get(j);
		if (unlikely(managed_policy)) {
			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			managed_policy->cpus = policy->cpus;
			cpufreq_cpu_data[cpu] = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			dprintk("CPU already managed, adding link\n");
			sysfs_create_link(&sys_dev->kobj,
					  &managed_policy->kobj, "cpufreq");

			cpufreq_debug_enable_ratelimit();
			mutex_unlock(&policy->lock);
			ret = 0;
			goto err_out_driver_exit; /* call driver->exit() */
		}
	}
#endif
	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));

	/* prepare interface data */
	policy->kobj.parent = &sys_dev->kobj;
	policy->kobj.ktype = &ktype_cpufreq;
	strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);

	ret = kobject_register(&policy->kobj);
	if (ret) {
		mutex_unlock(&policy->lock);
		goto err_out_driver_exit;
	}
	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		drv_attr++;
	}
	if (cpufreq_driver->get)
		sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
	if (cpufreq_driver->target)
		sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu_mask(j, policy->cpus)
		cpufreq_cpu_data[j] = policy;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* symlink affected CPUs */
	for_each_cpu_mask(j, policy->cpus) {
		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		dprintk("CPU %u already managed, adding link\n", j);
		cpufreq_cpu_get(cpu);
		cpu_sys_dev = get_cpu_sysdev(j);
		sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
				  "cpufreq");
	}

	policy->governor = NULL; /* to assure that the starting sequence is
				  * run in cpufreq_set_policy */
	mutex_unlock(&policy->lock);

	/* set default policy */
	ret = cpufreq_set_policy(&new_policy);
	if (ret) {
		dprintk("setting policy failed\n");
		goto err_out_unregister;
	}

	module_put(cpufreq_driver->owner);
	dprintk("initialization complete\n");
	cpufreq_debug_enable_ratelimit();

	return 0;


err_out_unregister:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu_mask(j, policy->cpus)
		cpufreq_cpu_data[j] = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_unregister(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_out_driver_exit:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

err_out:
	kfree(policy);

nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}
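
/*
 * For the sequence above to work, a driver's ->init() callback must at
 * least fill in policy->cpuinfo, policy->cur and, for shared clocks,
 * policy->cpus. A minimal sketch (all values made up; my_read_khz() is
 * hypothetical; table-based drivers usually derive the limits via
 * cpufreq_frequency_table_cpuinfo() instead):
 *
 *	static int my_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->cpuinfo.min_freq = 800000;		// kHz
 *		policy->cpuinfo.max_freq = 2000000;		// kHz
 *		policy->cpuinfo.transition_latency = 100000;	// ns
 *		policy->cur = my_read_khz(policy->cpu);
 *		policy->min = policy->cpuinfo.min_freq;
 *		policy->max = policy->cpuinfo.max_freq;
 *		return 0;
 *	}
 */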


/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static int cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long flags;
	struct cpufreq_policy *data;
#ifdef CONFIG_SMP
	struct sys_device *cpu_sys_dev;
	unsigned int j;
#endif

	cpufreq_debug_disable_ratelimit();
	dprintk("unregistering CPU %u\n", cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = cpufreq_cpu_data[cpu];

	if (!data) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		return -EINVAL;
	}
	cpufreq_cpu_data[cpu] = NULL;


#ifdef CONFIG_SMP
	/* if this isn't the CPU which is the parent of the kobj, we
	 * only need to unlink, put and exit
	 */
	if (unlikely(cpu != data->cpu)) {
		dprintk("removing link\n");
		cpu_clear(cpu, data->cpus);
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
		cpufreq_cpu_put(data);
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif


	if (!kobject_get(&data->kobj)) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		return -EFAULT;
	}

#ifdef CONFIG_SMP
	/* if we have other CPUs still registered, we need to unlink them,
	 * or else wait_for_completion below will lock up. Clean the
	 * cpufreq_cpu_data[] while holding the lock, and remove the sysfs
	 * links afterwards.
	 */
	if (unlikely(cpus_weight(data->cpus) > 1)) {
		for_each_cpu_mask(j, data->cpus) {
			if (j == cpu)
				continue;
			cpufreq_cpu_data[j] = NULL;
		}
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (unlikely(cpus_weight(data->cpus) > 1)) {
		for_each_cpu_mask(j, data->cpus) {
			if (j == cpu)
				continue;
			dprintk("removing link for cpu %u\n", j);
			cpu_sys_dev = get_cpu_sysdev(j);
			sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
			cpufreq_cpu_put(data);
		}
	}
#else
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	mutex_lock(&data->lock);
	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
	mutex_unlock(&data->lock);

	kobject_unregister(&data->kobj);

	kobject_put(&data->kobj);

	/* we need to make sure that the underlying kobj is actually
	 * not referenced anymore by anybody before we proceed with
	 * unloading.
	 */
	dprintk("waiting for dropping of refcount\n");
	wait_for_completion(&data->kobj_unregister);
	dprintk("wait complete\n");

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(data);

	kfree(data);

	cpufreq_debug_enable_ratelimit();
	return 0;
}


static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	dprintk("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and clean up later by either
 * calling cpufreq_update_policy() or scheduling handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
		"core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.cpu = cpu;
	freqs.old = old_freq;
	freqs.new = new_freq;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}


/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret = 0;

	if (policy) {
		mutex_lock(&policy->lock);
		ret = policy->cur;
		mutex_unlock(&policy->lock);
		cpufreq_cpu_put(policy);
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_quick_get);
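
/*
 * cpufreq_quick_get() above only reports the cached policy->cur, while
 * cpufreq_get() below asks the driver and resynchronizes if the cached
 * and actual values disagree. Sketch of the trade-off:
 *
 *	khz = cpufreq_quick_get(cpu);	// cheap, may be stale
 *	khz = cpufreq_get(cpu);		// authoritative; may sleep, may
 *					// touch hardware
 */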


/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current frequency of CPU @cpu as reported by the driver.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret = 0;

	if (!policy)
		return 0;

	if (!cpufreq_driver->get)
		goto out;

	mutex_lock(&policy->lock);

	ret = cpufreq_driver->get(cpu);

	if (ret && policy->cur && !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and saved value exists */
		if (unlikely(ret != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret);
			schedule_work(&policy->update);
		}
	}

	mutex_unlock(&policy->lock);

out:
	cpufreq_cpu_put(policy);

	return ret;
}
EXPORT_SYMBOL(cpufreq_get);


/**
 * cpufreq_suspend - let the low level driver prepare for suspend
 */

static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
{
	int cpu = sysdev->id;
	unsigned int ret = 0;
	unsigned int cur_freq = 0;
	struct cpufreq_policy *cpu_policy;

	dprintk("suspending cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu)) {
		cpufreq_cpu_put(cpu_policy);
		return 0;
	}

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
		if (ret) {
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
			cpufreq_cpu_put(cpu_policy);
			return ret;
		}
	}


	if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
		goto out;

	if (cpufreq_driver->get)
		cur_freq = cpufreq_driver->get(cpu_policy->cpu);

	if (!cur_freq || !cpu_policy->cur) {
		printk(KERN_ERR "cpufreq: suspend failed to assert current "
		       "frequency is what timing core thinks it is.\n");
		goto out;
	}

	if (unlikely(cur_freq != cpu_policy->cur)) {
		struct cpufreq_freqs freqs;

		if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
			dprintk("Warning: CPU frequency is %u, "
				"cpufreq assumed %u kHz.\n",
				cur_freq, cpu_policy->cur);

		freqs.cpu = cpu;
		freqs.old = cpu_policy->cur;
		freqs.new = cur_freq;

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_SUSPENDCHANGE, &freqs);
		adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);

		cpu_policy->cur = cur_freq;
	}

out:
	cpufreq_cpu_put(cpu_policy);
	return 0;
}

/**
 * cpufreq_resume - restore proper CPU frequency handling after resume
 *
 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 * 2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
 * 3.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *     restored.
 */
static int cpufreq_resume(struct sys_device *sysdev)
{
	int cpu = sysdev->id;
	unsigned int ret = 0;
	struct cpufreq_policy *cpu_policy;

	dprintk("resuming cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu)) {
		cpufreq_cpu_put(cpu_policy);
		return 0;
	}

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			cpufreq_cpu_put(cpu_policy);
			return ret;
		}
	}

	if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		unsigned int cur_freq = 0;

		if (cpufreq_driver->get)
			cur_freq = cpufreq_driver->get(cpu_policy->cpu);

		if (!cur_freq || !cpu_policy->cur) {
			printk(KERN_ERR "cpufreq: resume failed to assert "
					"current frequency is what timing core "
					"thinks it is.\n");
			goto out;
		}

		if (unlikely(cur_freq != cpu_policy->cur)) {
			struct cpufreq_freqs freqs;

			if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
				dprintk("Warning: CPU frequency "
					"is %u, cpufreq assumed %u kHz.\n",
					cur_freq, cpu_policy->cur);

			freqs.cpu = cpu;
			freqs.old = cpu_policy->cur;
			freqs.new = cur_freq;

			srcu_notifier_call_chain(
					&cpufreq_transition_notifier_list,
					CPUFREQ_RESUMECHANGE, &freqs);
			adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);

			cpu_policy->cur = cur_freq;
		}
	}

out:
	schedule_work(&cpu_policy->update);
	cpufreq_cpu_put(cpu_policy);
	return ret;
}

static struct sysdev_driver cpufreq_sysdev_driver = {
	.add		= cpufreq_add_dev,
	.remove		= cpufreq_remove_dev,
	.suspend	= cpufreq_suspend,
	.resume		= cpufreq_resume,
};


/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
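
/*
 * A typical transition-notifier client looks like this (sketch; the
 * example_* and rescale_my_timebase() names are hypothetical). The
 * callback runs once with CPUFREQ_PRECHANGE and once with
 * CPUFREQ_POSTCHANGE per transition, as described above:
 *
 *	static int example_transition(struct notifier_block *nb,
 *				      unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			rescale_my_timebase(freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_transition,
 *	};
 *
 *	cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */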


/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/


/* Must be called with lock_cpu_hotplug held */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;

	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
		target_freq, relation);
	if (cpu_online(policy->cpu) && cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	lock_cpu_hotplug();
	mutex_lock(&policy->lock);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	mutex_unlock(&policy->lock);
	unlock_cpu_hotplug();

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
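
/*
 * The relation argument tells the driver how to round target_freq
 * against its frequency table: CPUFREQ_RELATION_L selects the lowest
 * frequency at or above the target, CPUFREQ_RELATION_H the highest at
 * or below it. E.g. with a 600/800/1000 MHz table and a 700 MHz target:
 *
 *	cpufreq_driver_target(policy, 700000, CPUFREQ_RELATION_L); // 800 MHz
 *	cpufreq_driver_target(policy, 700000, CPUFREQ_RELATION_H); // 600 MHz
 */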

/*
 * Locking: Must be called with the lock_cpu_hotplug() lock held
 * when "event" is CPUFREQ_GOV_LIMITS
 */

static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
{
	int ret;

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	dprintk("__cpufreq_governor for CPU %u, event %u\n", policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* we keep one module reference alive for each CPU governed by
	 * this governor */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}


int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);


void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	if (!governor)
		return;

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
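
/*
 * A minimal governor hooks in through the struct below (sketch; the
 * example_* names are hypothetical). ->governor() is invoked with the
 * START/STOP/LIMITS events dispatched by __cpufreq_governor() above;
 * this one simply pins the CPU at policy->max, much like the stock
 * "performance" governor:
 *
 *	static int example_governor_fn(struct cpufreq_policy *policy,
 *				       unsigned int event)
 *	{
 *		if (event == CPUFREQ_GOV_START || event == CPUFREQ_GOV_LIMITS)
 *			return __cpufreq_driver_target(policy, policy->max,
 *						       CPUFREQ_RELATION_H);
 *		return 0;
 *	}
 *
 *	static struct cpufreq_governor example_governor = {
 *		.name		= "example",
 *		.governor	= example_governor_fn,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&example_governor);
 */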



/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	mutex_lock(&cpu_policy->lock);
	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
	mutex_unlock(&cpu_policy->lock);

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);


/*
 * Locking: Must be called with the lock_cpu_hotplug() lock held
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
{
	int ret = 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo));

	/* reject requests that don't overlap the currently valid range */
	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	dprintk("new min and max freqs are %u - %u kHz\n", data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		dprintk("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			dprintk("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				dprintk("starting governor %s failed\n", data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data, CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		dprintk("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}

/**
 * cpufreq_set_policy - set a new CPUFreq policy
 * @policy: policy to be set.
 *
 * Sets a new CPU frequency and voltage scaling policy.
 */
int cpufreq_set_policy(struct cpufreq_policy *policy)
{
	int ret = 0;
	struct cpufreq_policy *data;

	if (!policy)
		return -EINVAL;

	data = cpufreq_cpu_get(policy->cpu);
	if (!data)
		return -EINVAL;

	lock_cpu_hotplug();

	/* lock this CPU */
	mutex_lock(&data->lock);

	ret = __cpufreq_set_policy(data, policy);
	data->user_policy.min = data->min;
	data->user_policy.max = data->max;
	data->user_policy.policy = data->policy;
	data->user_policy.governor = data->governor;

	mutex_unlock(&data->lock);

	unlock_cpu_hotplug();
	cpufreq_cpu_put(data);

	return ret;
}
EXPORT_SYMBOL(cpufreq_set_policy);


/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different requirements
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret = 0;

	if (!data)
		return -ENODEV;

	lock_cpu_hotplug();
	mutex_lock(&data->lock);

	dprintk("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	   -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			dprintk("Driver did not initialize current freq\n");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur, policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	mutex_unlock(&data->lock);
	unlock_cpu_hotplug();
	cpufreq_cpu_put(data);
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);

static int cpufreq_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpufreq_policy *policy;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);

	if (sys_dev) {
		switch (action) {
		case CPU_ONLINE:
			cpufreq_add_dev(sys_dev);
			break;
		case CPU_DOWN_PREPARE:
			/*
			 * We attempt to put this cpu in lowest frequency
			 * possible before going down. This will permit
			 * hardware-managed P-State to switch other related
			 * threads to min or higher speeds if possible.
			 */
			policy = cpufreq_cpu_data[cpu];
			if (policy) {
				cpufreq_driver_target(policy, policy->min,
						      CPUFREQ_RELATION_H);
			}
			break;
		case CPU_DEAD:
			cpufreq_remove_dev(sys_dev);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
{
	.notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *              REGISTER / UNREGISTER CPUFREQ DRIVER                 *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	dprintk("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = sysdev_driver_register(&cpu_sysdev_class, &cpufreq_sysdev_driver);

	if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < NR_CPUS; i++)
			if (cpufreq_cpu_data[i])
				ret = 0;

		/* if all ->init() calls failed, unregister */
		if (ret) {
			dprintk("no CPU initialized for driver %s\n", driver_data->name);
			sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpufreq_driver = NULL;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		}
	}

	if (!ret) {
		register_hotcpu_notifier(&cpufreq_cpu_notifier);
		dprintk("driver %s up and running\n", driver_data->name);
		cpufreq_debug_enable_ratelimit();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);


/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	cpufreq_debug_disable_ratelimit();

	if (!cpufreq_driver || (driver != cpufreq_driver)) {
		cpufreq_debug_enable_ratelimit();
		return -EINVAL;
	}

	dprintk("unregistering driver %s\n", driver->name);

	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
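
/*
 * Putting the registration interface together, a minimal scaling driver
 * looks roughly like this (sketch; all my_* identifiers are hypothetical
 * and must be implemented by a real driver). As checked above, ->init
 * and ->verify are mandatory, plus at least one of ->target/->setpolicy:
 *
 *	static struct cpufreq_driver my_driver = {
 *		.name	= "mydrv",
 *		.owner	= THIS_MODULE,
 *		.init	= my_cpufreq_init,
 *		.verify	= my_cpufreq_verify,
 *		.target	= my_cpufreq_target,	// or .setpolicy for
 *						// policy-only hardware
 *		.get	= my_cpufreq_get,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return cpufreq_register_driver(&my_driver);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		cpufreq_unregister_driver(&my_driver);
 *	}
 */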