/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

bool cpuidle_not_available(struct cpuidle_driver *drv,
			   struct cpuidle_device *dev)
{
	return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns in case of an error or no driver
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= 0; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}

static int find_deepest_state(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev,
			      unsigned int max_latency,
			      unsigned int forbidden_flags,
			      bool s2idle)
{
	unsigned int latency_req = 0;
	int i, ret = 0;

	for (i = 1; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable || s->exit_latency <= latency_req
		    || s->exit_latency > max_latency
		    || (s->flags & forbidden_flags)
		    || (s2idle && !s->enter_s2idle))
			continue;

		latency_req = s->exit_latency;
		ret = i;
	}
	return ret;
}

/**
 * cpuidle_use_deepest_state - Set/clear governor override flag.
 * @enable: New value of the flag.
 *
 * Set/unset the current CPU to use the deepest idle state (override governors
 * going forward if set).
 */
void cpuidle_use_deepest_state(bool enable)
{
	struct cpuidle_device *dev;

	preempt_disable();
	dev = cpuidle_get_device();
	if (dev)
		dev->use_deepest_state = enable;
	preempt_enable();
}
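
/*
 * Illustrative sketch (not part of this file): how a caller such as the
 * idle-injection path can use cpuidle_use_deepest_state() to override the
 * governor for the current CPU around a forced idle period. The
 * play_idle_sketch() name is hypothetical; the real kernel helper with this
 * shape is play_idle().
 */
#if 0	/* example only, not built */
static void play_idle_sketch(void)
{
	/* Override the governor: always pick the deepest state. */
	cpuidle_use_deepest_state(true);

	/* ... run the idle loop (e.g. do_idle()) for the injected period ... */

	/* Restore normal governor-driven state selection. */
	cpuidle_use_deepest_state(false);
}
#endif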

/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}

#ifdef CONFIG_SUSPEND
static void enter_s2idle_proper(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{
	ktime_t time_start, time_end;

	time_start = ns_to_ktime(local_clock());

	/*
	 * trace_suspend_resume() called by tick_freeze() for the last CPU
	 * executing it contains RCU usage regarded as invalid in the idle
	 * context, so tell RCU about that.
	 */
	RCU_NONIDLE(tick_freeze());
	/*
	 * The state used here cannot be a "coupled" one, because the "coupled"
	 * cpuidle mechanism enables interrupts and doing that with timekeeping
	 * suspended is generally unsafe.
	 */
	stop_critical_timings();
	drv->states[index].enter_s2idle(dev, drv, index);
	WARN_ON(!irqs_disabled());
	/*
	 * timekeeping_resume() that will be called by tick_unfreeze() for the
	 * first CPU executing it calls functions containing RCU read-side
	 * critical sections, so tell RCU about that.
	 */
	RCU_NONIDLE(tick_unfreeze());
	start_critical_timings();

	time_end = ns_to_ktime(local_clock());

	dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start);
	dev->states_usage[index].s2idle_usage++;
}

/**
 * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 *
 * If there are states with the ->enter_s2idle callback, find the deepest of
 * them and enter it with frozen tick.
 */
int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	int index;

	/*
	 * Find the deepest state with ->enter_s2idle present, which guarantees
	 * that interrupts won't be enabled when it exits and allows the tick to
	 * be frozen safely.
	 */
	index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
	if (index > 0)
		enter_s2idle_proper(drv, dev, index);

	return index;
}
#endif /* CONFIG_SUSPEND */
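
/*
 * Illustrative sketch (not part of this file): a driver state entry that
 * supplies both a regular ->enter() handler and an ->enter_s2idle() handler,
 * making the state eligible for cpuidle_enter_s2idle() above. All names and
 * numbers below (example_enter, example_enter_s2idle, the latency/residency
 * values) are hypothetical.
 */
#if 0	/* example only, not built */
static struct cpuidle_state example_state = {
	.name			= "C1-EXAMPLE",
	.desc			= "hypothetical shallow C-state",
	.exit_latency		= 2,	/* us */
	.target_residency	= 20,	/* us */
	.enter			= example_enter,	/* may re-enable IRQs */
	.enter_s2idle		= example_enter_s2idle,	/* must keep IRQs off */
};
#endif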

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into the states table in @drv of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
	ktime_t time_start, time_end;

	/*
	 * Tell the time framework to switch to a broadcast timer because our
	 * local timer will be shut down.  If a local timer is used from another
	 * CPU as a broadcast timer, this call may fail if it is not available.
	 */
	if (broadcast && tick_broadcast_enter()) {
		index = find_deepest_state(drv, dev, target_state->exit_latency,
					   CPUIDLE_FLAG_TIMER_STOP, false);
		if (index < 0) {
			default_idle_call();
			return -EBUSY;
		}
		target_state = &drv->states[index];
		broadcast = false;
	}

	/* Take note of the planned idle state. */
	sched_idle_set_state(target_state);

	trace_cpu_idle_rcuidle(index, dev->cpu);
	time_start = ns_to_ktime(local_clock());

	stop_critical_timings();
	entered_state = target_state->enter(dev, drv, index);
	start_critical_timings();

	sched_clock_idle_wakeup_event();
	time_end = ns_to_ktime(local_clock());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* The cpu is no longer idle or about to enter idle. */
	sched_idle_set_state(NULL);

	if (broadcast) {
		if (WARN_ON_ONCE(!irqs_disabled()))
			local_irq_disable();

		tick_broadcast_exit();
	}

	if (!cpuidle_state_is_coupled(drv, index))
		local_irq_enable();

	if (entered_state >= 0) {
		s64 diff, delay = drv->states[entered_state].exit_latency;
		int i;

		/*
		 * Update cpuidle counters
		 * This can be moved to within driver enter routine,
		 * but that results in multiple copies of same code.
		 */
		diff = ktime_us_delta(time_end, time_start);
		if (diff > INT_MAX)
			diff = INT_MAX;

		dev->last_residency = (int)diff;
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;

		if (diff < drv->states[entered_state].target_residency) {
			for (i = entered_state - 1; i >= 0; i--) {
				if (drv->states[i].disabled ||
				    dev->states_usage[i].disable)
					continue;

				/* Shallower states are enabled, so update. */
				dev->states_usage[entered_state].above++;
				break;
			}
		} else if (diff > delay) {
			for (i = entered_state + 1; i < drv->state_count; i++) {
				if (drv->states[i].disabled ||
				    dev->states_usage[i].disable)
					continue;

				/*
				 * Update if a deeper state would have been a
				 * better match for the observed idle duration.
				 */
				if (diff - delay >= drv->states[i].target_residency)
					dev->states_usage[entered_state].below++;

				break;
			}
		}
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}

/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 * @stop_tick: indication on whether or not to stop the tick
 *
 * Returns the index of the idle state.  The return value must not be negative.
 *
 * The memory location pointed to by @stop_tick is expected to be written the
 * 'false' boolean value if the scheduler tick should not be stopped before
 * entering the returned state.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		   bool *stop_tick)
{
	return cpuidle_curr_governor->select(drv, dev, stop_tick);
}
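
/*
 * Illustrative sketch (not part of this file): the canonical call sequence
 * the idle loop in kernel/sched/idle.c roughly follows around the helpers
 * above and below. Tick bookkeeping and error handling are elided; treat
 * this as a sketch, not a copy of the scheduler code.
 */
#if 0	/* example only, not built */
static void idle_loop_sketch(struct cpuidle_driver *drv,
			     struct cpuidle_device *dev)
{
	bool stop_tick = true;
	int next_state, entered_state;

	/* Ask the governor which state to enter and whether to stop the tick. */
	next_state = cpuidle_select(drv, dev, &stop_tick);

	/* ... stop the scheduler tick here if stop_tick ended up true ... */

	/* Enter the chosen state; IRQs are re-enabled on the way out. */
	entered_state = cpuidle_enter(drv, dev, next_state);

	/* Give the governor feedback about the state actually entered. */
	cpuidle_reflect(dev, entered_state);
}
#endif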

/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index of the entered idle state, < 0 in case of error.
 * The error code depends on the backend driver.
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		  int index)
{
	if (cpuidle_state_is_coupled(drv, index))
		return cpuidle_enter_state_coupled(dev, drv, index);
	return cpuidle_enter_state(dev, drv, index);
}

/**
 * cpuidle_reflect - tell the underlying governor which state we were in
 *
 * @dev  : the cpuidle device
 * @index: the index in the idle state table
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
	if (cpuidle_curr_governor->reflect && index >= 0)
		cpuidle_curr_governor->reflect(dev, index);
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		wake_up_all_idle_cpus();
	}

	/*
	 * Make sure external observers (such as the scheduler)
	 * are done looking at pointed idle states.
	 */
	synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	if (!cpuidle_curr_governor)
		return -EIO;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv)
		return -EIO;

	if (!dev->registered)
		return -EINVAL;

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable) {
		ret = cpuidle_curr_governor->enable(drv, dev);
		if (ret)
			goto fail_sysfs;
	}

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
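
/*
 * Illustrative sketch (not part of this file): the locking pattern an
 * external caller (e.g. a platform driver rebuilding its C-state table at
 * runtime) is expected to follow around cpuidle_disable_device() and
 * cpuidle_enable_device(). The retune_states_sketch() name and example_dev
 * pointer are hypothetical.
 */
#if 0	/* example only, not built */
static void retune_states_sketch(struct cpuidle_device *example_dev)
{
	/* Uninstall the idle handler and take cpuidle_lock. */
	cpuidle_pause_and_lock();

	cpuidle_disable_device(example_dev);
	/* ... safely modify the device's idle-state parameters here ... */
	cpuidle_enable_device(example_dev);

	/* Reinstall the idle handler and drop the lock. */
	cpuidle_resume_and_unlock();
}
#endif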

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);

	dev->registered = 0;
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency = 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!try_module_get(drv->owner))
		return -EINVAL;

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;

	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if (dev->registered)
		goto out_unlock;

	__cpuidle_device_init(dev);

	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;

	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;

	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;

	cpuidle_install_idle_handler();

out_unlock:
	mutex_unlock(&cpuidle_lock);

	return ret;

out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (!dev || dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);

	__cpuidle_unregister_device(dev);

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
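
/*
 * Illustrative sketch (not part of this file): the low-level, two-step
 * registration path (driver first, then one device per CPU) that a driver
 * such as intel_idle follows instead of the cpuidle_register() helper
 * below. example_driver and register_percpu_sketch() are hypothetical, and
 * error unwinding is elided.
 */
#if 0	/* example only, not built */
static int register_percpu_sketch(struct cpuidle_driver *example_driver)
{
	int cpu, ret;

	ret = cpuidle_register_driver(example_driver);
	if (ret)
		return ret;

	for_each_online_cpu(cpu) {
		struct cpuidle_device *dev = &per_cpu(cpuidle_dev, cpu);

		dev->cpu = cpu;
		ret = cpuidle_register_device(dev);
		if (ret)
			break;	/* real code would unwind here */
	}
	return ret;
}
#endif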

/**
 * cpuidle_unregister: unregister a driver and the devices. This function
 * can be used only if the driver has been previously registered through
 * the cpuidle_register function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register: registers the driver and the cpu devices with the
 * coupled_cpus passed as parameter. This function handles the common
 * initialization pattern found in the arch-specific drivers. The devices
 * are globally defined in this file.
 *
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;

	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
		 * On multiplatform for ARM, the coupled idle states could be
		 * enabled in the kernel even if the cpuidle driver does not
		 * use it. Note, coupled_cpus is a struct copy.
		 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;

		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

		cpuidle_unregister(drv);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);

#ifdef CONFIG_SMP

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
				  unsigned long l, void *v)
{
	wake_up_all_idle_cpus();
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
module_param_string(governor, param_governor, CPUIDLE_NAME_LEN, 0444);
core_initcall(cpuidle_init);
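
/*
 * Since this file is built into the kernel, the module parameters above are
 * set from the kernel command line with the "cpuidle." prefix, e.g.
 * "cpuidle.off=1" disables cpuidle entirely and "cpuidle.governor=<name>"
 * selects the governor to use at boot.
 *
 * Illustrative sketch (not part of this file): the typical way an arch
 * driver consumes cpuidle_register(). The driver structure, state table and
 * enter callback are all hypothetical, and cpu_do_idle() stands in for
 * whatever architecture-specific wait-for-interrupt primitive a real driver
 * would call.
 */
#if 0	/* example only, not built */
static int example_enter_wfi(struct cpuidle_device *dev,
			     struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();	/* hypothetical arch wait-for-interrupt */
	return index;
}

static struct cpuidle_driver example_idle_driver = {
	.name = "example_idle",
	.owner = THIS_MODULE,
	.states[0] = {
		.enter			= example_enter_wfi,
		.exit_latency		= 1,	/* us */
		.target_residency	= 1,	/* us */
		.name			= "WFI",
		.desc			= "hypothetical wait-for-interrupt state",
	},
	.state_count = 1,
};

static int __init example_idle_init(void)
{
	/* No coupled states, so coupled_cpus is NULL. */
	return cpuidle_register(&example_idle_driver, NULL);
}
device_initcall(example_idle_init);
#endif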