/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

bool cpuidle_not_available(struct cpuidle_driver *drv,
			   struct cpuidle_device *dev)
{
	return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error or if there is no driver and no state
 * with an ->enter_dead() callback.
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= 0; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}

static int find_deepest_state(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev,
			      unsigned int max_latency,
			      unsigned int forbidden_flags,
			      bool freeze)
{
	unsigned int latency_req = 0;
	int i, ret = 0;

	for (i = 1; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable || s->exit_latency <= latency_req
		    || s->exit_latency > max_latency
		    || (s->flags & forbidden_flags)
		    || (freeze && !s->enter_freeze))
			continue;

		latency_req = s->exit_latency;
		ret = i;
	}
	return ret;
}

/**
 * cpuidle_use_deepest_state - Set/clear governor override flag.
 * @enable: New value of the flag.
 *
 * Set/unset the current CPU to use the deepest idle state (override governors
 * going forward if set).
 */
void cpuidle_use_deepest_state(bool enable)
{
	struct cpuidle_device *dev;

	preempt_disable();
	dev = cpuidle_get_device();
	if (dev)
		dev->use_deepest_state = enable;
	preempt_enable();
}
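
/*
 * Usage sketch (illustrative, not part of this file): idle-injection code
 * that wants a CPU to bypass the governor for a while brackets the forced
 * idle period with this helper; the surrounding loop is hypothetical:
 *
 *	cpuidle_use_deepest_state(true);
 *	... run the idle loop for the desired duration ...
 *	cpuidle_use_deepest_state(false);
 *
 * While the flag is set, the idle loop picks the state via
 * cpuidle_find_deepest_state() below instead of asking the governor.
 */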

/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}

#ifdef CONFIG_SUSPEND
static void enter_freeze_proper(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{
	/*
	 * trace_suspend_resume() called by tick_freeze() for the last CPU
	 * executing it contains RCU usage regarded as invalid in the idle
	 * context, so tell RCU about that.
	 */
	RCU_NONIDLE(tick_freeze());
	/*
	 * The state used here cannot be a "coupled" one, because the "coupled"
	 * cpuidle mechanism enables interrupts and doing that with timekeeping
	 * suspended is generally unsafe.
	 */
	stop_critical_timings();
	drv->states[index].enter_freeze(dev, drv, index);
	WARN_ON(!irqs_disabled());
	/*
	 * timekeeping_resume() that will be called by tick_unfreeze() for the
	 * first CPU executing it calls functions containing RCU read-side
	 * critical sections, so tell RCU about that.
	 */
	RCU_NONIDLE(tick_unfreeze());
	start_critical_timings();
}

/**
 * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 *
 * If there are states with the ->enter_freeze callback, find the deepest of
 * them and enter it with the tick frozen.
 */
int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	int index;

	/*
	 * Find the deepest state with ->enter_freeze present, which guarantees
	 * that interrupts won't be enabled when it exits and allows the tick
	 * to be frozen safely.
	 */
	index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
	if (index > 0)
		enter_freeze_proper(drv, dev, index);

	return index;
}
#endif /* CONFIG_SUSPEND */
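
/*
 * Call-site sketch (a simplified rendering of the idle loop, for
 * illustration only): during suspend-to-idle the idle task bypasses the
 * governor and tries the deepest freeze-capable state first:
 *
 *	if (idle_should_freeze()) {
 *		entered_state = cpuidle_enter_freeze(drv, dev);
 *		if (entered_state > 0) {
 *			local_irq_enable();
 *			goto exit_idle;
 *		}
 *	}
 *
 * A return value of 0 means no suitable ->enter_freeze state was found, and
 * the caller falls back to the regular cpuidle_select()/cpuidle_enter() path.
 */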

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into the states table in @drv of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
	ktime_t time_start, time_end;
	s64 diff;

	/*
	 * Tell the time framework to switch to a broadcast timer because our
	 * local timer will be shut down.  If a local timer is used from another
	 * CPU as a broadcast timer, this call may fail if it is not available.
	 */
	if (broadcast && tick_broadcast_enter()) {
		index = find_deepest_state(drv, dev, target_state->exit_latency,
					   CPUIDLE_FLAG_TIMER_STOP, false);
		if (index < 0) {
			default_idle_call();
			return -EBUSY;
		}
		target_state = &drv->states[index];
	}

	/* Take note of the planned idle state. */
	sched_idle_set_state(target_state);

	trace_cpu_idle_rcuidle(index, dev->cpu);
	time_start = ns_to_ktime(local_clock());

	stop_critical_timings();
	entered_state = target_state->enter(dev, drv, index);
	start_critical_timings();

	time_end = ns_to_ktime(local_clock());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* The cpu is no longer idle or about to enter idle. */
	sched_idle_set_state(NULL);

	if (broadcast) {
		if (WARN_ON_ONCE(!irqs_disabled()))
			local_irq_disable();

		tick_broadcast_exit();
	}

	if (!cpuidle_state_is_coupled(drv, index))
		local_irq_enable();

	diff = ktime_us_delta(time_end, time_start);
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	if (entered_state >= 0) {
		/*
		 * Update cpuidle counters.  This could be moved into the
		 * drivers' enter routines, but that would result in multiple
		 * copies of the same code.
		 */
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}

/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 *
 * Returns the index of the idle state.  The return value must not be negative.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	return cpuidle_curr_governor->select(drv, dev);
}

/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index in the idle state table, < 0 in case of error.
 * The error code depends on the backend driver.
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		  int index)
{
	if (cpuidle_state_is_coupled(drv, index))
		return cpuidle_enter_state_coupled(dev, drv, index);
	return cpuidle_enter_state(dev, drv, index);
}

/**
 * cpuidle_reflect - tell the underlying governor what state we were in
 *
 * @dev  : the cpuidle device
 * @index: the index in the idle state table
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
	if (cpuidle_curr_governor->reflect && index >= 0)
		cpuidle_curr_governor->reflect(dev, index);
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}
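
/*
 * Usage sketch (illustrative): each pass through the idle loop drives the
 * framework as a select/enter/reflect cycle, roughly:
 *
 *	next_state = cpuidle_select(drv, dev);
 *	entered_state = cpuidle_enter(drv, dev, next_state);
 *	cpuidle_reflect(dev, entered_state);
 *
 * cpuidle_select() asks the governor to predict the best state,
 * cpuidle_enter() actually idles the CPU, and cpuidle_reflect() feeds the
 * measured residency back so the governor can refine future predictions.
 */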

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		wake_up_all_idle_cpus();
	}

	/*
	 * Make sure external observers (such as the scheduler)
	 * are done looking at the idle state pointers.
	 */
	synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv || !cpuidle_curr_governor)
		return -EIO;

	if (!dev->registered)
		return -EINVAL;

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
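
/*
 * Usage sketch (illustrative): an external caller reconfiguring a device,
 * e.g. after its set of C-states has changed, wraps the disable/enable pair
 * in the pause/resume helpers so the idle loop never observes a half-updated
 * device:
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	... update the device's idle state information ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */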

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);

	dev->registered = 0;
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency = 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!try_module_get(drv->owner))
		return -EINVAL;

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;

	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if (dev->registered)
		goto out_unlock;

	__cpuidle_device_init(dev);

	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;

	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;

	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;

	cpuidle_install_idle_handler();

out_unlock:
	mutex_unlock(&cpuidle_lock);

	return ret;

out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (!dev || dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);

	__cpuidle_unregister_device(dev);

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
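
/*
 * Usage sketch (illustrative; my_cpuidle_dev is a hypothetical per-CPU
 * variable, not part of this file): a driver managing its own device
 * structures registers one per CPU and unwinds on failure:
 *
 *	for_each_possible_cpu(cpu) {
 *		dev = &per_cpu(my_cpuidle_dev, cpu);
 *		dev->cpu = cpu;
 *		ret = cpuidle_register_device(dev);
 *		if (ret)
 *			goto unwind;
 *	}
 *
 * Drivers with no per-device specifics can instead use cpuidle_register()
 * below, which implements exactly this loop over the framework-provided
 * cpuidle_dev instances.
 */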

/**
 * cpuidle_unregister: unregister a driver and the devices. This function
 * can be used only if the driver has been previously registered through
 * the cpuidle_register function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register: registers the driver and the cpu devices with the
 * coupled_cpus passed as parameter. This function handles the common
 * initialization pattern found in the arch-specific drivers. The devices
 * are globally defined in this file.
 *
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;

	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
		 * On multiplatform for ARM, the coupled idle states could be
		 * enabled in the kernel even if the cpuidle driver does not
		 * use it. Note, coupled_cpus is a struct copy.
		 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;

		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

		cpuidle_unregister(drv);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);

#ifdef CONFIG_SMP

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state.  Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
				  unsigned long l, void *v)
{
	wake_up_all_idle_cpus();
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);
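
/*
 * Driver sketch (illustrative only; "my_idle" and my_enter() are hypothetical
 * names, latencies and residencies are in microseconds): the common
 * registration pattern that cpuidle_register() serves looks like this from a
 * platform driver's side:
 *
 *	static struct cpuidle_driver my_idle_driver = {
 *		.name        = "my_idle",
 *		.owner       = THIS_MODULE,
 *		.states[0]   = {
 *			.enter            = my_enter,
 *			.exit_latency     = 10,
 *			.target_residency = 100,
 *			.name             = "C1",
 *			.desc             = "hypothetical shallow state",
 *		},
 *		.state_count = 1,
 *	};
 *
 *	ret = cpuidle_register(&my_idle_driver, NULL);
 *
 * The whole framework can still be disabled at boot with "cpuidle.off=1",
 * which corresponds to the module_param() above.
 */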