/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

bool cpuidle_not_available(struct cpuidle_driver *drv,
			   struct cpuidle_device *dev)
{
	return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error or if there is no driver.
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find the lowest-power state that supports long-term idle. */
	for (i = drv->state_count - 1; i >= 0; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}

static int find_deepest_state(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev,
			      unsigned int max_latency,
			      unsigned int forbidden_flags,
			      bool freeze)
{
	unsigned int latency_req = 0;
	int i, ret = -ENXIO;

	for (i = 0; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable || s->exit_latency <= latency_req
		    || s->exit_latency > max_latency
		    || (s->flags & forbidden_flags)
		    || (freeze && !s->enter_freeze))
			continue;

		latency_req = s->exit_latency;
		ret = i;
	}
	return ret;
}

/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}

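/*
 * Worked example for find_deepest_state() above, with a hypothetical
 * three-state table: given C1 (exit_latency 2), C2 (exit_latency 50,
 * disabled) and C3 (exit_latency 200), a call to
 * find_deepest_state(drv, dev, 100, 0, false) skips C2 (disabled) and
 * C3 (exit_latency above max_latency) and returns the index of C1,
 * the deepest state that still qualifies.
 */
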
static void enter_freeze_proper(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{
	tick_freeze();
	/*
	 * The state used here cannot be a "coupled" one, because the "coupled"
	 * cpuidle mechanism enables interrupts and doing that with timekeeping
	 * suspended is generally unsafe.
	 */
	drv->states[index].enter_freeze(dev, drv, index);
	WARN_ON(!irqs_disabled());
	/*
	 * timekeeping_resume() that will be called by tick_unfreeze() for the
	 * last CPU executing it calls functions containing RCU read-side
	 * critical sections, so tell RCU about that.
	 */
	RCU_NONIDLE(tick_unfreeze());
}

/**
 * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 *
 * If there are states with the ->enter_freeze callback, find the deepest of
 * them and enter it with frozen tick.
 */
int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	int index;

	/*
	 * Find the deepest state with ->enter_freeze present, which guarantees
	 * that interrupts won't be enabled when it exits and allows the tick to
	 * be frozen safely.
	 */
	index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
	if (index >= 0)
		enter_freeze_proper(drv, dev, index);

	return index;
}

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into the states table in @drv of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
	ktime_t time_start, time_end;
	s64 diff;

	/*
	 * Tell the time framework to switch to a broadcast timer because our
	 * local timer will be shut down.  If a local timer is used from another
	 * CPU as a broadcast timer, this call may fail if it is not available.
	 */
	if (broadcast && tick_broadcast_enter()) {
		index = find_deepest_state(drv, dev, target_state->exit_latency,
					   CPUIDLE_FLAG_TIMER_STOP, false);
		if (index < 0) {
			default_idle_call();
			return -EBUSY;
		}
		target_state = &drv->states[index];
	}

	/* Take note of the planned idle state. */
	sched_idle_set_state(target_state);

	trace_cpu_idle_rcuidle(index, dev->cpu);
	time_start = ktime_get();

	entered_state = target_state->enter(dev, drv, index);

	time_end = ktime_get();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* The cpu is no longer idle or about to enter idle. */
	sched_idle_set_state(NULL);

	if (broadcast) {
		if (WARN_ON_ONCE(!irqs_disabled()))
			local_irq_disable();

		tick_broadcast_exit();
	}

	if (!cpuidle_state_is_coupled(dev, drv, entered_state))
		local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	if (entered_state >= 0) {
		/*
		 * Update cpuidle counters.  This could be moved into the
		 * driver's enter routine, but that would result in multiple
		 * copies of the same code.
		 */
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}

/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 *
 * Returns the index of the idle state.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	return cpuidle_curr_governor->select(drv, dev);
}

/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv: the cpuidle driver tied to the cpu
 * @dev: the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index of the entered idle state, or a negative value in case
 * of error.  The error code depends on the backend driver.
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		  int index)
{
	if (cpuidle_state_is_coupled(dev, drv, index))
		return cpuidle_enter_state_coupled(dev, drv, index);
	return cpuidle_enter_state(dev, drv, index);
}

/**
 * cpuidle_reflect - tell the underlying governor about the state we were in
 *
 * @dev: the cpuidle device
 * @index: the index in the idle state table
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
	if (cpuidle_curr_governor->reflect && index >= 0)
		cpuidle_curr_governor->reflect(dev, index);
}

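/*
 * Sketch of how the three functions above are driven from an idle loop,
 * following the pattern used by the scheduler's idle path (details
 * simplified):
 *
 *	next_state = cpuidle_select(drv, dev);
 *	entered_state = cpuidle_enter(drv, dev, next_state);
 *	cpuidle_reflect(dev, entered_state);
 */
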
/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		wake_up_all_idle_cpus();
	}

	/*
	 * Make sure external observers (such as the scheduler)
	 * are done looking at pointed idle states.
	 */
	synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in the suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in the suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

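/*
 * External callers that enable or disable a device (see
 * cpuidle_enable_device() and cpuidle_disable_device() below) are
 * expected to bracket the call with the pause/resume pair, e.g.
 * (a sketch):
 *
 *	cpuidle_pause_and_lock();
 *	ret = cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */
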
/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv || !cpuidle_curr_governor)
		return -EIO;

	if (!dev->registered)
		return -EINVAL;

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency = 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!try_module_get(drv->owner))
		return -EINVAL;

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;

	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if (dev->registered)
		goto out_unlock;

	__cpuidle_device_init(dev);

	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;

	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;

	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;

	cpuidle_install_idle_handler();

out_unlock:
	mutex_unlock(&cpuidle_lock);

	return ret;

out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

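/*
 * A driver that manages its own per-CPU device structures (instead of
 * using cpuidle_register() below) would register each one along these
 * lines (a sketch; my_cpuidle_dev is hypothetical per-CPU storage):
 *
 *	struct cpuidle_device *dev = &per_cpu(my_cpuidle_dev, cpu);
 *
 *	dev->cpu = cpu;
 *	ret = cpuidle_register_device(dev);
 */
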
/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (!dev || dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);

	__cpuidle_unregister_device(dev);

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister: unregister a driver and its devices.  This function
 * can be used only if the driver has been previously registered through
 * the cpuidle_register function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register: registers the driver and the cpu devices with the
 * coupled_cpus passed as parameter.  This function covers the common
 * initialization pattern found in the arch specific drivers.  The
 * devices are globally defined in this file.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;

	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
		 * On multiplatform for ARM, the coupled idle states could be
		 * enabled in the kernel even if the cpuidle driver does not
		 * use it.  Note, coupled_cpus is a struct copy.
		 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;

		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

		cpuidle_unregister(drv);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);

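/*
 * Minimal driver-side usage of cpuidle_register() (a sketch; the driver
 * name, state values and my_c1_enter callback are all hypothetical):
 *
 *	static struct cpuidle_driver my_idle_driver = {
 *		.name			= "my_idle",
 *		.owner			= THIS_MODULE,
 *		.states[0]		= {
 *			.name			= "C1",
 *			.exit_latency		= 2,
 *			.target_residency	= 5,
 *			.enter			= my_c1_enter,
 *		},
 *		.state_count		= 1,
 *	};
 *
 *	ret = cpuidle_register(&my_idle_driver, NULL);
 */
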
#ifdef CONFIG_SMP

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state.  Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
				  unsigned long l, void *v)
{
	wake_up_all_idle_cpus();
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);
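
/*
 * Note: since this file builds as "cpuidle", the read-only module
 * parameter above is set from the kernel command line with:
 *
 *	cpuidle.off=1
 *
 * which makes cpuidle_disabled() return true, so cpuidle_init() bails
 * out and the whole framework stays disabled.
 */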