/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}
void disable_cpuidle(void)
{
	off = 1;
}

static int __cpuidle_register_device(struct cpuidle_device *dev);

static inline int cpuidle_enter(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *target_state = &drv->states[index];
	return target_state->enter(dev, drv, index);
}

static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int index)
{
	return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
}

typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index);

static cpuidle_enter_t cpuidle_enter_ops;

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns in case of an error or no driver
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i, dead_state = -1;
	int power_usage = INT_MAX;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		if (s->power_usage < power_usage && s->enter_dead) {
			power_usage = s->power_usage;
			dead_state = i;
		}
	}

	if (dead_state != -1)
		return drv->states[dead_state].enter_dead(dev, dead_state);

	return -ENODEV;
}

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @next_state: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int next_state)
{
	int entered_state;

	entered_state = cpuidle_enter_ops(dev, drv, next_state);

	if (entered_state >= 0) {
		/* Update cpuidle counters */
		/* This can be moved to within the driver enter routine,
		 * but that results in multiple copies of the same code.
		 */
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}
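/*
 * Illustrative sketch (not part of the original file): the ->enter()
 * callback invoked through cpuidle_enter_ops above is supplied by the
 * platform driver.  It is called with interrupts disabled, must enter the
 * requested state, and must return the index of the state actually
 * entered or a negative error code.  The name "foo_enter" and the body
 * are hypothetical; on a driver with ->en_core_tk_irqen set, timekeeping
 * and the final local_irq_enable() are handled by the
 * cpuidle_enter_tk()/cpuidle_wrap_enter() path, so the callback only has
 * to do the platform-specific work.  A minimal sketch, assuming an
 * ARM-like platform where cpu_do_idle() performs the actual
 * wait-for-interrupt:
 *
 *	static int foo_enter(struct cpuidle_device *dev,
 *			     struct cpuidle_driver *drv, int index)
 *	{
 *		cpu_do_idle();
 *		return index;
 *	}
 */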
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv;
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

	drv = cpuidle_get_cpu_driver(dev);

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		dev->last_residency = 0;
		/* give the governor an opportunity to reflect on the outcome */
		if (cpuidle_curr_governor->reflect)
			cpuidle_curr_governor->reflect(dev, next_state);
		local_irq_enable();
		return 0;
	}

	trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	if (cpuidle_state_is_coupled(dev, drv, next_state))
		entered_state = cpuidle_enter_state_coupled(dev, drv,
							    next_state);
	else
		entered_state = cpuidle_enter_state(dev, drv, next_state);

	trace_power_end_rcuidle(dev->cpu);
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}
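/*
 * Rough, hypothetical sketch (not part of this file) of how an
 * architecture idle loop might use the entry point above: cpuidle is
 * tried first, and the architecture falls back to its own default idle
 * routine when cpuidle_idle_call() returns non-zero (cpuidle disabled,
 * not initialized, or the per-CPU device not ready).  The name
 * "arch_default_idle" is made up and stands in for the architecture's
 * fallback, which is expected to re-enable interrupts itself:
 *
 *	while (!need_resched()) {
 *		local_irq_disable();
 *		if (cpuidle_idle_call())
 *			arch_default_idle();
 *	}
 */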
/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes have finished before we switch to
		 * the new idle handler */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		kick_all_cpus_sync();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_wrap_enter - performs timekeeping and irq enabling around the
 * enter function
 * @dev: pointer to a valid cpuidle_device object
 * @drv: pointer to a valid cpuidle_driver object
 * @index: index of the target cpuidle state.
 */
int cpuidle_wrap_enter(struct cpuidle_device *dev,
		       struct cpuidle_driver *drv, int index,
		       int (*enter)(struct cpuidle_device *dev,
				    struct cpuidle_driver *drv, int index))
{
	ktime_t time_start, time_end;
	s64 diff;

	time_start = ktime_get();

	index = enter(dev, drv, index);

	time_end = ktime_get();

	local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
		     struct cpuidle_driver *drv, int index)
{
	ktime_t t1, t2;
	s64 diff;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

static void poll_idle_init(struct cpuidle_driver *drv)
{
	struct cpuidle_state *state = &drv->states[0];

	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	state->power_usage = -1;
	state->flags = 0;
	state->enter = poll_idle;
	state->disabled = false;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv || !cpuidle_curr_governor)
		return -EIO;

	if (!dev->state_count)
		dev->state_count = drv->state_count;

	if (dev->registered == 0) {
		ret = __cpuidle_register_device(dev);
		if (ret)
			return ret;
	}

	cpuidle_enter_ops = drv->en_core_tk_irqen ?
		cpuidle_enter_tk : cpuidle_enter;

	poll_idle_init(drv);

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;

	for (i = 0; i < dev->state_count; i++) {
		dev->states_usage[i].usage = 0;
		dev->states_usage[i].time = 0;
	}
	dev->last_residency = 0;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
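/*
 * Illustrative sketch (hypothetical caller, not part of this file): as the
 * kernel-doc above requires, external callers bracket enable/disable with
 * cpuidle_pause_and_lock()/cpuidle_resume_and_unlock(), for example when
 * reconfiguring the idle states exposed for a CPU:
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	... update dev or the driver's state table ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */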
/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!try_module_get(drv->owner))
		return -EINVAL;

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto err_sysfs;

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		goto err_coupled;

	dev->registered = 1;
	return 0;

err_coupled:
	cpuidle_remove_sysfs(dev);
err_sysfs:
	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);
	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if ((ret = __cpuidle_register_device(dev))) {
		mutex_unlock(&cpuidle_lock);
		return ret;
	}

	cpuidle_enable_device(dev);
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);

	return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);
	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();

	module_put(drv->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
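/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a platform driver typically registers its cpuidle_driver once and then
 * one cpuidle_device per CPU through the interface above.  The names
 * "foo_idle_driver" and "foo_idle_devices" are made up:
 *
 *	static DEFINE_PER_CPU(struct cpuidle_device, foo_idle_devices);
 *
 *	ret = cpuidle_register_driver(&foo_idle_driver);
 *	if (ret)
 *		return ret;
 *
 *	for_each_possible_cpu(cpu) {
 *		struct cpuidle_device *dev = &per_cpu(foo_idle_devices, cpu);
 *
 *		dev->cpu = cpu;
 *		ret = cpuidle_register_device(dev);
 *		if (ret)
 *			break;
 *	}
 *
 * Teardown mirrors this: cpuidle_unregister_device() per CPU, then
 * cpuidle_unregister_driver().
 */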
#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state.  Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
				  unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);
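/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * PM QoS notifier registered via cpuidle_init() above fires whenever some
 * part of the kernel changes the CPU_DMA_LATENCY constraint, for example
 * a driver doing roughly the following ("foo_qos_req" is a made-up name,
 * and 20 is an arbitrary latency bound in microseconds):
 *
 *	static struct pm_qos_request foo_qos_req;
 *
 *	pm_qos_add_request(&foo_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
 *	... latency-sensitive work; deep C-states are avoided ...
 *	pm_qos_remove_request(&foo_qos_req);
 *
 * Each such update triggers the cross-CPU IPI above so idle CPUs
 * re-evaluate their C-state choice against the new constraint.
 */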