/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

static int __cpuidle_register_device(struct cpuidle_device *dev);

static inline int cpuidle_enter(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *target_state = &drv->states[index];

	return target_state->enter(dev, drv, index);
}

static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int index)
{
	return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
}

typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index);

static cpuidle_enter_t cpuidle_enter_ops;

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns in case of an error or if no driver is registered.
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}
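
/*
 * Usage sketch (hypothetical, loosely modeled on an architecture's CPU
 * offline path; my_play_dead() and arch_halt_forever() are made-up names,
 * not in-tree functions): the arch falls back to a plain halt loop when
 * cpuidle_play_dead() finds no state with an ->enter_dead callback:
 *
 *	void my_play_dead(void)
 *	{
 *		if (cpuidle_play_dead())
 *			arch_halt_forever();	// hypothetical fallback
 *	}
 */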

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @next_state: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int next_state)
{
	int entered_state;

	entered_state = cpuidle_enter_ops(dev, drv, next_state);

	if (entered_state >= 0) {
		/*
		 * Update the cpuidle counters.  This could be done in each
		 * driver's enter routine instead, but that would result in
		 * multiple copies of the same code.
		 */
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here.
 * Returns non-zero on failure.
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv;
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

	drv = cpuidle_get_cpu_driver(dev);

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		dev->last_residency = 0;
		/* give the governor an opportunity to reflect on the outcome */
		if (cpuidle_curr_governor->reflect)
			cpuidle_curr_governor->reflect(dev, next_state);
		local_irq_enable();
		return 0;
	}

	trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	if (cpuidle_state_is_coupled(dev, drv, next_state))
		entered_state = cpuidle_enter_state_coupled(dev, drv,
							    next_state);
	else
		entered_state = cpuidle_enter_state(dev, drv, next_state);

	trace_power_end_rcuidle(dev->cpu);
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes are finished before we switch to the
		 * new idle handler */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		kick_all_cpus_sync();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}
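
/*
 * Typical external bracketing (a sketch, not taken from an in-tree caller):
 * code that reconfigures a device is expected to wrap the change in the
 * pause/resume pair so the idle handler is not running in the meantime:
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	... reconfigure dev or its states ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */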

/**
 * cpuidle_wrap_enter - performs timekeeping and irq enabling around the
 * enter function
 * @dev: pointer to a valid cpuidle_device object
 * @drv: pointer to a valid cpuidle_driver object
 * @index: index of the target cpuidle state.
 */
int cpuidle_wrap_enter(struct cpuidle_device *dev,
		       struct cpuidle_driver *drv, int index,
		       int (*enter)(struct cpuidle_device *dev,
				    struct cpuidle_driver *drv, int index))
{
	ktime_t time_start, time_end;
	s64 diff;

	time_start = ktime_get();

	index = enter(dev, drv, index);

	time_end = ktime_get();

	local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
		     struct cpuidle_driver *drv, int index)
{
	ktime_t t1, t2;
	s64 diff;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

static void poll_idle_init(struct cpuidle_driver *drv)
{
	struct cpuidle_state *state = &drv->states[0];

	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	state->power_usage = -1;
	state->flags = 0;
	state->enter = poll_idle;
	state->disabled = false;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv || !cpuidle_curr_governor)
		return -EIO;

	if (!dev->state_count)
		dev->state_count = drv->state_count;

	if (dev->registered == 0) {
		ret = __cpuidle_register_device(dev);
		if (ret)
			return ret;
	}

	cpuidle_enter_ops = drv->en_core_tk_irqen ?
		cpuidle_enter_tk : cpuidle_enter;

	poll_idle_init(drv);

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable) {
		ret = cpuidle_curr_governor->enable(drv, dev);
		if (ret)
			goto fail_sysfs;
	}

	for (i = 0; i < dev->state_count; i++) {
		dev->states_usage[i].usage = 0;
		dev->states_usage[i].time = 0;
	}
	dev->last_residency = 0;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
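
/*
 * Minimal driver sketch (hypothetical names: my_idle_driver, my_enter_wfi):
 * with en_core_tk_irqen set, cpuidle_enable_device() selects
 * cpuidle_enter_tk(), so the core does the residency timekeeping and
 * re-enables interrupts, and the ->enter callback only needs to enter the
 * low-power state:
 *
 *	static struct cpuidle_driver my_idle_driver = {
 *		.name			= "my_idle",
 *		.owner			= THIS_MODULE,
 *		.en_core_tk_irqen	= 1,
 *		.states[0] = {
 *			.enter			= my_enter_wfi,
 *			.exit_latency		= 1,
 *			.target_residency	= 1,
 *			.flags			= CPUIDLE_FLAG_TIME_VALID,
 *			.name			= "WFI",
 *			.desc			= "wait for interrupt",
 *		},
 *		.state_count = 1,
 *	};
 */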

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!try_module_get(drv->owner))
		return -EINVAL;

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto err_sysfs;

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		goto err_coupled;

	dev->registered = 1;
	return 0;

err_coupled:
	cpuidle_remove_sysfs(dev);
err_sysfs:
	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);
	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	ret = __cpuidle_register_device(dev);
	if (ret) {
		mutex_unlock(&cpuidle_lock);
		return ret;
	}

	cpuidle_enable_device(dev);
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);

	return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);
	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();

	module_put(drv->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
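
/*
 * Registration sketch (hypothetical platform init code; my_idle_dev is a
 * made-up per-cpu variable): a platform typically registers its driver
 * once, then one cpuidle_device per CPU:
 *
 *	ret = cpuidle_register_driver(&my_idle_driver);
 *	if (ret)
 *		return ret;
 *
 *	for_each_possible_cpu(cpu) {
 *		dev = &per_cpu(my_idle_dev, cpu);
 *		dev->cpu = cpu;
 *		ret = cpuidle_register_device(dev);
 *		if (ret)
 *			break;
 *	}
 */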

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state.  Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
				  unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);
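
/*
 * Caller-side sketch (hypothetical; qos_req and the value 20 are made up):
 * a driver that temporarily needs low wakeup latency publishes a PM QoS
 * request; adding or updating it fires cpuidle_latency_notifier above,
 * which IPIs every CPU out of its current C-state so the governor can
 * pick a shallower one on the next idle entry:
 *
 *	struct pm_qos_request qos_req;
 *
 *	pm_qos_add_request(&qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
 *	... latency-critical section ...
 *	pm_qos_remove_request(&qos_req);
 */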