/*
 * drivers/base/power/wakeup.c - System wakeup events framework
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include "power.h"

#define TIMEOUT		100

/*
 * If set, the suspend/hibernate code will abort transitions to a sleep state
 * if wakeup events are registered during or immediately before the transition.
 */
bool events_check_enabled;

/* The counter of registered wakeup events. */
static atomic_t event_count = ATOMIC_INIT(0);
/* A preserved old value of event_count. */
static unsigned int saved_count;
/* The counter of wakeup events being processed. */
static atomic_t events_in_progress = ATOMIC_INIT(0);

static DEFINE_SPINLOCK(events_lock);

static void pm_wakeup_timer_fn(unsigned long data);

static LIST_HEAD(wakeup_sources);

/**
 * wakeup_source_create - Create a struct wakeup_source object.
 * @name: Name of the new wakeup source.
 */
struct wakeup_source *wakeup_source_create(const char *name)
{
	struct wakeup_source *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return NULL;

	spin_lock_init(&ws->lock);
	if (name)
		ws->name = kstrdup(name, GFP_KERNEL);

	return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);

/**
 * wakeup_source_destroy - Destroy a struct wakeup_source object.
 * @ws: Wakeup source to destroy.
 */
void wakeup_source_destroy(struct wakeup_source *ws)
{
	if (!ws)
		return;

	spin_lock_irq(&ws->lock);
	while (ws->active) {
		spin_unlock_irq(&ws->lock);

		schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));

		spin_lock_irq(&ws->lock);
	}
	spin_unlock_irq(&ws->lock);

	kfree(ws->name);
	kfree(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_destroy);

/**
 * wakeup_source_add - Add given object to the list of wakeup sources.
 * @ws: Wakeup source object to add to the list.
 */
void wakeup_source_add(struct wakeup_source *ws)
{
	if (WARN_ON(!ws))
		return;

	setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
	ws->active = false;

	spin_lock_irq(&events_lock);
	list_add_rcu(&ws->entry, &wakeup_sources);
	spin_unlock_irq(&events_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(wakeup_source_add);

/**
 * wakeup_source_remove - Remove given object from the wakeup sources list.
 * @ws: Wakeup source object to remove from the list.
 */
void wakeup_source_remove(struct wakeup_source *ws)
{
	if (WARN_ON(!ws))
		return;

	spin_lock_irq(&events_lock);
	list_del_rcu(&ws->entry);
	spin_unlock_irq(&events_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);

/**
 * wakeup_source_register - Create wakeup source and add it to the list.
 * @name: Name of the wakeup source to register.
 */
struct wakeup_source *wakeup_source_register(const char *name)
{
	struct wakeup_source *ws;

	ws = wakeup_source_create(name);
	if (ws)
		wakeup_source_add(ws);

	return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_register);
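
/*
 * Illustrative sketch (not part of this file's logic): code that is not
 * represented by a struct device can still use the framework by registering
 * a named wakeup source of its own and signaling events on it directly with
 * __pm_stay_awake()/__pm_relax() (defined below). The "foo" identifiers are
 * hypothetical and only meant to show the intended call sequence.
 *
 *	static struct wakeup_source *foo_ws;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_ws = wakeup_source_register("foo_events");
 *		return foo_ws ? 0 : -ENOMEM;
 *	}
 *
 *	static void foo_handle_event(void)
 *	{
 *		__pm_stay_awake(foo_ws);
 *		foo_process_event();		// hypothetical event handling
 *		__pm_relax(foo_ws);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		wakeup_source_unregister(foo_ws);
 *	}
 */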

/**
 * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
 * @ws: Wakeup source object to unregister.
 */
void wakeup_source_unregister(struct wakeup_source *ws)
{
	wakeup_source_remove(ws);
	wakeup_source_destroy(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_unregister);

/**
 * device_wakeup_attach - Attach a wakeup source object to a device object.
 * @dev: Device to handle.
 * @ws: Wakeup source object to attach to @dev.
 *
 * This causes @dev to be treated as a wakeup device.
 */
static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		spin_unlock_irq(&dev->power.lock);
		return -EEXIST;
	}
	dev->power.wakeup = ws;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * device_wakeup_enable - Enable given device to be a wakeup source.
 * @dev: Device to handle.
 *
 * Create a wakeup source object, register it and attach it to @dev.
 */
int device_wakeup_enable(struct device *dev)
{
	struct wakeup_source *ws;
	int ret;

	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;

	ws = wakeup_source_register(dev_name(dev));
	if (!ws)
		return -ENOMEM;

	ret = device_wakeup_attach(dev, ws);
	if (ret)
		wakeup_source_unregister(ws);

	return ret;
}
EXPORT_SYMBOL_GPL(device_wakeup_enable);

/**
 * device_wakeup_detach - Detach a device's wakeup source object from it.
 * @dev: Device to detach the wakeup source object from.
 *
 * After it returns, @dev will not be treated as a wakeup device any more.
 */
static struct wakeup_source *device_wakeup_detach(struct device *dev)
{
	struct wakeup_source *ws;

	spin_lock_irq(&dev->power.lock);
	ws = dev->power.wakeup;
	dev->power.wakeup = NULL;
	spin_unlock_irq(&dev->power.lock);
	return ws;
}

/**
 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
 * @dev: Device to handle.
 *
 * Detach the @dev's wakeup source object from it, then unregister and destroy
 * that object.
 */
int device_wakeup_disable(struct device *dev)
{
	struct wakeup_source *ws;

	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;

	ws = device_wakeup_detach(dev);
	if (ws)
		wakeup_source_unregister(ws);

	return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);

/**
 * device_init_wakeup - Device wakeup initialization.
 * @dev: Device to handle.
 * @enable: Whether or not to enable @dev as a wakeup device.
 *
 * By default, most devices should leave wakeup disabled. The exceptions are
 * devices that everyone expects to be wakeup sources: keyboards, power buttons,
 * possibly network interfaces, etc.
 */
int device_init_wakeup(struct device *dev, bool enable)
{
	int ret = 0;

	if (enable) {
		device_set_wakeup_capable(dev, true);
		ret = device_wakeup_enable(dev);
	} else {
		device_set_wakeup_capable(dev, false);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(device_init_wakeup);

/**
 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
 * @dev: Device to handle.
 */
int device_set_wakeup_enable(struct device *dev, bool enable)
{
	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;

	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
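
/*
 * Illustrative sketch (not part of this file's logic): a driver for a device
 * that users expect to be a wakeup source (a power button, for instance)
 * would typically enable wakeup in its probe routine and disable it again on
 * removal. The foo_probe()/foo_remove() names and the platform_device
 * parameter are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		// ... set up the hardware ...
 *		device_init_wakeup(&pdev->dev, true);
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		device_init_wakeup(&pdev->dev, false);
 *		// ... tear down the hardware ...
 *		return 0;
 *	}
 *
 * Other code may later toggle the setting of such a device with
 * device_set_wakeup_enable(&pdev->dev, false), which is also what the
 * device's "power/wakeup" sysfs attribute is expected to end up calling.
 */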

/*
 * The functions below use the observation that each wakeup event starts a
 * period in which the system should not be suspended. The moment this period
 * ends depends on how the wakeup event is going to be processed after being
 * detected, and all of the possible cases can be divided into two distinct
 * groups.
 *
 * First, a wakeup event may be detected by the same functional unit that will
 * carry out the entire processing of it and possibly pass it to user space
 * for further processing. In that case the functional unit that detected the
 * event can "close" the "no suspend" period associated with it directly, as
 * soon as the event has been dealt with. The pair of pm_stay_awake() and
 * pm_relax(), balanced with each other, is supposed to be used in such
 * situations.
 *
 * Second, a wakeup event may be detected by one functional unit and processed
 * by another one. In that case the unit that has detected it cannot really
 * "close" the "no suspend" period associated with it, unless it knows in
 * advance what's going to happen to the event during processing. This
 * knowledge, however, may not be available to it, so it can simply specify a
 * time to wait before the system can be suspended and pass it as the second
 * argument of pm_wakeup_event().
 *
 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
 * "no suspend" period will be ended either by the pm_relax(), or by the timer
 * function executed when the timer expires, whichever comes first.
 */

/**
 * wakeup_source_activate - Mark given wakeup source as active.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and, if @ws has just been activated, notify the
 * PM core of the event by incrementing the counter of wakeup events being
 * processed.
 */
static void wakeup_source_activate(struct wakeup_source *ws)
{
	ws->active = true;
	ws->active_count++;
	ws->timer_expires = jiffies;
	ws->last_time = ktime_get();

	atomic_inc(&events_in_progress);
}

/**
 * __pm_stay_awake - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * It is safe to call this function from interrupt context.
 */
void __pm_stay_awake(struct wakeup_source *ws)
{
	unsigned long flags;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);
	ws->event_count++;
	if (!ws->active)
		wakeup_source_activate(ws);
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_stay_awake);
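
/*
 * Illustrative sketch (not part of this file's logic) of the first case
 * described in the comment block above: the unit that detects the event also
 * finishes processing it, so it can bracket the work with pm_stay_awake()
 * and pm_relax() itself. The struct foo_data, foo_irq_handler() and
 * foo_work_fn() names are hypothetical.
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct foo_data *foo = data;
 *
 *		pm_stay_awake(foo->dev);	// open the "no suspend" period
 *		schedule_work(&foo->work);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static void foo_work_fn(struct work_struct *work)
 *	{
 *		struct foo_data *foo = container_of(work, struct foo_data, work);
 *
 *		foo_process_event(foo);		// hypothetical event handling
 *		pm_relax(foo->dev);		// close the period when done
 *	}
 */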

/**
 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
 * @dev: Device the wakeup event is related to.
 *
 * Notify the PM core of a wakeup event (signaled by @dev) by calling
 * __pm_stay_awake for the @dev's wakeup source object.
 *
 * Call this function after detecting a wakeup event if pm_relax() is going to
 * be called directly after processing the event (and possibly passing it to
 * user space for further processing).
 */
void pm_stay_awake(struct device *dev)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_stay_awake(dev->power.wakeup);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_stay_awake);

/**
 * wakeup_source_deactivate - Mark given wakeup source as inactive.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and notify the PM core that the wakeup source has
 * become inactive by decrementing the counter of wakeup events being processed
 * and incrementing the counter of registered wakeup events.
 */
static void wakeup_source_deactivate(struct wakeup_source *ws)
{
	ktime_t duration;
	ktime_t now;

	ws->relax_count++;
	/*
	 * __pm_relax() may be called directly or from a timer function.
	 * If it is called directly right after the timer function has been
	 * started, but before the timer function calls __pm_relax(), it is
	 * possible that __pm_stay_awake() will be called in the meantime and
	 * will set ws->active. Then, ws->active may be cleared immediately
	 * by the __pm_relax() called from the timer function, but in such a
	 * case ws->relax_count will be different from ws->active_count.
	 */
	if (ws->relax_count != ws->active_count) {
		ws->relax_count--;
		return;
	}

	ws->active = false;

	now = ktime_get();
	duration = ktime_sub(now, ws->last_time);
	ws->total_time = ktime_add(ws->total_time, duration);
	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
		ws->max_time = duration;

	del_timer(&ws->timer);

	/*
	 * event_count has to be incremented before events_in_progress is
	 * modified, so that the callers of pm_check_wakeup_events() and
	 * pm_save_wakeup_count() don't see the old value of event_count and
	 * events_in_progress equal to zero at the same time.
	 */
	atomic_inc(&event_count);
	smp_mb__before_atomic_dec();
	atomic_dec(&events_in_progress);
}

/**
 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * Call this function for wakeup events whose processing started with calling
 * __pm_stay_awake().
 *
 * It is safe to call it from interrupt context.
 */
void __pm_relax(struct wakeup_source *ws)
{
	unsigned long flags;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);
	if (ws->active)
		wakeup_source_deactivate(ws);
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_relax);

/**
 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @dev: Device that signaled the event.
 *
 * Execute __pm_relax() for the @dev's wakeup source object.
 */
void pm_relax(struct device *dev)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_relax(dev->power.wakeup);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_relax);
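
/*
 * Illustrative sketch (not part of this file's logic): as noted in the
 * comment block preceding wakeup_source_activate(), pm_relax() may also end
 * a "no suspend" period opened with pm_wakeup_event() (defined below). A
 * driver can pass a generous timeout as a safety net when the event is
 * detected and still close the period early once processing completes;
 * whichever of pm_relax() and the timer comes first wins. The foo_* names
 * and the 200 ms bound are hypothetical.
 *
 *	// interrupt handler: open the period with an upper bound
 *	pm_wakeup_event(foo->dev, 200);
 *	schedule_work(&foo->work);
 *
 *	// work function: end the period as soon as the event is handled
 *	foo_process_event(foo);
 *	pm_relax(foo->dev);
 */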

/**
 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
 * @data: Address of the wakeup source object associated with the event source.
 *
 * Call __pm_relax() for the wakeup source whose address is stored in @data.
 */
static void pm_wakeup_timer_fn(unsigned long data)
{
	__pm_relax((struct wakeup_source *)data);
}

/**
 * __pm_wakeup_event - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the event source.
 * @msec: Anticipated event processing time (in milliseconds).
 *
 * Notify the PM core of a wakeup event whose source is @ws that will take
 * approximately @msec milliseconds to be processed by the kernel. If @ws is
 * not active, activate it. If @msec is nonzero, set up the @ws' timer to
 * execute pm_wakeup_timer_fn() in the future.
 *
 * It is safe to call this function from interrupt context.
 */
void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
{
	unsigned long flags;
	unsigned long expires;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);

	ws->event_count++;
	if (!ws->active)
		wakeup_source_activate(ws);

	if (!msec) {
		wakeup_source_deactivate(ws);
		goto unlock;
	}

	expires = jiffies + msecs_to_jiffies(msec);
	if (!expires)
		expires = 1;

	if (time_after(expires, ws->timer_expires)) {
		mod_timer(&ws->timer, expires);
		ws->timer_expires = expires;
	}

 unlock:
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_wakeup_event);

/**
 * pm_wakeup_event - Notify the PM core of a wakeup event.
 * @dev: Device the wakeup event is related to.
 * @msec: Anticipated event processing time (in milliseconds).
 *
 * Call __pm_wakeup_event() for the @dev's wakeup source object.
 */
void pm_wakeup_event(struct device *dev, unsigned int msec)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_wakeup_event(dev->power.wakeup, msec);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_wakeup_event);
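
/*
 * Illustrative sketch (not part of this file's logic) of the second case
 * described in the comment block preceding wakeup_source_activate(): the
 * unit that detects the event cannot tell when processing ends, for example
 * because the event is handed off to user space, so it only supplies an
 * estimate of the processing time and never calls pm_relax() itself. The
 * foo_* names and the 100 ms estimate are hypothetical.
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct foo_data *foo = data;
 *
 *		// Keep the system awake for roughly 100 ms so that the
 *		// consumer of the event has a chance to pick it up before
 *		// a suspend can complete.
 *		pm_wakeup_event(foo->dev, 100);
 *		foo_report_event(foo);		// hypothetical hand-off
 *		return IRQ_HANDLED;
 *	}
 */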

/**
 * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources.
 */
static void pm_wakeup_update_hit_counts(void)
{
	unsigned long flags;
	struct wakeup_source *ws;

	rcu_read_lock();
	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
		spin_lock_irqsave(&ws->lock, flags);
		if (ws->active)
			ws->hit_count++;
		spin_unlock_irqrestore(&ws->lock, flags);
	}
	rcu_read_unlock();
}

/**
 * pm_check_wakeup_events - Check for new wakeup events.
 *
 * Compare the current number of registered wakeup events with its preserved
 * value from the past to check if new wakeup events have been registered since
 * the old value was stored. Also check that the current number of wakeup
 * events being processed is zero.
 */
bool pm_check_wakeup_events(void)
{
	unsigned long flags;
	bool ret = true;

	spin_lock_irqsave(&events_lock, flags);
	if (events_check_enabled) {
		ret = ((unsigned int)atomic_read(&event_count) == saved_count)
			&& !atomic_read(&events_in_progress);
		events_check_enabled = ret;
	}
	spin_unlock_irqrestore(&events_lock, flags);
	if (!ret)
		pm_wakeup_update_hit_counts();
	return ret;
}

/**
 * pm_get_wakeup_count - Read the number of registered wakeup events.
 * @count: Address to store the value at.
 *
 * Store the number of registered wakeup events at the address in @count.
 * Block if the current number of wakeup events being processed is nonzero.
 *
 * Return false if the wait for the number of wakeup events being processed to
 * drop down to zero has been interrupted by a signal (and the current number
 * of wakeup events being processed is still nonzero). Otherwise return true.
 */
bool pm_get_wakeup_count(unsigned int *count)
{
	bool ret;

	if (capable(CAP_SYS_ADMIN))
		events_check_enabled = false;

	while (atomic_read(&events_in_progress) && !signal_pending(current)) {
		pm_wakeup_update_hit_counts();
		schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
	}

	ret = !atomic_read(&events_in_progress);
	*count = atomic_read(&event_count);
	return ret;
}

/**
 * pm_save_wakeup_count - Save the current number of registered wakeup events.
 * @count: Value to compare with the current number of registered wakeup events.
 *
 * If @count is equal to the current number of registered wakeup events and the
 * current number of wakeup events being processed is zero, store @count as the
 * old number of registered wakeup events to be used by pm_check_wakeup_events()
 * and return true. Otherwise return false.
 */
bool pm_save_wakeup_count(unsigned int count)
{
	bool ret = false;

	spin_lock_irq(&events_lock);
	if (count == (unsigned int)atomic_read(&event_count)
	    && !atomic_read(&events_in_progress)) {
		saved_count = count;
		events_check_enabled = true;
		ret = true;
	}
	spin_unlock_irq(&events_lock);
	if (!ret)
		pm_wakeup_update_hit_counts();
	return ret;
}
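
/*
 * Illustrative sketch (not part of this file's logic): pm_get_wakeup_count()
 * and pm_save_wakeup_count() are meant to back a "wakeup count" interface
 * exposed to user space, which reads the current count, writes it back and
 * only then initiates a suspend; the write fails if new wakeup events have
 * been registered in the meantime. Show/store handlers for such an attribute
 * might look roughly like this (the handler names, prototypes and error
 * codes are assumptions, not the actual /sys/power/wakeup_count code):
 *
 *	static ssize_t wakeup_count_show(struct kobject *kobj,
 *					 struct kobj_attribute *attr, char *buf)
 *	{
 *		unsigned int cnt;
 *
 *		if (pm_get_wakeup_count(&cnt))
 *			return sprintf(buf, "%u\n", cnt);
 *		return -EINTR;
 *	}
 *
 *	static ssize_t wakeup_count_store(struct kobject *kobj,
 *					  struct kobj_attribute *attr,
 *					  const char *buf, size_t n)
 *	{
 *		unsigned int cnt;
 *
 *		if (sscanf(buf, "%u", &cnt) == 1 && pm_save_wakeup_count(cnt))
 *			return n;
 *		return -EINVAL;
 *	}
 */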

static struct dentry *wakeup_sources_stats_dentry;

/**
 * print_wakeup_source_stats - Print wakeup source statistics information.
 * @m: seq_file to print the statistics into.
 * @ws: Wakeup source object to print the statistics for.
 */
static int print_wakeup_source_stats(struct seq_file *m,
				     struct wakeup_source *ws)
{
	unsigned long flags;
	ktime_t total_time;
	ktime_t max_time;
	unsigned long active_count;
	ktime_t active_time;
	int ret;

	spin_lock_irqsave(&ws->lock, flags);

	total_time = ws->total_time;
	max_time = ws->max_time;
	active_count = ws->active_count;
	if (ws->active) {
		active_time = ktime_sub(ktime_get(), ws->last_time);
		total_time = ktime_add(total_time, active_time);
		if (active_time.tv64 > max_time.tv64)
			max_time = active_time;
	} else {
		active_time = ktime_set(0, 0);
	}

	ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t"
			"%lld\t\t%lld\t\t%lld\t\t%lld\n",
			ws->name, active_count, ws->event_count, ws->hit_count,
			ktime_to_ms(active_time), ktime_to_ms(total_time),
			ktime_to_ms(max_time), ktime_to_ms(ws->last_time));

	spin_unlock_irqrestore(&ws->lock, flags);

	return ret;
}

/**
 * wakeup_sources_stats_show - Print wakeup sources statistics information.
 * @m: seq_file to print the statistics into.
 */
static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
	struct wakeup_source *ws;

	seq_puts(m, "name\t\tactive_count\tevent_count\thit_count\t"
		"active_since\ttotal_time\tmax_time\tlast_change\n");

	rcu_read_lock();
	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
		print_wakeup_source_stats(m, ws);
	rcu_read_unlock();

	return 0;
}

static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, wakeup_sources_stats_show, NULL);
}

static const struct file_operations wakeup_sources_stats_fops = {
	.owner = THIS_MODULE,
	.open = wakeup_sources_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init wakeup_sources_debugfs_init(void)
{
	wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
			S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
	return 0;
}

postcore_initcall(wakeup_sources_debugfs_init);