/*
 * drivers/base/power/wakeup.c - System wakeup events framework
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <trace/events/power.h>

#include "power.h"

/*
 * If set, the suspend/hibernate code will abort transitions to a sleep state
 * if wakeup events are registered during or immediately before the transition.
 */
bool events_check_enabled __read_mostly;

/*
 * Combined counters of registered wakeup events and wakeup events in progress.
 * They need to be modified together atomically, so it's better to use one
 * atomic variable to hold them both.
 */
static atomic_t combined_event_count = ATOMIC_INIT(0);

#define IN_PROGRESS_BITS	(sizeof(int) * 4)
#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)

static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
	unsigned int comb = atomic_read(&combined_event_count);

	*cnt = (comb >> IN_PROGRESS_BITS);
	*inpr = comb & MAX_IN_PROGRESS;
}
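
/*
 * Illustration (not part of the framework): on a system with 32-bit int,
 * IN_PROGRESS_BITS is 16, so the low 16 bits of combined_event_count hold
 * the number of events in progress and the high 16 bits hold the number of
 * registered events.  For a hypothetical value:
 *
 *	comb = 0x00050002;			// 5 registered, 2 in progress
 *	cnt  = comb >> IN_PROGRESS_BITS;	// == 5
 *	inpr = comb & MAX_IN_PROGRESS;		// == 2
 *
 * Adding MAX_IN_PROGRESS (0xffff) to such a value is equivalent to adding
 * 0x10000 and subtracting 1, i.e. it increments the registered-event count
 * and decrements the in-progress count in a single atomic operation, which
 * is what wakeup_source_deactivate() relies on.
 */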

/* A preserved old value of the events counter. */
static unsigned int saved_count;

static DEFINE_SPINLOCK(events_lock);

static void pm_wakeup_timer_fn(unsigned long data);

static LIST_HEAD(wakeup_sources);

static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);

/**
 * wakeup_source_prepare - Prepare a new wakeup source for initialization.
 * @ws: Wakeup source to prepare.
 * @name: Pointer to the name of the new wakeup source.
 *
 * Callers must ensure that the @name string won't be freed when @ws is still in
 * use.
 */
void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
{
	if (ws) {
		memset(ws, 0, sizeof(*ws));
		ws->name = name;
	}
}
EXPORT_SYMBOL_GPL(wakeup_source_prepare);

/**
 * wakeup_source_create - Create a struct wakeup_source object.
 * @name: Name of the new wakeup source.
 */
struct wakeup_source *wakeup_source_create(const char *name)
{
	struct wakeup_source *ws;

	ws = kmalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return NULL;

	wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
	return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);

/**
 * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
 * @ws: Wakeup source to prepare for destruction.
 *
 * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
 * be run in parallel with this function for the same wakeup source object.
 */
void wakeup_source_drop(struct wakeup_source *ws)
{
	if (!ws)
		return;

	del_timer_sync(&ws->timer);
	__pm_relax(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_drop);

/**
 * wakeup_source_destroy - Destroy a struct wakeup_source object.
 * @ws: Wakeup source to destroy.
 *
 * Use only for wakeup source objects created with wakeup_source_create().
 */
void wakeup_source_destroy(struct wakeup_source *ws)
{
	if (!ws)
		return;

	wakeup_source_drop(ws);
	kfree(ws->name);
	kfree(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_destroy);

/**
 * wakeup_source_add - Add given object to the list of wakeup sources.
 * @ws: Wakeup source object to add to the list.
 */
void wakeup_source_add(struct wakeup_source *ws)
{
	if (WARN_ON(!ws))
		return;

	spin_lock_init(&ws->lock);
	setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
	ws->active = false;
	ws->last_time = ktime_get();

	spin_lock_irq(&events_lock);
	list_add_rcu(&ws->entry, &wakeup_sources);
	spin_unlock_irq(&events_lock);
}
EXPORT_SYMBOL_GPL(wakeup_source_add);

/**
 * wakeup_source_remove - Remove given object from the wakeup sources list.
 * @ws: Wakeup source object to remove from the list.
 */
void wakeup_source_remove(struct wakeup_source *ws)
{
	if (WARN_ON(!ws))
		return;

	spin_lock_irq(&events_lock);
	list_del_rcu(&ws->entry);
	spin_unlock_irq(&events_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);

/**
 * wakeup_source_register - Create wakeup source and add it to the list.
 * @name: Name of the wakeup source to register.
 */
struct wakeup_source *wakeup_source_register(const char *name)
{
	struct wakeup_source *ws;

	ws = wakeup_source_create(name);
	if (ws)
		wakeup_source_add(ws);

	return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_register);

/**
 * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
 * @ws: Wakeup source object to unregister.
 */
void wakeup_source_unregister(struct wakeup_source *ws)
{
	if (ws) {
		wakeup_source_remove(ws);
		wakeup_source_destroy(ws);
	}
}
EXPORT_SYMBOL_GPL(wakeup_source_unregister);
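
/*
 * Usage sketch (illustration only, not part of this file): a driver that
 * needs a wakeup source not tied to a struct device could manage one like
 * this, using a hypothetical "foo" driver as an example:
 *
 *	struct wakeup_source *foo_ws;
 *
 *	foo_ws = wakeup_source_register("foo");	// create and add to the list
 *	if (!foo_ws)
 *		return -ENOMEM;
 *
 *	__pm_stay_awake(foo_ws);	// open a "no suspend" period
 *	// ... handle the event ...
 *	__pm_relax(foo_ws);		// close it again
 *
 *	wakeup_source_unregister(foo_ws);	// remove from the list and free
 */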

/**
 * device_wakeup_attach - Attach a wakeup source object to a device object.
 * @dev: Device to handle.
 * @ws: Wakeup source object to attach to @dev.
 *
 * This causes @dev to be treated as a wakeup device.
 */
static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		spin_unlock_irq(&dev->power.lock);
		return -EEXIST;
	}
	dev->power.wakeup = ws;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * device_wakeup_enable - Enable given device to be a wakeup source.
 * @dev: Device to handle.
 *
 * Create a wakeup source object, register it and attach it to @dev.
 */
int device_wakeup_enable(struct device *dev)
{
	struct wakeup_source *ws;
	int ret;

	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;

	ws = wakeup_source_register(dev_name(dev));
	if (!ws)
		return -ENOMEM;

	ret = device_wakeup_attach(dev, ws);
	if (ret)
		wakeup_source_unregister(ws);

	return ret;
}
EXPORT_SYMBOL_GPL(device_wakeup_enable);

/**
 * device_wakeup_detach - Detach a device's wakeup source object from it.
 * @dev: Device to detach the wakeup source object from.
 *
 * After it returns, @dev will not be treated as a wakeup device any more.
 */
static struct wakeup_source *device_wakeup_detach(struct device *dev)
{
	struct wakeup_source *ws;

	spin_lock_irq(&dev->power.lock);
	ws = dev->power.wakeup;
	dev->power.wakeup = NULL;
	spin_unlock_irq(&dev->power.lock);
	return ws;
}

/**
 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
 * @dev: Device to handle.
 *
 * Detach the @dev's wakeup source object from it, unregister this wakeup source
 * object and destroy it.
 */
int device_wakeup_disable(struct device *dev)
{
	struct wakeup_source *ws;

	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;

	ws = device_wakeup_detach(dev);
	if (ws)
		wakeup_source_unregister(ws);

	return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);

/**
 * device_set_wakeup_capable - Set/reset device wakeup capability flag.
 * @dev: Device to handle.
 * @capable: Whether or not @dev is capable of waking up the system from sleep.
 *
 * If @capable is set, set the @dev's power.can_wakeup flag and add its
 * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
 * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
 *
 * This function may sleep, so it must not be called from any context where
 * sleeping is not allowed.
 */
void device_set_wakeup_capable(struct device *dev, bool capable)
{
	if (!!dev->power.can_wakeup == !!capable)
		return;

	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
		if (capable) {
			if (wakeup_sysfs_add(dev))
				return;
		} else {
			wakeup_sysfs_remove(dev);
		}
	}
	dev->power.can_wakeup = capable;
}
EXPORT_SYMBOL_GPL(device_set_wakeup_capable);

/**
 * device_init_wakeup - Device wakeup initialization.
 * @dev: Device to handle.
 * @enable: Whether or not to enable @dev as a wakeup device.
 *
 * By default, most devices should leave wakeup disabled.  The exceptions are
 * devices that everyone expects to be wakeup sources: keyboards, power buttons,
 * possibly network interfaces, etc.  Also, devices that don't generate their
 * own wakeup requests but merely forward requests from one bus to another
 * (like PCI bridges) should have wakeup enabled by default.
 */
int device_init_wakeup(struct device *dev, bool enable)
{
	int ret = 0;

	if (enable) {
		device_set_wakeup_capable(dev, true);
		ret = device_wakeup_enable(dev);
	} else {
		device_set_wakeup_capable(dev, false);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(device_init_wakeup);

/**
 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
 * @dev: Device to handle.
 */
int device_set_wakeup_enable(struct device *dev, bool enable)
{
	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;

	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
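
/*
 * Usage sketch (illustration only): a driver for a device that is expected
 * to wake up the system, such as a power button, would typically call
 * device_init_wakeup() from its probe routine (the "foo" names below are
 * made up):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		// ... driver setup ...
 *		device_init_wakeup(&pdev->dev, true);
 *		return 0;
 *	}
 *
 * This marks the device as wakeup-capable, creates and registers a wakeup
 * source named after the device and attaches it to dev->power.wakeup.
 */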

/*
 * The functions below use the observation that each wakeup event starts a
 * period in which the system should not be suspended.  The moment this period
 * ends depends on how the wakeup event is going to be processed after being
 * detected, and all of the possible cases can be divided into two distinct
 * groups.
 *
 * First, a wakeup event may be detected by the same functional unit that will
 * carry out the entire processing of it and possibly will pass it to user space
 * for further processing.  In that case the functional unit that has detected
 * the event may later "close" the "no suspend" period associated with it
 * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
 * pm_relax(), balanced with each other, is supposed to be used in such
 * situations.
 *
 * Second, a wakeup event may be detected by one functional unit and processed
 * by another one.  In that case the unit that has detected it cannot really
 * "close" the "no suspend" period associated with it, unless it knows in
 * advance what's going to happen to the event during processing.  This
 * knowledge, however, may not be available to it, so it can simply specify time
 * to wait before the system can be suspended and pass it as the second
 * argument of pm_wakeup_event().
 *
 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
 * "no suspend" period will be ended either by the pm_relax(), or by the timer
 * function executed when the timer expires, whichever comes first.
 */
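
/*
 * Sketch of the two patterns described above (illustration only; the IRQ
 * handler names and the 100 ms processing estimate are made up):
 *
 * 1. The detecting code also finishes the processing, so it brackets the
 *    work with pm_stay_awake() and pm_relax():
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *dev_id)
 *	{
 *		struct device *dev = dev_id;
 *
 *		pm_stay_awake(dev);	// open the "no suspend" period
 *		// ... process the event completely ...
 *		pm_relax(dev);		// close it when done
 *		return IRQ_HANDLED;
 *	}
 *
 * 2. The detecting code hands the event off to another unit (for example,
 *    user space) and cannot tell when processing will finish, so it only
 *    gives the PM core an estimate:
 *
 *	static irqreturn_t bar_irq_handler(int irq, void *dev_id)
 *	{
 *		struct device *dev = dev_id;
 *
 *		pm_wakeup_event(dev, 100);	// block suspend for ~100 ms
 *		// ... queue the event for processing elsewhere ...
 *		return IRQ_HANDLED;
 *	}
 */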

/**
 * wakeup_source_activate - Mark given wakeup source as active.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
 * core of the event by incrementing the counter of wakeup events being
 * processed.
 */
static void wakeup_source_activate(struct wakeup_source *ws)
{
	unsigned int cec;

	ws->active = true;
	ws->active_count++;
	ws->last_time = ktime_get();
	if (ws->autosleep_enabled)
		ws->start_prevent_time = ws->last_time;

	/* Increment the counter of events in progress. */
	cec = atomic_inc_return(&combined_event_count);

	trace_wakeup_source_activate(ws->name, cec);
}

/**
 * wakeup_source_report_event - Report wakeup event using the given source.
 * @ws: Wakeup source to report the event for.
 */
static void wakeup_source_report_event(struct wakeup_source *ws)
{
	ws->event_count++;
	/* This is racy, but the counter is approximate anyway. */
	if (events_check_enabled)
		ws->wakeup_count++;

	if (!ws->active)
		wakeup_source_activate(ws);
}

/**
 * __pm_stay_awake - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * It is safe to call this function from interrupt context.
 */
void __pm_stay_awake(struct wakeup_source *ws)
{
	unsigned long flags;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);

	wakeup_source_report_event(ws);
	del_timer(&ws->timer);
	ws->timer_expires = 0;

	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_stay_awake);

/**
 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
 * @dev: Device the wakeup event is related to.
 *
 * Notify the PM core of a wakeup event (signaled by @dev) by calling
 * __pm_stay_awake() for the @dev's wakeup source object.
 *
 * Call this function after detecting a wakeup event if pm_relax() is going
 * to be called directly after processing the event (and possibly passing it to
 * user space for further processing).
 */
void pm_stay_awake(struct device *dev)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_stay_awake(dev->power.wakeup);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_stay_awake);

#ifdef CONFIG_PM_AUTOSLEEP
static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
{
	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
}
#else
static inline void update_prevent_sleep_time(struct wakeup_source *ws,
					     ktime_t now) {}
#endif

/**
 * wakeup_source_deactivate - Mark given wakeup source as inactive.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and notify the PM core that the wakeup source has
 * become inactive by decrementing the counter of wakeup events being processed
 * and incrementing the counter of registered wakeup events.
 */
static void wakeup_source_deactivate(struct wakeup_source *ws)
{
	unsigned int cnt, inpr, cec;
	ktime_t duration;
	ktime_t now;

	ws->relax_count++;
	/*
	 * __pm_relax() may be called directly or from a timer function.
	 * If it is called directly right after the timer function has been
	 * started, but before the timer function calls __pm_relax(), it is
	 * possible that __pm_stay_awake() will be called in the meantime and
	 * will set ws->active.  Then, ws->active may be cleared immediately
	 * by the __pm_relax() called from the timer function, but in such a
	 * case ws->relax_count will be different from ws->active_count.
	 */
	if (ws->relax_count != ws->active_count) {
		ws->relax_count--;
		return;
	}

	ws->active = false;

	now = ktime_get();
	duration = ktime_sub(now, ws->last_time);
	ws->total_time = ktime_add(ws->total_time, duration);
	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
		ws->max_time = duration;

	ws->last_time = now;
	del_timer(&ws->timer);
	ws->timer_expires = 0;

	if (ws->autosleep_enabled)
		update_prevent_sleep_time(ws, now);

	/*
	 * Increment the counter of registered wakeup events and decrement the
	 * counter of wakeup events in progress simultaneously.
	 */
	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
	trace_wakeup_source_deactivate(ws->name, cec);

	split_counters(&cnt, &inpr);
	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
		wake_up(&wakeup_count_wait_queue);
}

/**
 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * Call this function for wakeup events whose processing started with calling
 * __pm_stay_awake().
 *
 * It is safe to call it from interrupt context.
 */
void __pm_relax(struct wakeup_source *ws)
{
	unsigned long flags;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);
	if (ws->active)
		wakeup_source_deactivate(ws);
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_relax);

/**
 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @dev: Device that signaled the event.
 *
 * Execute __pm_relax() for the @dev's wakeup source object.
 */
void pm_relax(struct device *dev)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_relax(dev->power.wakeup);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_relax);

/**
 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
 * @data: Address of the wakeup source object associated with the event source.
 *
 * Call wakeup_source_deactivate() for the wakeup source whose address is stored
 * in @data if it is currently active, its timer has not been canceled and the
 * expiration time of the timer is not in the future.
 */
static void pm_wakeup_timer_fn(unsigned long data)
{
	struct wakeup_source *ws = (struct wakeup_source *)data;
	unsigned long flags;

	spin_lock_irqsave(&ws->lock, flags);

	if (ws->active && ws->timer_expires
	    && time_after_eq(jiffies, ws->timer_expires)) {
		wakeup_source_deactivate(ws);
		ws->expire_count++;
	}

	spin_unlock_irqrestore(&ws->lock, flags);
}

/**
 * __pm_wakeup_event - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the event source.
 * @msec: Anticipated event processing time (in milliseconds).
 *
 * Notify the PM core of a wakeup event whose source is @ws that will take
 * approximately @msec milliseconds to be processed by the kernel.  If @ws is
 * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
 * execute pm_wakeup_timer_fn() in the future.
 *
 * It is safe to call this function from interrupt context.
 */
void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
{
	unsigned long flags;
	unsigned long expires;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);

	wakeup_source_report_event(ws);

	if (!msec) {
		wakeup_source_deactivate(ws);
		goto unlock;
	}

	expires = jiffies + msecs_to_jiffies(msec);
	if (!expires)
		expires = 1;

	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
		mod_timer(&ws->timer, expires);
		ws->timer_expires = expires;
	}

 unlock:
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_wakeup_event);
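
/*
 * Note (illustration only): __pm_wakeup_event() never shortens a pending
 * timeout, it only extends it.  For instance, if these calls happen in quick
 * succession:
 *
 *	__pm_wakeup_event(ws, 100);	// suspend blocked for ~100 ms
 *	__pm_wakeup_event(ws, 50);	// deadline unchanged, still ~100 ms
 *	__pm_wakeup_event(ws, 200);	// deadline pushed out to ~200 ms
 *
 * because the timer is only modified when the new expiration time is after
 * the one already programmed.
 */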

/**
 * pm_wakeup_event - Notify the PM core of a wakeup event.
 * @dev: Device the wakeup event is related to.
 * @msec: Anticipated event processing time (in milliseconds).
 *
 * Call __pm_wakeup_event() for the @dev's wakeup source object.
 */
void pm_wakeup_event(struct device *dev, unsigned int msec)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_wakeup_event(dev->power.wakeup, msec);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_wakeup_event);

/**
 * pm_wakeup_pending - Check if power transition in progress should be aborted.
 *
 * Compare the current number of registered wakeup events with its preserved
 * value from the past and return true if new wakeup events have been registered
 * since the old value was stored.  Also return true if the current number of
 * wakeup events being processed is different from zero.
 */
bool pm_wakeup_pending(void)
{
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&events_lock, flags);
	if (events_check_enabled) {
		unsigned int cnt, inpr;

		split_counters(&cnt, &inpr);
		ret = (cnt != saved_count || inpr > 0);
		events_check_enabled = !ret;
	}
	spin_unlock_irqrestore(&events_lock, flags);
	return ret;
}

/**
 * pm_get_wakeup_count - Read the number of registered wakeup events.
 * @count: Address to store the value at.
 * @block: Whether or not to block.
 *
 * Store the number of registered wakeup events at the address in @count.  If
 * @block is set, block until the current number of wakeup events being
 * processed is zero.
 *
 * Return 'false' if the current number of wakeup events being processed is
 * nonzero.  Otherwise return 'true'.
 */
bool pm_get_wakeup_count(unsigned int *count, bool block)
{
	unsigned int cnt, inpr;

	if (block) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(&wakeup_count_wait_queue, &wait,
					TASK_INTERRUPTIBLE);
			split_counters(&cnt, &inpr);
			if (inpr == 0 || signal_pending(current))
				break;

			schedule();
		}
		finish_wait(&wakeup_count_wait_queue, &wait);
	}

	split_counters(&cnt, &inpr);
	*count = cnt;
	return !inpr;
}

/**
 * pm_save_wakeup_count - Save the current number of registered wakeup events.
 * @count: Value to compare with the current number of registered wakeup events.
 *
 * If @count is equal to the current number of registered wakeup events and the
 * current number of wakeup events being processed is zero, store @count as the
 * old number of registered wakeup events for pm_wakeup_pending(), enable
 * wakeup events detection and return 'true'.  Otherwise disable wakeup events
 * detection and return 'false'.
 */
bool pm_save_wakeup_count(unsigned int count)
{
	unsigned int cnt, inpr;

	events_check_enabled = false;
	spin_lock_irq(&events_lock);
	split_counters(&cnt, &inpr);
	if (cnt == count && inpr == 0) {
		saved_count = count;
		events_check_enabled = true;
	}
	spin_unlock_irq(&events_lock);
	return events_check_enabled;
}
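
/*
 * Sketch of how the two functions above fit together (illustration only; the
 * sysfs plumbing that calls them lives in kernel/power/main.c):
 *
 *	unsigned int cnt;
 *
 *	pm_get_wakeup_count(&cnt, true);	// "cat /sys/power/wakeup_count"
 *	// ... user space decides to suspend ...
 *	if (pm_save_wakeup_count(cnt)) {
 *		// no wakeup events since the count was read: the check is
 *		// armed and the suspend may proceed
 *	} else {
 *		// new events were registered in the meantime: abort
 *	}
 *
 * Once events_check_enabled has been set this way, pm_wakeup_pending() will
 * abort the transition if further wakeup events are registered.
 */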

#ifdef CONFIG_PM_AUTOSLEEP
/**
 * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
 * @set: Whether to set or to clear the autosleep_enabled flags.
 */
void pm_wakep_autosleep_enabled(bool set)
{
	struct wakeup_source *ws;
	ktime_t now = ktime_get();

	rcu_read_lock();
	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
		spin_lock_irq(&ws->lock);
		if (ws->autosleep_enabled != set) {
			ws->autosleep_enabled = set;
			if (ws->active) {
				if (set)
					ws->start_prevent_time = now;
				else
					update_prevent_sleep_time(ws, now);
			}
		}
		spin_unlock_irq(&ws->lock);
	}
	rcu_read_unlock();
}
#endif /* CONFIG_PM_AUTOSLEEP */

static struct dentry *wakeup_sources_stats_dentry;

/**
 * print_wakeup_source_stats - Print wakeup source statistics information.
 * @m: seq_file to print the statistics into.
 * @ws: Wakeup source object to print the statistics for.
 */
static int print_wakeup_source_stats(struct seq_file *m,
				     struct wakeup_source *ws)
{
	unsigned long flags;
	ktime_t total_time;
	ktime_t max_time;
	unsigned long active_count;
	ktime_t active_time;
	ktime_t prevent_sleep_time;
	int ret;

	spin_lock_irqsave(&ws->lock, flags);

	total_time = ws->total_time;
	max_time = ws->max_time;
	prevent_sleep_time = ws->prevent_sleep_time;
	active_count = ws->active_count;
	if (ws->active) {
		ktime_t now = ktime_get();

		active_time = ktime_sub(now, ws->last_time);
		total_time = ktime_add(total_time, active_time);
		if (active_time.tv64 > max_time.tv64)
			max_time = active_time;

		if (ws->autosleep_enabled)
			prevent_sleep_time = ktime_add(prevent_sleep_time,
				ktime_sub(now, ws->start_prevent_time));
	} else {
		active_time = ktime_set(0, 0);
	}

	ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t"
			"%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
			ws->name, active_count, ws->event_count,
			ws->wakeup_count, ws->expire_count,
			ktime_to_ms(active_time), ktime_to_ms(total_time),
			ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
			ktime_to_ms(prevent_sleep_time));

	spin_unlock_irqrestore(&ws->lock, flags);

	return ret;
}

/**
 * wakeup_sources_stats_show - Print wakeup sources statistics information.
 * @m: seq_file to print the statistics into.
 */
static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
	struct wakeup_source *ws;

	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
		"expire_count\tactive_since\ttotal_time\tmax_time\t"
		"last_change\tprevent_suspend_time\n");

	rcu_read_lock();
	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
		print_wakeup_source_stats(m, ws);
	rcu_read_unlock();

	return 0;
}

static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, wakeup_sources_stats_show, NULL);
}

static const struct file_operations wakeup_sources_stats_fops = {
	.owner = THIS_MODULE,
	.open = wakeup_sources_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init wakeup_sources_debugfs_init(void)
{
	wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
			S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
	return 0;
}

postcore_initcall(wakeup_sources_debugfs_init);