// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/wakeup.c - System wakeup events framework
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/power.h>

#include "power.h"

#ifndef CONFIG_SUSPEND
suspend_state_t pm_suspend_target_state;
#define pm_suspend_target_state	(PM_SUSPEND_ON)
#endif

#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
		srcu_read_lock_held(&wakeup_srcu))
/*
 * If set, the suspend/hibernate code will abort transitions to a sleep state
 * if wakeup events are registered during or immediately before the transition.
 */
bool events_check_enabled __read_mostly;

/* First wakeup IRQ seen by the kernel in the last cycle. */
static unsigned int wakeup_irq[2] __read_mostly;
static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);

/* If greater than 0 and the system is suspending, terminate the suspend. */
static atomic_t pm_abort_suspend __read_mostly;

/*
 * Combined counters of registered wakeup events and wakeup events in progress.
 * They need to be modified together atomically, so it's better to use one
 * atomic variable to hold them both.
 */
static atomic_t combined_event_count = ATOMIC_INIT(0);

#define IN_PROGRESS_BITS	(sizeof(int) * 4)
#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)

static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
	unsigned int comb = atomic_read(&combined_event_count);

	*cnt = (comb >> IN_PROGRESS_BITS);
	*inpr = comb & MAX_IN_PROGRESS;
}
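
/*
 * Illustrative note (not part of the original file): with a 32-bit int,
 * IN_PROGRESS_BITS is 16, so the low 16 bits of combined_event_count hold the
 * number of events in progress (inpr) and the high 16 bits hold the number of
 * registered events (cnt).  wakeup_source_activate() does atomic_inc_return(),
 * i.e. inpr += 1, while wakeup_source_deactivate() adds MAX_IN_PROGRESS
 * (0xffff), which equals adding 0x10000 and subtracting 1: cnt += 1 and
 * inpr -= 1 in one atomic operation.  For example, starting from cnt = 2,
 * inpr = 1 (comb = 0x00020001), a deactivation yields comb = 0x00030000,
 * i.e. cnt = 3, inpr = 0.
 */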

/* A preserved old value of the events counter. */
static unsigned int saved_count;

static DEFINE_RAW_SPINLOCK(events_lock);

static void pm_wakeup_timer_fn(struct timer_list *t);

static LIST_HEAD(wakeup_sources);

static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);

DEFINE_STATIC_SRCU(wakeup_srcu);

static struct wakeup_source deleted_ws = {
	.name = "deleted",
	.lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
};

static DEFINE_IDA(wakeup_ida);

/**
 * wakeup_source_create - Create a struct wakeup_source object.
 * @name: Name of the new wakeup source.
 */
struct wakeup_source *wakeup_source_create(const char *name)
{
	struct wakeup_source *ws;
	const char *ws_name;
	int id;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		goto err_ws;

	ws_name = kstrdup_const(name, GFP_KERNEL);
	if (!ws_name)
		goto err_name;
	ws->name = ws_name;

	id = ida_alloc(&wakeup_ida, GFP_KERNEL);
	if (id < 0)
		goto err_id;
	ws->id = id;

	return ws;

err_id:
	kfree_const(ws->name);
err_name:
	kfree(ws);
err_ws:
	return NULL;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);

/*
 * Record wakeup_source statistics being deleted into a dummy wakeup_source.
 */
static void wakeup_source_record(struct wakeup_source *ws)
{
	unsigned long flags;

	spin_lock_irqsave(&deleted_ws.lock, flags);

	if (ws->event_count) {
		deleted_ws.total_time =
			ktime_add(deleted_ws.total_time, ws->total_time);
		deleted_ws.prevent_sleep_time =
			ktime_add(deleted_ws.prevent_sleep_time,
				  ws->prevent_sleep_time);
		deleted_ws.max_time =
			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
				deleted_ws.max_time : ws->max_time;
		deleted_ws.event_count += ws->event_count;
		deleted_ws.active_count += ws->active_count;
		deleted_ws.relax_count += ws->relax_count;
		deleted_ws.expire_count += ws->expire_count;
		deleted_ws.wakeup_count += ws->wakeup_count;
	}

	spin_unlock_irqrestore(&deleted_ws.lock, flags);
}

static void wakeup_source_free(struct wakeup_source *ws)
{
	ida_free(&wakeup_ida, ws->id);
	kfree_const(ws->name);
	kfree(ws);
}

/**
 * wakeup_source_destroy - Destroy a struct wakeup_source object.
 * @ws: Wakeup source to destroy.
 *
 * Use only for wakeup source objects created with wakeup_source_create().
 */
void wakeup_source_destroy(struct wakeup_source *ws)
{
	if (!ws)
		return;

	__pm_relax(ws);
	wakeup_source_record(ws);
	wakeup_source_free(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_destroy);

/**
 * wakeup_source_add - Add given object to the list of wakeup sources.
 * @ws: Wakeup source object to add to the list.
 */
void wakeup_source_add(struct wakeup_source *ws)
{
	unsigned long flags;

	if (WARN_ON(!ws))
		return;

	spin_lock_init(&ws->lock);
	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
	ws->active = false;

	raw_spin_lock_irqsave(&events_lock, flags);
	list_add_rcu(&ws->entry, &wakeup_sources);
	raw_spin_unlock_irqrestore(&events_lock, flags);
}
EXPORT_SYMBOL_GPL(wakeup_source_add);

/**
 * wakeup_source_remove - Remove given object from the wakeup sources list.
 * @ws: Wakeup source object to remove from the list.
 */
void wakeup_source_remove(struct wakeup_source *ws)
{
	unsigned long flags;

	if (WARN_ON(!ws))
		return;

	raw_spin_lock_irqsave(&events_lock, flags);
	list_del_rcu(&ws->entry);
	raw_spin_unlock_irqrestore(&events_lock, flags);
	synchronize_srcu(&wakeup_srcu);

	del_timer_sync(&ws->timer);
	/*
	 * Clear timer.function to make wakeup_source_not_registered() treat
	 * this wakeup source as not registered.
	 */
	ws->timer.function = NULL;
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);

/**
 * wakeup_source_register - Create wakeup source and add it to the list.
 * @dev: Device this wakeup source is associated with (or NULL if virtual).
 * @name: Name of the wakeup source to register.
 */
struct wakeup_source *wakeup_source_register(struct device *dev,
					     const char *name)
{
	struct wakeup_source *ws;
	int ret;

	ws = wakeup_source_create(name);
	if (ws) {
		if (!dev || device_is_registered(dev)) {
			ret = wakeup_source_sysfs_add(dev, ws);
			if (ret) {
				wakeup_source_free(ws);
				return NULL;
			}
		}
		wakeup_source_add(ws);
	}
	return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_register);
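
/*
 * Illustrative sketch (not part of the original file): a subsystem that needs
 * a wakeup source not tied to any particular device can register a "virtual"
 * one by passing a NULL device.  The foo_*() names below are hypothetical.
 *
 *	static struct wakeup_source *foo_ws;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_ws = wakeup_source_register(NULL, "foo_events");
 *		return foo_ws ? 0 : -ENOMEM;
 *	}
 *
 *	static void foo_event_begin(void)
 *	{
 *		__pm_stay_awake(foo_ws);	// open a "no suspend" period
 *	}
 *
 *	static void foo_event_end(void)
 *	{
 *		__pm_relax(foo_ws);		// close it again
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		wakeup_source_unregister(foo_ws);
 *	}
 */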

/**
 * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
 * @ws: Wakeup source object to unregister.
 */
void wakeup_source_unregister(struct wakeup_source *ws)
{
	if (ws) {
		wakeup_source_remove(ws);
		if (ws->dev)
			wakeup_source_sysfs_remove(ws);

		wakeup_source_destroy(ws);
	}
}
EXPORT_SYMBOL_GPL(wakeup_source_unregister);

/**
 * wakeup_sources_read_lock - Lock wakeup source list for read.
 *
 * Returns an index of the SRCU lock for wakeup_srcu.
 * This index must be passed to the matching wakeup_sources_read_unlock().
 */
int wakeup_sources_read_lock(void)
{
	return srcu_read_lock(&wakeup_srcu);
}
EXPORT_SYMBOL_GPL(wakeup_sources_read_lock);

/**
 * wakeup_sources_read_unlock - Unlock wakeup source list.
 * @idx: return value from corresponding wakeup_sources_read_lock()
 */
void wakeup_sources_read_unlock(int idx)
{
	srcu_read_unlock(&wakeup_srcu, idx);
}
EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);

/**
 * wakeup_sources_walk_start - Begin a walk on wakeup source list
 *
 * Returns first object of the list of wakeup sources.
 *
 * Note that to be safe, wakeup sources list needs to be locked by calling
 * wakeup_sources_read_lock() for this.
 */
struct wakeup_source *wakeup_sources_walk_start(void)
{
	struct list_head *ws_head = &wakeup_sources;

	return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
}
EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);

/**
 * wakeup_sources_walk_next - Get next wakeup source from the list
 * @ws: Previous wakeup source object
 *
 * Note that to be safe, wakeup sources list needs to be locked by calling
 * wakeup_sources_read_lock() for this.
 */
struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws)
{
	struct list_head *ws_head = &wakeup_sources;

	return list_next_or_null_rcu(ws_head, &ws->entry,
				     struct wakeup_source, entry);
}
EXPORT_SYMBOL_GPL(wakeup_sources_walk_next);
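
/*
 * Illustrative sketch (not part of the original file): walking the list of
 * wakeup sources with the lock/walk API exported above.  The foo_*() name is
 * hypothetical; a for_each_wakeup_source() convenience macro along these
 * lines is also provided by <linux/pm_wakeup.h>.
 *
 *	static void foo_dump_wakeup_source_names(void)
 *	{
 *		struct wakeup_source *ws;
 *		int idx;
 *
 *		idx = wakeup_sources_read_lock();
 *		for (ws = wakeup_sources_walk_start(); ws;
 *		     ws = wakeup_sources_walk_next(ws))
 *			pr_info("wakeup source: %s\n", ws->name);
 *		wakeup_sources_read_unlock(idx);
 *	}
 */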

/**
 * device_wakeup_attach - Attach a wakeup source object to a device object.
 * @dev: Device to handle.
 * @ws: Wakeup source object to attach to @dev.
 *
 * This causes @dev to be treated as a wakeup device.
 */
static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		spin_unlock_irq(&dev->power.lock);
		return -EEXIST;
	}
	dev->power.wakeup = ws;
	if (dev->power.wakeirq)
		device_wakeup_attach_irq(dev, dev->power.wakeirq);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * device_wakeup_enable - Enable given device to be a wakeup source.
 * @dev: Device to handle.
 *
 * Create a wakeup source object, register it and attach it to @dev.
 */
int device_wakeup_enable(struct device *dev)
{
	struct wakeup_source *ws;
	int ret;

	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;

	if (pm_suspend_target_state != PM_SUSPEND_ON)
		dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);

	ws = wakeup_source_register(dev, dev_name(dev));
	if (!ws)
		return -ENOMEM;

	ret = device_wakeup_attach(dev, ws);
	if (ret)
		wakeup_source_unregister(ws);

	return ret;
}
EXPORT_SYMBOL_GPL(device_wakeup_enable);

/**
 * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
 * @dev: Device to handle
 * @wakeirq: Device specific wakeirq entry
 *
 * Attach a device wakeirq to the wakeup source so the device
 * wake IRQ can be configured automatically for suspend and
 * resume.
 *
 * Call under the device's power.lock lock.
 */
void device_wakeup_attach_irq(struct device *dev,
			      struct wake_irq *wakeirq)
{
	struct wakeup_source *ws;

	ws = dev->power.wakeup;
	if (!ws)
		return;

	if (ws->wakeirq)
		dev_err(dev, "Leftover wakeup IRQ found, overriding\n");

	ws->wakeirq = wakeirq;
}

/**
 * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
 * @dev: Device to handle
 *
 * Removes a device wakeirq from the wakeup source.
 *
 * Call under the device's power.lock lock.
 */
void device_wakeup_detach_irq(struct device *dev)
{
	struct wakeup_source *ws;

	ws = dev->power.wakeup;
	if (ws)
		ws->wakeirq = NULL;
}

/**
 * device_wakeup_arm_wake_irqs - Arm wake IRQs of all registered wakeup sources.
 *
 * Iterates over the list of device wakeirqs to arm them.
 */
void device_wakeup_arm_wake_irqs(void)
{
	struct wakeup_source *ws;
	int srcuidx;

	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
		dev_pm_arm_wake_irq(ws->wakeirq);
	srcu_read_unlock(&wakeup_srcu, srcuidx);
}

/**
 * device_wakeup_disarm_wake_irqs - Disarm wake IRQs of all registered wakeup sources.
 *
 * Iterates over the list of device wakeirqs to disarm them.
 */
void device_wakeup_disarm_wake_irqs(void)
{
	struct wakeup_source *ws;
	int srcuidx;

	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
		dev_pm_disarm_wake_irq(ws->wakeirq);
	srcu_read_unlock(&wakeup_srcu, srcuidx);
}

/**
 * device_wakeup_detach - Detach a device's wakeup source object from it.
 * @dev: Device to detach the wakeup source object from.
 *
 * After it returns, @dev will not be treated as a wakeup device any more.
 */
static struct wakeup_source *device_wakeup_detach(struct device *dev)
{
	struct wakeup_source *ws;

	spin_lock_irq(&dev->power.lock);
	ws = dev->power.wakeup;
	dev->power.wakeup = NULL;
	spin_unlock_irq(&dev->power.lock);
	return ws;
}

/**
 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
 * @dev: Device to handle.
 *
 * Detach the @dev's wakeup source object from it, unregister this wakeup source
 * object and destroy it.
 */
int device_wakeup_disable(struct device *dev)
{
	struct wakeup_source *ws;

	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;

	ws = device_wakeup_detach(dev);
	wakeup_source_unregister(ws);
	return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);

/**
 * device_set_wakeup_capable - Set/reset device wakeup capability flag.
 * @dev: Device to handle.
 * @capable: Whether or not @dev is capable of waking up the system from sleep.
 *
 * If @capable is set, set the @dev's power.can_wakeup flag and add its
 * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
 * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
 *
 * This function may sleep and it can't be called from any context where
 * sleeping is not allowed.
 */
void device_set_wakeup_capable(struct device *dev, bool capable)
{
	if (!!dev->power.can_wakeup == !!capable)
		return;

	dev->power.can_wakeup = capable;
	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
		if (capable) {
			int ret = wakeup_sysfs_add(dev);

			if (ret)
				dev_info(dev, "Wakeup sysfs attributes not added\n");
		} else {
			wakeup_sysfs_remove(dev);
		}
	}
}
EXPORT_SYMBOL_GPL(device_set_wakeup_capable);

/**
 * device_init_wakeup - Device wakeup initialization.
 * @dev: Device to handle.
 * @enable: Whether or not to enable @dev as a wakeup device.
 *
 * By default, most devices should leave wakeup disabled.  The exceptions are
 * devices that everyone expects to be wakeup sources: keyboards, power buttons,
 * possibly network interfaces, etc.  Also, devices that don't generate their
 * own wakeup requests but merely forward requests from one bus to another
 * (like PCI bridges) should have wakeup enabled by default.
 */
int device_init_wakeup(struct device *dev, bool enable)
{
	int ret = 0;

	if (!dev)
		return -EINVAL;

	if (enable) {
		device_set_wakeup_capable(dev, true);
		ret = device_wakeup_enable(dev);
	} else {
		device_wakeup_disable(dev);
		device_set_wakeup_capable(dev, false);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(device_init_wakeup);

/**
 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
 * @dev: Device to handle.
 * @enable: enable/disable flag
 */
int device_set_wakeup_enable(struct device *dev, bool enable)
{
	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
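
/*
 * Illustrative sketch (not part of the original file): a driver for a device
 * that is expected to wake the system (say, a power button or a wake-capable
 * UART) would typically mark it wakeup-capable in probe and, if it has a
 * dedicated wake interrupt, hand that interrupt to the wakeirq machinery.
 * The foo_probe() name is hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct device *dev = &pdev->dev;
 *		int irq = platform_get_irq(pdev, 0);
 *		int ret;
 *
 *		if (irq < 0)
 *			return irq;
 *
 *		ret = device_init_wakeup(dev, true);
 *		if (ret)
 *			return ret;
 *
 *		// Optional: let the PM core arm/disarm the wake IRQ around
 *		// system suspend (see device_wakeup_attach_irq() above).
 *		ret = dev_pm_set_wake_irq(dev, irq);
 *		if (ret)
 *			device_init_wakeup(dev, false);
 *
 *		return ret;
 *	}
 */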

/**
 * wakeup_source_not_registered - validate the given wakeup source.
 * @ws: Wakeup source to be validated.
 */
static bool wakeup_source_not_registered(struct wakeup_source *ws)
{
	/*
	 * Use timer struct to check if the given source is initialized
	 * by wakeup_source_add.
	 */
	return ws->timer.function != pm_wakeup_timer_fn;
}

/*
 * The functions below use the observation that each wakeup event starts a
 * period in which the system should not be suspended.  The moment this period
 * will end depends on how the wakeup event is going to be processed after
 * being detected and all of the possible cases can be divided into two
 * distinct groups.
 *
 * First, a wakeup event may be detected by the same functional unit that will
 * carry out the entire processing of it and possibly will pass it to user
 * space for further processing.  In that case the functional unit that has
 * detected the event may later "close" the "no suspend" period associated
 * with it directly as soon as it has been dealt with.  The pair of
 * pm_stay_awake() and pm_relax(), balanced with each other, is supposed to be
 * used in such situations.
 *
 * Second, a wakeup event may be detected by one functional unit and processed
 * by another one.  In that case the unit that has detected it cannot really
 * "close" the "no suspend" period associated with it, unless it knows in
 * advance what's going to happen to the event during processing.  This
 * knowledge, however, may not be available to it, so it can simply specify
 * time to wait before the system can be suspended and pass it as the second
 * argument of pm_wakeup_event().
 *
 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
 * "no suspend" period will be ended either by the pm_relax(), or by the timer
 * function executed when the timer expires, whichever comes first.
 */
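
/*
 * Illustrative sketch of the two models above (not part of the original
 * file); the foo_*() names are hypothetical.
 *
 * Model 1: the same driver detects and finishes processing the event, so it
 * balances pm_stay_awake() with pm_relax() itself:
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct foo_chip *chip = data;
 *
 *		pm_stay_awake(chip->dev);	// open the "no suspend" period
 *		schedule_work(&chip->work);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static void foo_work_fn(struct work_struct *work)
 *	{
 *		struct foo_chip *chip = container_of(work, struct foo_chip, work);
 *
 *		foo_handle_event(chip);
 *		pm_relax(chip->dev);		// close it when done
 *	}
 *
 * Model 2: the detecting driver hands the event off (for example to user
 * space) and cannot tell when processing ends, so it only grants a grace
 * period, here 200 ms:
 *
 *	static irqreturn_t foo_rx_irq_handler(int irq, void *data)
 *	{
 *		struct foo_chip *chip = data;
 *
 *		foo_queue_event_for_userspace(chip);
 *		pm_wakeup_event(chip->dev, 200);
 *		return IRQ_HANDLED;
 *	}
 */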

/**
 * wakeup_source_activate - Mark given wakeup source as active.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and, if @ws has just been activated, notify the
 * PM core of the event by incrementing the counter of the wakeup events being
 * processed.
 */
static void wakeup_source_activate(struct wakeup_source *ws)
{
	unsigned int cec;

	if (WARN_ONCE(wakeup_source_not_registered(ws),
		      "unregistered wakeup source\n"))
		return;

	ws->active = true;
	ws->active_count++;
	ws->last_time = ktime_get();
	if (ws->autosleep_enabled)
		ws->start_prevent_time = ws->last_time;

	/* Increment the counter of events in progress. */
	cec = atomic_inc_return(&combined_event_count);

	trace_wakeup_source_activate(ws->name, cec);
}

/**
 * wakeup_source_report_event - Report wakeup event using the given source.
 * @ws: Wakeup source to report the event for.
 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 */
static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
{
	ws->event_count++;
	/* This is racy, but the counter is approximate anyway. */
	if (events_check_enabled)
		ws->wakeup_count++;

	if (!ws->active)
		wakeup_source_activate(ws);

	if (hard)
		pm_system_wakeup();
}

/**
 * __pm_stay_awake - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * It is safe to call this function from interrupt context.
 */
void __pm_stay_awake(struct wakeup_source *ws)
{
	unsigned long flags;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);

	wakeup_source_report_event(ws, false);
	del_timer(&ws->timer);
	ws->timer_expires = 0;

	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_stay_awake);

/**
 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
 * @dev: Device the wakeup event is related to.
 *
 * Notify the PM core of a wakeup event (signaled by @dev) by calling
 * __pm_stay_awake for the @dev's wakeup source object.
 *
 * Call this function after detecting a wakeup event if pm_relax() is going
 * to be called directly after processing the event (and possibly passing it
 * to user space for further processing).
 */
void pm_stay_awake(struct device *dev)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_stay_awake(dev->power.wakeup);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_stay_awake);

#ifdef CONFIG_PM_AUTOSLEEP
static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
{
	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
}
#else
static inline void update_prevent_sleep_time(struct wakeup_source *ws,
					     ktime_t now) {}
#endif

/**
 * wakeup_source_deactivate - Mark given wakeup source as inactive.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and notify the PM core that the wakeup source has
 * become inactive by decrementing the counter of wakeup events being processed
 * and incrementing the counter of registered wakeup events.
 */
static void wakeup_source_deactivate(struct wakeup_source *ws)
{
	unsigned int cnt, inpr, cec;
	ktime_t duration;
	ktime_t now;

	ws->relax_count++;
	/*
	 * __pm_relax() may be called directly or from a timer function.
	 * If it is called directly right after the timer function has been
	 * started, but before the timer function calls __pm_relax(), it is
	 * possible that __pm_stay_awake() will be called in the meantime and
	 * will set ws->active.  Then, ws->active may be cleared immediately
	 * by the __pm_relax() called from the timer function, but in such a
	 * case ws->relax_count will be different from ws->active_count.
	 */
	if (ws->relax_count != ws->active_count) {
		ws->relax_count--;
		return;
	}

	ws->active = false;

	now = ktime_get();
	duration = ktime_sub(now, ws->last_time);
	ws->total_time = ktime_add(ws->total_time, duration);
	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
		ws->max_time = duration;

	ws->last_time = now;
	del_timer(&ws->timer);
	ws->timer_expires = 0;

	if (ws->autosleep_enabled)
		update_prevent_sleep_time(ws, now);

	/*
	 * Increment the counter of registered wakeup events and decrement the
	 * counter of wakeup events in progress simultaneously.
	 */
	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
	trace_wakeup_source_deactivate(ws->name, cec);

	split_counters(&cnt, &inpr);
	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
		wake_up(&wakeup_count_wait_queue);
}

/**
 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * Call this function for wakeup events whose processing started with calling
 * __pm_stay_awake().
 *
 * It is safe to call it from interrupt context.
 */
void __pm_relax(struct wakeup_source *ws)
{
	unsigned long flags;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);
	if (ws->active)
		wakeup_source_deactivate(ws);
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_relax);

/**
 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @dev: Device that signaled the event.
 *
 * Execute __pm_relax() for the @dev's wakeup source object.
 */
void pm_relax(struct device *dev)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_relax(dev->power.wakeup);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_relax);

/**
 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
 * @t: Expired timer embedded in the wakeup source.
 *
 * Call wakeup_source_deactivate() for the wakeup source associated with @t
 * if it is currently active, its timer has not been canceled and the
 * expiration time of the timer is not in the future.
 */
static void pm_wakeup_timer_fn(struct timer_list *t)
{
	struct wakeup_source *ws = from_timer(ws, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&ws->lock, flags);

	if (ws->active && ws->timer_expires
	    && time_after_eq(jiffies, ws->timer_expires)) {
		wakeup_source_deactivate(ws);
		ws->expire_count++;
	}

	spin_unlock_irqrestore(&ws->lock, flags);
}

/**
 * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the event source.
 * @msec: Anticipated event processing time (in milliseconds).
 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 *
 * Notify the PM core of a wakeup event whose source is @ws that will take
 * approximately @msec milliseconds to be processed by the kernel.  If @ws is
 * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
 * execute pm_wakeup_timer_fn() in the future.
 *
 * It is safe to call this function from interrupt context.
 */
void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
{
	unsigned long flags;
	unsigned long expires;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);

	wakeup_source_report_event(ws, hard);

	if (!msec) {
		wakeup_source_deactivate(ws);
		goto unlock;
	}

	expires = jiffies + msecs_to_jiffies(msec);
	if (!expires)
		expires = 1;

	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
		mod_timer(&ws->timer, expires);
		ws->timer_expires = expires;
	}

unlock:
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
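
/*
 * Illustrative note (not part of the original file): repeated calls only ever
 * extend the "no suspend" period; an earlier deadline never shortens one that
 * is already programmed, because mod_timer() above runs only when no deadline
 * is set yet or the new expiry is later than the current one.  For instance,
 * with a hypothetical wakeup source foo_ws:
 *
 *	pm_wakeup_ws_event(foo_ws, 500, false);	// suspend blocked for ~500 ms
 *	pm_wakeup_ws_event(foo_ws, 100, false);	// still ~500 ms, not 100 ms
 *	pm_wakeup_ws_event(foo_ws, 0, false);	// event counted, source
 *						// deactivated immediately
 */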

/**
 * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
 * @dev: Device the wakeup event is related to.
 * @msec: Anticipated event processing time (in milliseconds).
 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 *
 * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
 */
void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);

void pm_print_active_wakeup_sources(void)
{
	struct wakeup_source *ws;
	int srcuidx, active = 0;
	struct wakeup_source *last_activity_ws = NULL;

	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
		if (ws->active) {
			pm_pr_dbg("active wakeup source: %s\n", ws->name);
			active = 1;
		} else if (!active &&
			   (!last_activity_ws ||
			    ktime_to_ns(ws->last_time) >
			    ktime_to_ns(last_activity_ws->last_time))) {
			last_activity_ws = ws;
		}
	}

	if (!active && last_activity_ws)
		pm_pr_dbg("last active wakeup source: %s\n",
			  last_activity_ws->name);
	srcu_read_unlock(&wakeup_srcu, srcuidx);
}
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);

/**
 * pm_wakeup_pending - Check if power transition in progress should be aborted.
 *
 * Compare the current number of registered wakeup events with its preserved
 * value from the past and return true if new wakeup events have been
 * registered since the old value was stored.  Also return true if the current
 * number of wakeup events being processed is different from zero.
 */
bool pm_wakeup_pending(void)
{
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&events_lock, flags);
	if (events_check_enabled) {
		unsigned int cnt, inpr;

		split_counters(&cnt, &inpr);
		ret = (cnt != saved_count || inpr > 0);
		events_check_enabled = !ret;
	}
	raw_spin_unlock_irqrestore(&events_lock, flags);

	if (ret) {
		pm_pr_dbg("Wakeup pending, aborting suspend\n");
		pm_print_active_wakeup_sources();
	}

	return ret || atomic_read(&pm_abort_suspend) > 0;
}
EXPORT_SYMBOL_GPL(pm_wakeup_pending);

void pm_system_wakeup(void)
{
	atomic_inc(&pm_abort_suspend);
	s2idle_wake();
}
EXPORT_SYMBOL_GPL(pm_system_wakeup);

void pm_system_cancel_wakeup(void)
{
	atomic_dec_if_positive(&pm_abort_suspend);
}

void pm_wakeup_clear(unsigned int irq_number)
{
	raw_spin_lock_irq(&wakeup_irq_lock);

	if (irq_number && wakeup_irq[0] == irq_number)
		wakeup_irq[0] = wakeup_irq[1];
	else
		wakeup_irq[0] = 0;

	wakeup_irq[1] = 0;

	raw_spin_unlock_irq(&wakeup_irq_lock);

	if (!irq_number)
		atomic_set(&pm_abort_suspend, 0);
}

void pm_system_irq_wakeup(unsigned int irq_number)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeup_irq_lock, flags);

	if (wakeup_irq[0] == 0)
		wakeup_irq[0] = irq_number;
	else if (wakeup_irq[1] == 0)
		wakeup_irq[1] = irq_number;
	else
		irq_number = 0;

	raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);

	if (irq_number)
		pm_system_wakeup();
}

unsigned int pm_wakeup_irq(void)
{
	return wakeup_irq[0];
}

/**
 * pm_get_wakeup_count - Read the number of registered wakeup events.
 * @count: Address to store the value at.
 * @block: Whether or not to block.
 *
 * Store the number of registered wakeup events at the address in @count.  If
 * @block is set, block until the current number of wakeup events being
 * processed is zero.
 *
 * Return 'false' if the current number of wakeup events being processed is
 * nonzero.  Otherwise return 'true'.
 */
bool pm_get_wakeup_count(unsigned int *count, bool block)
{
	unsigned int cnt, inpr;

	if (block) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(&wakeup_count_wait_queue, &wait,
					TASK_INTERRUPTIBLE);
			split_counters(&cnt, &inpr);
			if (inpr == 0 || signal_pending(current))
				break;
			pm_print_active_wakeup_sources();
			schedule();
		}
		finish_wait(&wakeup_count_wait_queue, &wait);
	}

	split_counters(&cnt, &inpr);
	*count = cnt;
	return !inpr;
}

/**
 * pm_save_wakeup_count - Save the current number of registered wakeup events.
 * @count: Value to compare with the current number of registered wakeup events.
 *
 * If @count is equal to the current number of registered wakeup events and the
 * current number of wakeup events being processed is zero, store @count as the
 * old number of registered wakeup events for pm_wakeup_pending(), enable
 * wakeup events detection and return 'true'.  Otherwise disable wakeup events
 * detection and return 'false'.
 */
bool pm_save_wakeup_count(unsigned int count)
{
	unsigned int cnt, inpr;
	unsigned long flags;

	events_check_enabled = false;
	raw_spin_lock_irqsave(&events_lock, flags);
	split_counters(&cnt, &inpr);
	if (cnt == count && inpr == 0) {
		saved_count = count;
		events_check_enabled = true;
	}
	raw_spin_unlock_irqrestore(&events_lock, flags);
	return events_check_enabled;
}
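
/*
 * Illustrative sketch (not part of the original file): pm_get_wakeup_count()
 * and pm_save_wakeup_count() back the /sys/power/wakeup_count interface (the
 * sysfs plumbing lives in kernel/power/main.c).  A user space power manager
 * typically reads the count, finishes its own checks, writes the same value
 * back and only then writes to /sys/power/state; the suspend is aborted if
 * new wakeup events were registered in between.  Roughly:
 *
 *	// user space, error handling trimmed
 *	int fd = open("/sys/power/wakeup_count", O_RDWR);
 *	char buf[32];
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);	// may block while
 *							// events are in progress
 *	buf[n] = '\0';
 *	if (write(fd, buf, n) >= 0) {
 *		// pm_save_wakeup_count() accepted the value; a subsequent
 *		// write of "mem" to /sys/power/state will be aborted
 *		// automatically if a wakeup event races with it
 *	}
 *	close(fd);
 */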

#ifdef CONFIG_PM_AUTOSLEEP
/**
 * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
 * @set: Whether to set or to clear the autosleep_enabled flags.
 */
void pm_wakep_autosleep_enabled(bool set)
{
	struct wakeup_source *ws;
	ktime_t now = ktime_get();
	int srcuidx;

	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
		spin_lock_irq(&ws->lock);
		if (ws->autosleep_enabled != set) {
			ws->autosleep_enabled = set;
			if (ws->active) {
				if (set)
					ws->start_prevent_time = now;
				else
					update_prevent_sleep_time(ws, now);
			}
		}
		spin_unlock_irq(&ws->lock);
	}
	srcu_read_unlock(&wakeup_srcu, srcuidx);
}
#endif /* CONFIG_PM_AUTOSLEEP */

/**
 * print_wakeup_source_stats - Print wakeup source statistics information.
 * @m: seq_file to print the statistics into.
 * @ws: Wakeup source object to print the statistics for.
 */
static int print_wakeup_source_stats(struct seq_file *m,
				     struct wakeup_source *ws)
{
	unsigned long flags;
	ktime_t total_time;
	ktime_t max_time;
	unsigned long active_count;
	ktime_t active_time;
	ktime_t prevent_sleep_time;

	spin_lock_irqsave(&ws->lock, flags);

	total_time = ws->total_time;
	max_time = ws->max_time;
	prevent_sleep_time = ws->prevent_sleep_time;
	active_count = ws->active_count;
	if (ws->active) {
		ktime_t now = ktime_get();

		active_time = ktime_sub(now, ws->last_time);
		total_time = ktime_add(total_time, active_time);
		if (active_time > max_time)
			max_time = active_time;

		if (ws->autosleep_enabled)
			prevent_sleep_time = ktime_add(prevent_sleep_time,
				ktime_sub(now, ws->start_prevent_time));
	} else {
		active_time = 0;
	}

	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
		   ws->name, active_count, ws->event_count,
		   ws->wakeup_count, ws->expire_count,
		   ktime_to_ms(active_time), ktime_to_ms(total_time),
		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
		   ktime_to_ms(prevent_sleep_time));

	spin_unlock_irqrestore(&ws->lock, flags);

	return 0;
}
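
/*
 * Illustrative note (not part of the original file): each line of the
 * "wakeup_sources" debugfs file produced above has ten tab-separated columns
 * matching the header printed in wakeup_sources_stats_seq_start() below:
 * name, active_count, event_count, wakeup_count, expire_count, active_since,
 * total_time, max_time, last_change and prevent_suspend_time (times in ms).
 * A line for a hypothetical source "foo" might look like:
 *
 *	foo		12		12		3		0		0		4096		1024		5736128		0
 */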

static void *wakeup_sources_stats_seq_start(struct seq_file *m,
					    loff_t *pos)
{
	struct wakeup_source *ws;
	loff_t n = *pos;
	int *srcuidx = m->private;

	if (n == 0) {
		seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
			"expire_count\tactive_since\ttotal_time\tmax_time\t"
			"last_change\tprevent_suspend_time\n");
	}

	*srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
		if (n-- <= 0)
			return ws;
	}

	return NULL;
}

static void *wakeup_sources_stats_seq_next(struct seq_file *m,
					   void *v, loff_t *pos)
{
	struct wakeup_source *ws = v;
	struct wakeup_source *next_ws = NULL;

	++(*pos);

	list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
		next_ws = ws;
		break;
	}

	if (!next_ws)
		print_wakeup_source_stats(m, &deleted_ws);

	return next_ws;
}

static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
{
	int *srcuidx = m->private;

	srcu_read_unlock(&wakeup_srcu, *srcuidx);
}

/**
 * wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
 * @m: seq_file to print the statistics into.
 * @v: wakeup_source of each iteration
 */
static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
{
	struct wakeup_source *ws = v;

	print_wakeup_source_stats(m, ws);

	return 0;
}

static const struct seq_operations wakeup_sources_stats_seq_ops = {
	.start = wakeup_sources_stats_seq_start,
	.next  = wakeup_sources_stats_seq_next,
	.stop  = wakeup_sources_stats_seq_stop,
	.show  = wakeup_sources_stats_seq_show,
};

static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
}

static const struct file_operations wakeup_sources_stats_fops = {
	.owner = THIS_MODULE,
	.open = wakeup_sources_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static int __init wakeup_sources_debugfs_init(void)
{
	debugfs_create_file("wakeup_sources", 0444, NULL, NULL,
			    &wakeup_sources_stats_fops);
	return 0;
}

postcore_initcall(wakeup_sources_debugfs_init);