// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;
	s64 nsecs;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)nsecs >> 10);
}
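
/*
 * Note (summary, not part of the original file): the two initcall debug
 * helpers above bracket every PM callback invocation and only emit output
 * when pm_print_times_enabled is set (typically via the initcall_debug boot
 * parameter or, where available, /sys/power/pm_print_times).  The ">> 10"
 * approximates a nanoseconds-to-microseconds conversion by dividing by 1024
 * instead of 1000, which is close enough for debug timing output.
 */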

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_superior(struct device *dev, bool async)
{
	dpm_wait(dev->parent, async);
	dpm_wait_for_suppliers(dev, async);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (e.g. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
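
/*
 * Illustrative sketch (not part of this file): pm_op() above and the
 * pm_late_early_op()/pm_noirq_op() selectors below map one system transition
 * onto the per-phase members of a driver's struct dev_pm_ops.  A hypothetical
 * driver wanting distinct callbacks for every suspend phase might declare
 * something like this, with pm_op() picking .suspend/.resume,
 * pm_late_early_op() picking .suspend_late/.resume_early and pm_noirq_op()
 * picking .suspend_noirq/.resume_noirq for PM_EVENT_SUSPEND/PM_EVENT_RESUME:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend	= foo_suspend,
 *		.suspend_late	= foo_suspend_late,
 *		.suspend_noirq	= foo_suspend_noirq,
 *		.resume_noirq	= foo_resume_noirq,
 *		.resume_early	= foo_resume_early,
 *		.resume		= foo_resume,
 *	};
 *
 * The foo_* names are placeholders; real drivers often use the
 * SET_SYSTEM_SLEEP_PM_OPS()/SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() helpers instead
 * of filling the fields by hand.
 */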

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	pr_err("Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * suspend_event - Return a "suspend" message for given "resume" one.
 * @resume_msg: PM message representing a system-wide resume transition.
 */
static pm_message_t suspend_event(pm_message_t resume_msg)
{
	switch (resume_msg.event) {
	case PM_EVENT_RESUME:
		return PMSG_SUSPEND;
	case PM_EVENT_THAW:
	case PM_EVENT_RESTORE:
		return PMSG_FREEZE;
	case PM_EVENT_RECOVER:
		return PMSG_HIBERNATE;
	}
	return PMSG_ON;
}

/**
 * dev_pm_may_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Checks whether or not the device may be left in suspend after a system-wide
 * transition to the working state.
 */
bool dev_pm_may_skip_resume(struct device *dev)
{
	return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
}

static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
						 pm_message_t state,
						 const char **info_p);

static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
						pm_message_t state,
						const char **info_p);

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	skip_resume = dev_pm_may_skip_resume(dev);

	callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev_pm_smart_suspend_and_suspended(dev)) {
		pm_message_t suspend_msg = suspend_event(state);

		/*
		 * If "freeze" callbacks have been skipped during a transition
		 * related to hibernation, the subsequent "thaw" callbacks must
		 * be skipped too or bad things may happen.  Otherwise, resume
		 * callbacks are going to be run for the device, so its runtime
		 * PM status must be changed to reflect the new state after the
		 * transition under way.
		 */
		if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
		    !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
			if (state.event == PM_EVENT_THAW) {
				skip_resume = true;
				goto Skip;
			} else {
				pm_runtime_set_active(dev);
			}
		}
	}

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

	if (skip_resume) {
		/* Make the next phases of resume skip the device. */
		dev->power.is_late_suspended = false;
		dev->power.is_suspended = false;
		/*
		 * The device is going to be left in suspend, but it might not
		 * have been in runtime suspend before the system suspended, so
		 * its runtime PM status needs to be updated to avoid confusing
		 * the runtime PM framework when runtime PM is enabled for the
		 * device again.
		 */
		pm_runtime_set_suspended(dev);
	}

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(func, dev);
		return true;
	}

	return false;
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Kick off the async threads upfront, in case starting them would
	 * otherwise be delayed by the synchronous resume of non-async devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

void dpm_noirq_end(void)
{
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
}
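
/*
 * Illustrative note (not part of the original file): a device takes the
 * asynchronous path checked by is_async() only if pm_async_enabled is set
 * (the /sys/power/pm_async switch) and the device has opted in, e.g. from
 * its bus or driver code:
 *
 *	device_enable_async_suspend(dev);	// sets power.async_suspend
 *
 * With that flag set, the device's suspend/resume work is scheduled on the
 * async thread pool and ordering is preserved only through the
 * dpm_wait_for_superior()/dpm_wait_for_subordinate() completions.
 */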

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);
	dpm_noirq_end();
}

static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	callback = dpm_subsys_resume_early_cb(dev, state, &info);

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Kick off the async threads upfront, in case starting them would
	 * otherwise be delayed by the synchronous resume of non-async devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait_for_superior(dev, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
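
/*
 * Big-picture note (summary, not part of the original file): during an
 * ordinary suspend-to-RAM cycle the PM core walks the device lists through
 * these phases, in this order:
 *
 *	dpm_prepare()        ->prepare()
 *	dpm_suspend()        ->suspend()
 *	dpm_suspend_late()   ->suspend_late()
 *	dpm_suspend_noirq()  ->suspend_noirq()
 *	    ... system sleep ...
 *	dpm_resume_noirq()   ->resume_noirq()
 *	dpm_resume_early()   ->resume_early()
 *	dpm_resume()         ->resume()
 *	dpm_complete()       ->complete()
 *
 * dpm_suspend_start()/dpm_suspend_end() and dpm_resume_start()/
 * dpm_resume_end() are the wrappers the suspend and hibernation cores use to
 * run these phases in pairs.
 */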

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}

static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
						 pm_message_t state,
						 const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

static bool device_must_resume(struct device *dev, pm_message_t state,
			       bool no_subsys_suspend_noirq)
{
	pm_message_t resume_msg = resume_event(state);

	/*
	 * If all of the device driver's "noirq", "late" and "early" callbacks
	 * are invoked directly by the core, the decision to allow the device to
	 * stay in suspend can be based on its current runtime PM status and its
	 * wakeup settings.
	 */
	if (no_subsys_suspend_noirq &&
	    !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
	    !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
	    !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
		return !pm_runtime_status_suspended(dev) &&
			(resume_msg.event != PM_EVENT_RESUME ||
			 (device_can_wakeup(dev) && !device_may_wakeup(dev)));

	/*
	 * The only safe strategy here is to require that if the device may not
	 * be left in suspend, resume callbacks must be invoked for it.
	 */
	return !dev->power.may_skip_resume;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	bool no_subsys_cb = false;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
	if (callback)
		goto Run;

	no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);

	if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
		dev->power.must_resume = dev->power.must_resume ||
					atomic_read(&dev->power.usage_count) > 1 ||
					device_must_resume(dev, state, no_subsys_cb);
	} else {
		dev->power.must_resume = true;
	}

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_noirq))
		return 0;

	return __device_suspend_noirq(dev, pm_transition, false);
}

void dpm_noirq_begin(void)
{
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
}

int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	dpm_noirq_begin();
	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}

static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	spin_lock_irq(&parent->power.lock);

	if (dev->power.wakeup_path && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}

static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	callback = dpm_subsys_suspend_late_cb(dev, state, &info);
	if (callback)
		goto Run;

	if (dev_pm_smart_suspend_and_suspended(dev) &&
	    !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_late))
		return 0;

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_suspend_late(state);
	if (error)
		goto out;

	error = dpm_suspend_noirq(state);
	if (error)
		dpm_resume_early(resume_event(state));

out:
	dpm_show_time(starttime, state, error, "end");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || dev->power.wakeup_path)
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev)) {
				pm_dev_dbg(dev, state, "direct-complete ");
				goto Complete;
			}

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dev->power.may_skip_resume = false;
	dev->power.must_resume = false;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (device_may_wakeup(dev))
			dev->power.wakeup_path = true;

		dpm_propagate_wakeup_to_parent(dev);
		dpm_clear_superiors_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend))
		return 0;

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	devfreq_suspend();
	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	WARN_ON(!pm_runtime_enabled(dev) &&
		dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
					      DPM_FLAG_LEAVE_SUSPENDED));

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		((pm_runtime_suspended(dev) && ret > 0) ||
		 dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give the known devices a chance to complete their probes before we
	 * disable probing.  This sync point is important at least at boot time
	 * and during hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * Probing devices during suspend or hibernation is unsafe and would
	 * make system behavior unpredictable, so prohibit probing here and
	 * defer probes instead.  The normal behavior will be restored in
	 * dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			pr_info("Device %s not prepared for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	dpm_show_time(starttime, state, error, "start");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
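
/*
 * Note (assumption based on the suspend_report_result() callers above): this
 * helper is normally reached through the suspend_report_result(fn, ret)
 * macro, which passes __func__ as @function, so a failing callback shows up
 * in the log as "<caller>(): <callback symbol> returns <error>".
 */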

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irq(&dev->power.lock);
}

bool dev_pm_smart_suspend_and_suspended(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}
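
/*
 * Illustrative sketch (not part of this file): the DPM_FLAG_* handling above
 * only kicks in for drivers that opt in, typically at probe time, e.g.:
 *
 *	// hypothetical driver code
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *				     DPM_FLAG_LEAVE_SUSPENDED);
 *
 * DPM_FLAG_SMART_SUSPEND lets the core skip the driver's late/noirq suspend
 * callbacks for a device that is already runtime-suspended (see
 * dev_pm_smart_suspend_and_suspended() above), and DPM_FLAG_LEAVE_SUSPENDED
 * asks the core to try to leave the device in suspend on resume, subject to
 * device_must_resume().  Drivers that never want the direct_complete
 * optimization can set DPM_FLAG_NEVER_SKIP instead.
 */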