/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = 0;

	if (pm_print_times_enabled) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_superior(struct device *dev, bool async)
{
	dpm_wait(dev->parent, async);
	dpm_wait_for_suppliers(dev, async);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume. In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

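/*
 * Illustrative sketch (not part of this file's logic): the callbacks that
 * pm_op(), pm_late_early_op() and pm_noirq_op() look up are the ones a
 * subsystem or driver publishes in its struct dev_pm_ops. A driver that
 * only needs the main suspend/resume phases would typically do something
 * like the following (the names "foo_suspend", "foo_resume" and
 * "foo_driver" are hypothetical):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		// quiesce the hardware, save context
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		// restore context, re-enable the hardware
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *			.pm = &foo_pm_ops,
 *		},
 *	};
 *
 * SET_SYSTEM_SLEEP_PM_OPS() fills in the suspend/resume/freeze/thaw/
 * poweroff/restore slots used by pm_op(); SET_LATE_SYSTEM_SLEEP_PM_OPS()
 * and SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() do the same for the slots used by
 * pm_late_early_op() and pm_noirq_op().
 */
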
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is being
 * executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the returned callback
 * is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device *dev;
	struct task_struct *tsk;
	struct timer_list timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

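/*
 * Note on the async path above (illustrative, not a statement of policy):
 * a device is handled by an async thread only when is_async() is true,
 * i.e. the global "pm_async" knob (/sys/power/pm_async) is enabled and the
 * device has opted in via its power.async_suspend flag. A driver or bus
 * typically opts a device in at probe time with:
 *
 *	device_enable_async_suspend(dev);
 *
 * Ordering is still preserved: each phase waits for parents/suppliers on
 * resume and for children/consumers on suspend via the completions, so
 * async only increases parallelism between independent devices.
 */
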
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait_for_superior(dev, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);

	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

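/*
 * For orientation (descriptive only): a full system sleep transition runs
 * the phases in this order, with dpm_suspend_end() and dpm_resume_start()
 * bundling the "late" and "noirq" halves for the platform code:
 *
 *	suspend:  dpm_prepare() -> dpm_suspend() -> dpm_suspend_late()
 *	          -> dpm_suspend_noirq()
 *	resume:   dpm_resume_noirq() -> dpm_resume_early() -> dpm_resume()
 *	          -> dpm_complete()
 *
 * A failure in the "late" or "noirq" phase unwinds by running the matching
 * resume phases for the devices already handled, as seen in the error paths
 * above.
 */
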
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

static void dpm_clear_suppliers_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
		dpm_clear_suppliers_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

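/*
 * Illustrative note on the wakeup checks in __device_suspend() (the call
 * sequence below is a hypothetical driver probe fragment): device_may_wakeup()
 * is only true for devices that have been marked wakeup-capable and enabled,
 * e.g.:
 *
 *	device_init_wakeup(dev, true);
 *	dev_pm_set_wake_irq(dev, irq);	// optional dedicated wake IRQ
 *
 * With that in place, a wakeup event that arrives while the suspend phases
 * are in progress (pm_wakeup_pending()) makes the transition abort with
 * -EBUSY instead of racing with the device going to sleep.
 */
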
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device. To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->power.no_pm_callbacks) {
		ret = 1;	/* Let device go direct_complete */
		goto unlock;
	}

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants". This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

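/*
 * Illustrative sketch of how a driver could opt into the "direct complete"
 * path that device_prepare() sets up (the function name "foo_prepare" is
 * hypothetical): a positive return value from ->prepare() tells the PM core
 * that, if the device really is runtime-suspended, it may be left alone for
 * the whole transition:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		// Runtime-suspended state is also fine for system sleep.
 *		return pm_runtime_suspended(dev);
 *	}
 *
 * Devices without any PM callbacks at all (power.no_pm_callbacks) get the
 * same treatment automatically, as done above.
 */
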
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give known devices a chance to complete their probes before we
	 * disable probing of devices. This sync point is important at least
	 * at boot time and during hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * Probing a device during suspend or hibernation is unsafe and would
	 * make system behavior unpredictable, so prohibit probing here and
	 * defer the probes instead. The normal behavior will be restored in
	 * dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

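/*
 * Illustrative use of dpm_for_each_dev() (the callback name and the counter
 * are hypothetical): callers pass a context pointer and a function that is
 * invoked for every device on dpm_list with dpm_list_mtx held, so the
 * callback must not do anything that takes that lock again (e.g. register
 * or unregister a device):
 *
 *	static void count_wakeup_capable(struct device *dev, void *data)
 *	{
 *		int *count = data;
 *
 *		if (device_may_wakeup(dev))
 *			(*count)++;
 *	}
 *
 *	...
 *	int count = 0;
 *
 *	dpm_for_each_dev(&count, count_wakeup_capable);
 */
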
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || pm_ops_is_empty(dev->driver->pm));
	spin_unlock_irq(&dev->power.lock);
}