/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        dev->power.is_noirq_suspended = false;
        dev->power.is_late_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        device_pm_check_callbacks(dev);
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                         dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        dev->power.in_dpm_list = true;
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        dev->power.in_dpm_list = false;
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
        device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = 0;

        if (pm_print_times_enabled) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error, pm_message_t state, char *info)
{
        ktime_t rettime;
        s64 nsecs;

        rettime = ktime_get();
        nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

        if (pm_print_times_enabled) {
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)nsecs >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        /*
         * If the supplier goes away right after we've checked the link to it,
         * we'll wait for its completion to change the state, but that's fine,
         * because the only things that will block as a result are the SRCU
         * callbacks freeing the link objects for the links in the list we're
         * walking.
         */
        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                        dpm_wait(link->supplier, async);

        device_links_read_unlock(idx);
}

static void dpm_wait_for_superior(struct device *dev, bool async)
{
        dpm_wait(dev->parent, async);
        dpm_wait_for_suppliers(dev, async);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        /*
         * The status of a device link can only be changed from "dormant" by a
         * probe, but that cannot happen during system suspend/resume.  In
         * theory it can change to "dormant" at that time, but then it is
         * reasonable to wait for the target device anyway (eg. if it goes
         * away, it's better to wait for it to go away completely and then
         * continue instead of trying to continue in parallel with its
         * unregistration).
         */
        list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                        dpm_wait(link->consumer, async);

        device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
        dpm_wait_for_children(dev, async);
        dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                       int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
        struct device           *dev;
        struct task_struct      *tsk;
        struct timer_list       timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
        struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
        struct dpm_watchdog *wd = (void *)data;

        dev_emerg(wd->dev, "**** DPM device timeout ****\n");
        show_stack(wd->tsk, NULL);
        panic("%s %s: unrecoverable failure\n",
                dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
        struct timer_list *timer = &wd->timer;

        wd->dev = dev;
        wd->tsk = current;

        init_timer_on_stack(timer);
        /* use same timeout value for both suspend and resume */
        timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
        timer->function = dpm_watchdog_handler;
        timer->data = (unsigned long)wd;
        add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
        struct timer_list *timer = &wd->timer;

        del_timer_sync(timer);
        destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore || dev->power.direct_complete)
                goto Out;

        if (!dev->power.is_noirq_suspended)
                goto Out;

        dpm_wait_for_superior(dev, async);

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_noirq_suspended = false;

 Out:
        complete_all(&dev->power.completion);
        TRACE_RESUME(error);
        return error;
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume_noirq(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        /*
         * Advance the async threads upfront, in case the starting of async
         * threads is delayed by non-async resuming devices.
         */
        list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume_noirq, dev);
                }
        }

        while (!list_empty(&dpm_noirq_list)) {
                dev = to_device(dpm_noirq_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                if (!is_async(dev)) {
                        int error;

                        error = device_resume_noirq(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume_noirq++;
                                dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, " noirq", error);
                        }
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        device_wakeup_disarm_wake_irqs();
        cpuidle_resume();
        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore || dev->power.direct_complete)
                goto Out;

        if (!dev->power.is_late_suspended)
                goto Out;

        dpm_wait_for_superior(dev, async);

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_late_suspended = false;

 Out:
        TRACE_RESUME(error);

        pm_runtime_enable(dev);
        complete_all(&dev->power.completion);
        return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume_early(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        /*
         * Advance the async threads upfront, in case the starting of async
         * threads is delayed by non-async resuming devices.
         */
        list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume_early, dev);
                }
        }

        while (!list_empty(&dpm_late_early_list)) {
                dev = to_device(dpm_late_early_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                if (!is_async(dev)) {
                        int error;

                        error = device_resume_early(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume_early++;
                                dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, " early", error);
                        }
                }
                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, "early");
        trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        if (dev->power.direct_complete) {
                /* Match the pm_runtime_disable() in __device_suspend(). */
                pm_runtime_enable(dev);
                goto Complete;
        }

        dpm_wait_for_superior(dev, async);
        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume"), state.event, true);
        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);

        cpufreq_resume();
        trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        trace_suspend_resume(TPS("dpm_complete"), state.event, true);
        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                trace_device_pm_callback_start(dev, "", state.event);
                device_complete(dev, state);
                trace_device_pm_callback_end(dev, 0);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);

        /* Allow device probing and trigger re-probing of deferred devices */
        device_unblock_probing();
        trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        dpm_wait_for_subordinate(dev, async);

        if (async_error)
                goto Complete;

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        if (!error)
                dev->power.is_noirq_suspended = true;
        else
                async_error = error;

Complete:
        complete_all(&dev->power.completion);
        TRACE_SUSPEND(error);
        return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend_noirq(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (is_async(dev)) {
                get_device(dev);
                async_schedule(async_suspend_noirq, dev);
                return 0;
        }
        return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
        cpuidle_pause();
        device_wakeup_arm_wake_irqs();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;

        if (error) {
                suspend_stats.failed_suspend_noirq++;
                dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                dpm_resume_noirq(resume_event(state));
        } else {
                dpm_show_time(starttime, state, "noirq");
        }
        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
        return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        __pm_runtime_disable(dev, false);

        dpm_wait_for_subordinate(dev, async);

        if (async_error)
                goto Complete;

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        if (!error)
                dev->power.is_late_suspended = true;
        else
                async_error = error;

Complete:
        TRACE_SUSPEND(error);
        complete_all(&dev->power.completion);
        return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend_late(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }
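        /* Drop the reference taken by device_suspend_late() before this async work was scheduled. */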
        put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (is_async(dev)) {
                get_device(dev);
                async_schedule(async_suspend_late, dev);
                return 0;
        }

        return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev);

                mutex_lock(&dpm_list_mtx);
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);

                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                put_device(dev);

                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend_late++;
                dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                dpm_resume_early(resume_event(state));
        } else {
                dpm_show_time(starttime, state, "late");
        }
        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(resume_event(state));
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state),
                          char *info)
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev, state);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

static void dpm_clear_suppliers_direct_complete(struct device *dev)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
                spin_lock_irq(&link->supplier->power.lock);
                link->supplier->power.direct_complete = false;
                spin_unlock_irq(&link->supplier->power.lock);
        }

        device_links_read_unlock(idx);
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        dpm_wait_for_subordinate(dev, async);

        if (async_error)
                goto Complete;

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        if (dev->power.direct_complete) {
                if (pm_runtime_status_suspended(dev)) {
                        pm_runtime_disable(dev);
                        if (pm_runtime_status_suspended(dev))
                                goto Complete;

                        pm_runtime_enable(dev);
                }
                dev->power.direct_complete = false;
        }

        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend,
                                               "legacy class ");
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend,
                                               "legacy bus ");
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                struct device *parent = dev->parent;

                dev->power.is_suspended = true;
                if (parent) {
                        spin_lock_irq(&parent->power.lock);

                        dev->parent->power.direct_complete = false;
                        if (dev->power.wakeup_path
                            && !dev->parent->power.ignore_children)
                                dev->parent->power.wakeup_path = true;

                        spin_unlock_irq(&parent->power.lock);
                }
                dpm_clear_suppliers_direct_complete(dev);
        }

        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        if (error)
                async_error = error;

        complete_all(&dev->power.completion);
        TRACE_SUSPEND(error);
        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (is_async(dev)) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
        might_sleep();

        cpufreq_suspend();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        int ret = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device.  To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->power.no_pm_callbacks) {
                ret = 1;        /* Let device go direct_complete */
                goto unlock;
        }

        if (dev->pm_domain)
                callback = dev->pm_domain->ops.prepare;
        else if (dev->type && dev->type->pm)
                callback = dev->type->pm->prepare;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->prepare;
        else if (dev->bus && dev->bus->pm)
                callback = dev->bus->pm->prepare;

        if (!callback && dev->driver && dev->driver->pm)
                callback = dev->driver->pm->prepare;

        if (callback)
                ret = callback(dev);

unlock:
        device_unlock(dev);

        if (ret < 0) {
                suspend_report_result(callback, ret);
                pm_runtime_put(dev);
                return ret;
        }
        /*
         * A positive return value from ->prepare() means "this device appears
         * to be runtime-suspended and its state is fine, so if it really is
         * runtime-suspended, you can leave it in that state provided that you
         * will do the same thing with all of its descendants".  This only
         * applies to suspend transitions, however.
         */
        spin_lock_irq(&dev->power.lock);
        dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
        spin_unlock_irq(&dev->power.lock);
        return 0;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
        might_sleep();

        /*
         * Give the known devices a chance to complete their probes before
         * probing of devices is disabled.  This sync point is important at
         * least at boot time and during hibernation restore.
         */
        wait_for_device_probe();
        /*
         * It is unsafe if probing of devices happens during suspend or
         * hibernation; system behavior would be unpredictable in that case.
         * So, prohibit device probing here and defer probes instead.  The
         * normal behavior will be restored in dpm_complete().
         */
        device_block_probing();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                trace_device_pm_callback_start(dev, "", state.event);
                error = device_prepare(dev, state);
                trace_device_pm_callback_end(dev, error);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                                "for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
        if (!ops)
                return true;

        return !ops->prepare &&
               !ops->suspend &&
               !ops->suspend_late &&
               !ops->suspend_noirq &&
               !ops->resume_noirq &&
               !ops->resume_early &&
               !ops->resume &&
               !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_pm_callbacks =
                (!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
                (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
                (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
                (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
                (!dev->driver || pm_ops_is_empty(dev->driver->pm));
        spin_unlock_irq(&dev->power.lock);
}