1 /* 2 * drivers/base/power/main.c - Where the driver meets power management. 3 * 4 * Copyright (c) 2003 Patrick Mochel 5 * Copyright (c) 2003 Open Source Development Lab 6 * 7 * This file is released under the GPLv2 8 * 9 * 10 * The driver model core calls device_pm_add() when a device is registered. 11 * This will initialize the embedded device_pm_info object in the device 12 * and add it to the list of power-controlled devices. sysfs entries for 13 * controlling device power management will also be added. 14 * 15 * A separate list is used for keeping track of power info, because the power 16 * domain dependencies may differ from the ancestral dependencies that the 17 * subsystem list maintains. 18 */ 19 20 #include <linux/device.h> 21 #include <linux/kallsyms.h> 22 #include <linux/export.h> 23 #include <linux/mutex.h> 24 #include <linux/pm.h> 25 #include <linux/pm_runtime.h> 26 #include <linux/pm-trace.h> 27 #include <linux/pm_wakeirq.h> 28 #include <linux/interrupt.h> 29 #include <linux/sched.h> 30 #include <linux/sched/debug.h> 31 #include <linux/async.h> 32 #include <linux/suspend.h> 33 #include <trace/events/power.h> 34 #include <linux/cpufreq.h> 35 #include <linux/cpuidle.h> 36 #include <linux/timer.h> 37 38 #include "../base.h" 39 #include "power.h" 40 41 typedef int (*pm_callback_t)(struct device *); 42 43 /* 44 * The entries in the dpm_list list are in a depth first order, simply 45 * because children are guaranteed to be discovered after parents, and 46 * are inserted at the back of the list on discovery. 47 * 48 * Since device_pm_add() may be called with a device lock held, 49 * we must never try to acquire a device lock while holding 50 * dpm_list_mutex. 51 */ 52 53 LIST_HEAD(dpm_list); 54 static LIST_HEAD(dpm_prepared_list); 55 static LIST_HEAD(dpm_suspended_list); 56 static LIST_HEAD(dpm_late_early_list); 57 static LIST_HEAD(dpm_noirq_list); 58 59 struct suspend_stats suspend_stats; 60 static DEFINE_MUTEX(dpm_list_mtx); 61 static pm_message_t pm_transition; 62 63 static int async_error; 64 65 static const char *pm_verb(int event) 66 { 67 switch (event) { 68 case PM_EVENT_SUSPEND: 69 return "suspend"; 70 case PM_EVENT_RESUME: 71 return "resume"; 72 case PM_EVENT_FREEZE: 73 return "freeze"; 74 case PM_EVENT_QUIESCE: 75 return "quiesce"; 76 case PM_EVENT_HIBERNATE: 77 return "hibernate"; 78 case PM_EVENT_THAW: 79 return "thaw"; 80 case PM_EVENT_RESTORE: 81 return "restore"; 82 case PM_EVENT_RECOVER: 83 return "recover"; 84 default: 85 return "(unknown PM event)"; 86 } 87 } 88 89 /** 90 * device_pm_sleep_init - Initialize system suspend-related device fields. 91 * @dev: Device object being initialized. 92 */ 93 void device_pm_sleep_init(struct device *dev) 94 { 95 dev->power.is_prepared = false; 96 dev->power.is_suspended = false; 97 dev->power.is_noirq_suspended = false; 98 dev->power.is_late_suspended = false; 99 init_completion(&dev->power.completion); 100 complete_all(&dev->power.completion); 101 dev->power.wakeup = NULL; 102 INIT_LIST_HEAD(&dev->power.entry); 103 } 104 105 /** 106 * device_pm_lock - Lock the list of active devices used by the PM core. 107 */ 108 void device_pm_lock(void) 109 { 110 mutex_lock(&dpm_list_mtx); 111 } 112 113 /** 114 * device_pm_unlock - Unlock the list of active devices used by the PM core. 115 */ 116 void device_pm_unlock(void) 117 { 118 mutex_unlock(&dpm_list_mtx); 119 } 120 121 /** 122 * device_pm_add - Add a device to the PM core's list of active devices. 123 * @dev: Device to add to the list. 124 */ 125 void device_pm_add(struct device *dev) 126 { 127 pr_debug("PM: Adding info for %s:%s\n", 128 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 129 device_pm_check_callbacks(dev); 130 mutex_lock(&dpm_list_mtx); 131 if (dev->parent && dev->parent->power.is_prepared) 132 dev_warn(dev, "parent %s should not be sleeping\n", 133 dev_name(dev->parent)); 134 list_add_tail(&dev->power.entry, &dpm_list); 135 dev->power.in_dpm_list = true; 136 mutex_unlock(&dpm_list_mtx); 137 } 138 139 /** 140 * device_pm_remove - Remove a device from the PM core's list of active devices. 141 * @dev: Device to be removed from the list. 142 */ 143 void device_pm_remove(struct device *dev) 144 { 145 pr_debug("PM: Removing info for %s:%s\n", 146 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 147 complete_all(&dev->power.completion); 148 mutex_lock(&dpm_list_mtx); 149 list_del_init(&dev->power.entry); 150 dev->power.in_dpm_list = false; 151 mutex_unlock(&dpm_list_mtx); 152 device_wakeup_disable(dev); 153 pm_runtime_remove(dev); 154 device_pm_check_callbacks(dev); 155 } 156 157 /** 158 * device_pm_move_before - Move device in the PM core's list of active devices. 159 * @deva: Device to move in dpm_list. 160 * @devb: Device @deva should come before. 161 */ 162 void device_pm_move_before(struct device *deva, struct device *devb) 163 { 164 pr_debug("PM: Moving %s:%s before %s:%s\n", 165 deva->bus ? deva->bus->name : "No Bus", dev_name(deva), 166 devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); 167 /* Delete deva from dpm_list and reinsert before devb. */ 168 list_move_tail(&deva->power.entry, &devb->power.entry); 169 } 170 171 /** 172 * device_pm_move_after - Move device in the PM core's list of active devices. 173 * @deva: Device to move in dpm_list. 174 * @devb: Device @deva should come after. 175 */ 176 void device_pm_move_after(struct device *deva, struct device *devb) 177 { 178 pr_debug("PM: Moving %s:%s after %s:%s\n", 179 deva->bus ? deva->bus->name : "No Bus", dev_name(deva), 180 devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); 181 /* Delete deva from dpm_list and reinsert after devb. */ 182 list_move(&deva->power.entry, &devb->power.entry); 183 } 184 185 /** 186 * device_pm_move_last - Move device to end of the PM core's list of devices. 187 * @dev: Device to move in dpm_list. 188 */ 189 void device_pm_move_last(struct device *dev) 190 { 191 pr_debug("PM: Moving %s:%s to end of list\n", 192 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 193 list_move_tail(&dev->power.entry, &dpm_list); 194 } 195 196 static ktime_t initcall_debug_start(struct device *dev) 197 { 198 ktime_t calltime = 0; 199 200 if (pm_print_times_enabled) { 201 pr_info("calling %s+ @ %i, parent: %s\n", 202 dev_name(dev), task_pid_nr(current), 203 dev->parent ? dev_name(dev->parent) : "none"); 204 calltime = ktime_get(); 205 } 206 207 return calltime; 208 } 209 210 static void initcall_debug_report(struct device *dev, ktime_t calltime, 211 int error, pm_message_t state, 212 const char *info) 213 { 214 ktime_t rettime; 215 s64 nsecs; 216 217 rettime = ktime_get(); 218 nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime)); 219 220 if (pm_print_times_enabled) { 221 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), 222 error, (unsigned long long)nsecs >> 10); 223 } 224 } 225 226 /** 227 * dpm_wait - Wait for a PM operation to complete. 228 * @dev: Device to wait for. 229 * @async: If unset, wait only if the device's power.async_suspend flag is set. 230 */ 231 static void dpm_wait(struct device *dev, bool async) 232 { 233 if (!dev) 234 return; 235 236 if (async || (pm_async_enabled && dev->power.async_suspend)) 237 wait_for_completion(&dev->power.completion); 238 } 239 240 static int dpm_wait_fn(struct device *dev, void *async_ptr) 241 { 242 dpm_wait(dev, *((bool *)async_ptr)); 243 return 0; 244 } 245 246 static void dpm_wait_for_children(struct device *dev, bool async) 247 { 248 device_for_each_child(dev, &async, dpm_wait_fn); 249 } 250 251 static void dpm_wait_for_suppliers(struct device *dev, bool async) 252 { 253 struct device_link *link; 254 int idx; 255 256 idx = device_links_read_lock(); 257 258 /* 259 * If the supplier goes away right after we've checked the link to it, 260 * we'll wait for its completion to change the state, but that's fine, 261 * because the only things that will block as a result are the SRCU 262 * callbacks freeing the link objects for the links in the list we're 263 * walking. 264 */ 265 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) 266 if (READ_ONCE(link->status) != DL_STATE_DORMANT) 267 dpm_wait(link->supplier, async); 268 269 device_links_read_unlock(idx); 270 } 271 272 static void dpm_wait_for_superior(struct device *dev, bool async) 273 { 274 dpm_wait(dev->parent, async); 275 dpm_wait_for_suppliers(dev, async); 276 } 277 278 static void dpm_wait_for_consumers(struct device *dev, bool async) 279 { 280 struct device_link *link; 281 int idx; 282 283 idx = device_links_read_lock(); 284 285 /* 286 * The status of a device link can only be changed from "dormant" by a 287 * probe, but that cannot happen during system suspend/resume. In 288 * theory it can change to "dormant" at that time, but then it is 289 * reasonable to wait for the target device anyway (eg. if it goes 290 * away, it's better to wait for it to go away completely and then 291 * continue instead of trying to continue in parallel with its 292 * unregistration). 293 */ 294 list_for_each_entry_rcu(link, &dev->links.consumers, s_node) 295 if (READ_ONCE(link->status) != DL_STATE_DORMANT) 296 dpm_wait(link->consumer, async); 297 298 device_links_read_unlock(idx); 299 } 300 301 static void dpm_wait_for_subordinate(struct device *dev, bool async) 302 { 303 dpm_wait_for_children(dev, async); 304 dpm_wait_for_consumers(dev, async); 305 } 306 307 /** 308 * pm_op - Return the PM operation appropriate for given PM event. 309 * @ops: PM operations to choose from. 310 * @state: PM transition of the system being carried out. 311 */ 312 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state) 313 { 314 switch (state.event) { 315 #ifdef CONFIG_SUSPEND 316 case PM_EVENT_SUSPEND: 317 return ops->suspend; 318 case PM_EVENT_RESUME: 319 return ops->resume; 320 #endif /* CONFIG_SUSPEND */ 321 #ifdef CONFIG_HIBERNATE_CALLBACKS 322 case PM_EVENT_FREEZE: 323 case PM_EVENT_QUIESCE: 324 return ops->freeze; 325 case PM_EVENT_HIBERNATE: 326 return ops->poweroff; 327 case PM_EVENT_THAW: 328 case PM_EVENT_RECOVER: 329 return ops->thaw; 330 break; 331 case PM_EVENT_RESTORE: 332 return ops->restore; 333 #endif /* CONFIG_HIBERNATE_CALLBACKS */ 334 } 335 336 return NULL; 337 } 338 339 /** 340 * pm_late_early_op - Return the PM operation appropriate for given PM event. 341 * @ops: PM operations to choose from. 342 * @state: PM transition of the system being carried out. 343 * 344 * Runtime PM is disabled for @dev while this function is being executed. 345 */ 346 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops, 347 pm_message_t state) 348 { 349 switch (state.event) { 350 #ifdef CONFIG_SUSPEND 351 case PM_EVENT_SUSPEND: 352 return ops->suspend_late; 353 case PM_EVENT_RESUME: 354 return ops->resume_early; 355 #endif /* CONFIG_SUSPEND */ 356 #ifdef CONFIG_HIBERNATE_CALLBACKS 357 case PM_EVENT_FREEZE: 358 case PM_EVENT_QUIESCE: 359 return ops->freeze_late; 360 case PM_EVENT_HIBERNATE: 361 return ops->poweroff_late; 362 case PM_EVENT_THAW: 363 case PM_EVENT_RECOVER: 364 return ops->thaw_early; 365 case PM_EVENT_RESTORE: 366 return ops->restore_early; 367 #endif /* CONFIG_HIBERNATE_CALLBACKS */ 368 } 369 370 return NULL; 371 } 372 373 /** 374 * pm_noirq_op - Return the PM operation appropriate for given PM event. 375 * @ops: PM operations to choose from. 376 * @state: PM transition of the system being carried out. 377 * 378 * The driver of @dev will not receive interrupts while this function is being 379 * executed. 380 */ 381 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state) 382 { 383 switch (state.event) { 384 #ifdef CONFIG_SUSPEND 385 case PM_EVENT_SUSPEND: 386 return ops->suspend_noirq; 387 case PM_EVENT_RESUME: 388 return ops->resume_noirq; 389 #endif /* CONFIG_SUSPEND */ 390 #ifdef CONFIG_HIBERNATE_CALLBACKS 391 case PM_EVENT_FREEZE: 392 case PM_EVENT_QUIESCE: 393 return ops->freeze_noirq; 394 case PM_EVENT_HIBERNATE: 395 return ops->poweroff_noirq; 396 case PM_EVENT_THAW: 397 case PM_EVENT_RECOVER: 398 return ops->thaw_noirq; 399 case PM_EVENT_RESTORE: 400 return ops->restore_noirq; 401 #endif /* CONFIG_HIBERNATE_CALLBACKS */ 402 } 403 404 return NULL; 405 } 406 407 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info) 408 { 409 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), 410 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? 411 ", may wakeup" : ""); 412 } 413 414 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, 415 int error) 416 { 417 printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", 418 dev_name(dev), pm_verb(state.event), info, error); 419 } 420 421 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error, 422 const char *info) 423 { 424 ktime_t calltime; 425 u64 usecs64; 426 int usecs; 427 428 calltime = ktime_get(); 429 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime)); 430 do_div(usecs64, NSEC_PER_USEC); 431 usecs = usecs64; 432 if (usecs == 0) 433 usecs = 1; 434 435 pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n", 436 info ?: "", info ? " " : "", pm_verb(state.event), 437 error ? "aborted" : "complete", 438 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); 439 } 440 441 static int dpm_run_callback(pm_callback_t cb, struct device *dev, 442 pm_message_t state, const char *info) 443 { 444 ktime_t calltime; 445 int error; 446 447 if (!cb) 448 return 0; 449 450 calltime = initcall_debug_start(dev); 451 452 pm_dev_dbg(dev, state, info); 453 trace_device_pm_callback_start(dev, info, state.event); 454 error = cb(dev); 455 trace_device_pm_callback_end(dev, error); 456 suspend_report_result(cb, error); 457 458 initcall_debug_report(dev, calltime, error, state, info); 459 460 return error; 461 } 462 463 #ifdef CONFIG_DPM_WATCHDOG 464 struct dpm_watchdog { 465 struct device *dev; 466 struct task_struct *tsk; 467 struct timer_list timer; 468 }; 469 470 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \ 471 struct dpm_watchdog wd 472 473 /** 474 * dpm_watchdog_handler - Driver suspend / resume watchdog handler. 475 * @data: Watchdog object address. 476 * 477 * Called when a driver has timed out suspending or resuming. 478 * There's not much we can do here to recover so panic() to 479 * capture a crash-dump in pstore. 480 */ 481 static void dpm_watchdog_handler(struct timer_list *t) 482 { 483 struct dpm_watchdog *wd = from_timer(wd, t, timer); 484 485 dev_emerg(wd->dev, "**** DPM device timeout ****\n"); 486 show_stack(wd->tsk, NULL); 487 panic("%s %s: unrecoverable failure\n", 488 dev_driver_string(wd->dev), dev_name(wd->dev)); 489 } 490 491 /** 492 * dpm_watchdog_set - Enable pm watchdog for given device. 493 * @wd: Watchdog. Must be allocated on the stack. 494 * @dev: Device to handle. 495 */ 496 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) 497 { 498 struct timer_list *timer = &wd->timer; 499 500 wd->dev = dev; 501 wd->tsk = current; 502 503 timer_setup_on_stack(timer, dpm_watchdog_handler, 0); 504 /* use same timeout value for both suspend and resume */ 505 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT; 506 add_timer(timer); 507 } 508 509 /** 510 * dpm_watchdog_clear - Disable suspend/resume watchdog. 511 * @wd: Watchdog to disable. 512 */ 513 static void dpm_watchdog_clear(struct dpm_watchdog *wd) 514 { 515 struct timer_list *timer = &wd->timer; 516 517 del_timer_sync(timer); 518 destroy_timer_on_stack(timer); 519 } 520 #else 521 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) 522 #define dpm_watchdog_set(x, y) 523 #define dpm_watchdog_clear(x) 524 #endif 525 526 /*------------------------- Resume routines -------------------------*/ 527 528 /** 529 * device_resume_noirq - Execute an "early resume" callback for given device. 530 * @dev: Device to handle. 531 * @state: PM transition of the system being carried out. 532 * @async: If true, the device is being resumed asynchronously. 533 * 534 * The driver of @dev will not receive interrupts while this function is being 535 * executed. 536 */ 537 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async) 538 { 539 pm_callback_t callback = NULL; 540 const char *info = NULL; 541 int error = 0; 542 543 TRACE_DEVICE(dev); 544 TRACE_RESUME(0); 545 546 if (dev->power.syscore || dev->power.direct_complete) 547 goto Out; 548 549 if (!dev->power.is_noirq_suspended) 550 goto Out; 551 552 dpm_wait_for_superior(dev, async); 553 554 if (dev->pm_domain) { 555 info = "noirq power domain "; 556 callback = pm_noirq_op(&dev->pm_domain->ops, state); 557 } else if (dev->type && dev->type->pm) { 558 info = "noirq type "; 559 callback = pm_noirq_op(dev->type->pm, state); 560 } else if (dev->class && dev->class->pm) { 561 info = "noirq class "; 562 callback = pm_noirq_op(dev->class->pm, state); 563 } else if (dev->bus && dev->bus->pm) { 564 info = "noirq bus "; 565 callback = pm_noirq_op(dev->bus->pm, state); 566 } 567 568 if (!callback && dev->driver && dev->driver->pm) { 569 info = "noirq driver "; 570 callback = pm_noirq_op(dev->driver->pm, state); 571 } 572 573 error = dpm_run_callback(callback, dev, state, info); 574 dev->power.is_noirq_suspended = false; 575 576 Out: 577 complete_all(&dev->power.completion); 578 TRACE_RESUME(error); 579 return error; 580 } 581 582 static bool is_async(struct device *dev) 583 { 584 return dev->power.async_suspend && pm_async_enabled 585 && !pm_trace_is_enabled(); 586 } 587 588 static void async_resume_noirq(void *data, async_cookie_t cookie) 589 { 590 struct device *dev = (struct device *)data; 591 int error; 592 593 error = device_resume_noirq(dev, pm_transition, true); 594 if (error) 595 pm_dev_err(dev, pm_transition, " async", error); 596 597 put_device(dev); 598 } 599 600 void dpm_noirq_resume_devices(pm_message_t state) 601 { 602 struct device *dev; 603 ktime_t starttime = ktime_get(); 604 605 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true); 606 mutex_lock(&dpm_list_mtx); 607 pm_transition = state; 608 609 /* 610 * Advanced the async threads upfront, 611 * in case the starting of async threads is 612 * delayed by non-async resuming devices. 613 */ 614 list_for_each_entry(dev, &dpm_noirq_list, power.entry) { 615 reinit_completion(&dev->power.completion); 616 if (is_async(dev)) { 617 get_device(dev); 618 async_schedule(async_resume_noirq, dev); 619 } 620 } 621 622 while (!list_empty(&dpm_noirq_list)) { 623 dev = to_device(dpm_noirq_list.next); 624 get_device(dev); 625 list_move_tail(&dev->power.entry, &dpm_late_early_list); 626 mutex_unlock(&dpm_list_mtx); 627 628 if (!is_async(dev)) { 629 int error; 630 631 error = device_resume_noirq(dev, state, false); 632 if (error) { 633 suspend_stats.failed_resume_noirq++; 634 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); 635 dpm_save_failed_dev(dev_name(dev)); 636 pm_dev_err(dev, state, " noirq", error); 637 } 638 } 639 640 mutex_lock(&dpm_list_mtx); 641 put_device(dev); 642 } 643 mutex_unlock(&dpm_list_mtx); 644 async_synchronize_full(); 645 dpm_show_time(starttime, state, 0, "noirq"); 646 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); 647 } 648 649 void dpm_noirq_end(void) 650 { 651 resume_device_irqs(); 652 device_wakeup_disarm_wake_irqs(); 653 cpuidle_resume(); 654 } 655 656 /** 657 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. 658 * @state: PM transition of the system being carried out. 659 * 660 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and 661 * allow device drivers' interrupt handlers to be called. 662 */ 663 void dpm_resume_noirq(pm_message_t state) 664 { 665 dpm_noirq_resume_devices(state); 666 dpm_noirq_end(); 667 } 668 669 /** 670 * device_resume_early - Execute an "early resume" callback for given device. 671 * @dev: Device to handle. 672 * @state: PM transition of the system being carried out. 673 * @async: If true, the device is being resumed asynchronously. 674 * 675 * Runtime PM is disabled for @dev while this function is being executed. 676 */ 677 static int device_resume_early(struct device *dev, pm_message_t state, bool async) 678 { 679 pm_callback_t callback = NULL; 680 const char *info = NULL; 681 int error = 0; 682 683 TRACE_DEVICE(dev); 684 TRACE_RESUME(0); 685 686 if (dev->power.syscore || dev->power.direct_complete) 687 goto Out; 688 689 if (!dev->power.is_late_suspended) 690 goto Out; 691 692 dpm_wait_for_superior(dev, async); 693 694 if (dev->pm_domain) { 695 info = "early power domain "; 696 callback = pm_late_early_op(&dev->pm_domain->ops, state); 697 } else if (dev->type && dev->type->pm) { 698 info = "early type "; 699 callback = pm_late_early_op(dev->type->pm, state); 700 } else if (dev->class && dev->class->pm) { 701 info = "early class "; 702 callback = pm_late_early_op(dev->class->pm, state); 703 } else if (dev->bus && dev->bus->pm) { 704 info = "early bus "; 705 callback = pm_late_early_op(dev->bus->pm, state); 706 } 707 708 if (!callback && dev->driver && dev->driver->pm) { 709 info = "early driver "; 710 callback = pm_late_early_op(dev->driver->pm, state); 711 } 712 713 error = dpm_run_callback(callback, dev, state, info); 714 dev->power.is_late_suspended = false; 715 716 Out: 717 TRACE_RESUME(error); 718 719 pm_runtime_enable(dev); 720 complete_all(&dev->power.completion); 721 return error; 722 } 723 724 static void async_resume_early(void *data, async_cookie_t cookie) 725 { 726 struct device *dev = (struct device *)data; 727 int error; 728 729 error = device_resume_early(dev, pm_transition, true); 730 if (error) 731 pm_dev_err(dev, pm_transition, " async", error); 732 733 put_device(dev); 734 } 735 736 /** 737 * dpm_resume_early - Execute "early resume" callbacks for all devices. 738 * @state: PM transition of the system being carried out. 739 */ 740 void dpm_resume_early(pm_message_t state) 741 { 742 struct device *dev; 743 ktime_t starttime = ktime_get(); 744 745 trace_suspend_resume(TPS("dpm_resume_early"), state.event, true); 746 mutex_lock(&dpm_list_mtx); 747 pm_transition = state; 748 749 /* 750 * Advanced the async threads upfront, 751 * in case the starting of async threads is 752 * delayed by non-async resuming devices. 753 */ 754 list_for_each_entry(dev, &dpm_late_early_list, power.entry) { 755 reinit_completion(&dev->power.completion); 756 if (is_async(dev)) { 757 get_device(dev); 758 async_schedule(async_resume_early, dev); 759 } 760 } 761 762 while (!list_empty(&dpm_late_early_list)) { 763 dev = to_device(dpm_late_early_list.next); 764 get_device(dev); 765 list_move_tail(&dev->power.entry, &dpm_suspended_list); 766 mutex_unlock(&dpm_list_mtx); 767 768 if (!is_async(dev)) { 769 int error; 770 771 error = device_resume_early(dev, state, false); 772 if (error) { 773 suspend_stats.failed_resume_early++; 774 dpm_save_failed_step(SUSPEND_RESUME_EARLY); 775 dpm_save_failed_dev(dev_name(dev)); 776 pm_dev_err(dev, state, " early", error); 777 } 778 } 779 mutex_lock(&dpm_list_mtx); 780 put_device(dev); 781 } 782 mutex_unlock(&dpm_list_mtx); 783 async_synchronize_full(); 784 dpm_show_time(starttime, state, 0, "early"); 785 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); 786 } 787 788 /** 789 * dpm_resume_start - Execute "noirq" and "early" device callbacks. 790 * @state: PM transition of the system being carried out. 791 */ 792 void dpm_resume_start(pm_message_t state) 793 { 794 dpm_resume_noirq(state); 795 dpm_resume_early(state); 796 } 797 EXPORT_SYMBOL_GPL(dpm_resume_start); 798 799 /** 800 * device_resume - Execute "resume" callbacks for given device. 801 * @dev: Device to handle. 802 * @state: PM transition of the system being carried out. 803 * @async: If true, the device is being resumed asynchronously. 804 */ 805 static int device_resume(struct device *dev, pm_message_t state, bool async) 806 { 807 pm_callback_t callback = NULL; 808 const char *info = NULL; 809 int error = 0; 810 DECLARE_DPM_WATCHDOG_ON_STACK(wd); 811 812 TRACE_DEVICE(dev); 813 TRACE_RESUME(0); 814 815 if (dev->power.syscore) 816 goto Complete; 817 818 if (dev->power.direct_complete) { 819 /* Match the pm_runtime_disable() in __device_suspend(). */ 820 pm_runtime_enable(dev); 821 goto Complete; 822 } 823 824 dpm_wait_for_superior(dev, async); 825 dpm_watchdog_set(&wd, dev); 826 device_lock(dev); 827 828 /* 829 * This is a fib. But we'll allow new children to be added below 830 * a resumed device, even if the device hasn't been completed yet. 831 */ 832 dev->power.is_prepared = false; 833 834 if (!dev->power.is_suspended) 835 goto Unlock; 836 837 if (dev->pm_domain) { 838 info = "power domain "; 839 callback = pm_op(&dev->pm_domain->ops, state); 840 goto Driver; 841 } 842 843 if (dev->type && dev->type->pm) { 844 info = "type "; 845 callback = pm_op(dev->type->pm, state); 846 goto Driver; 847 } 848 849 if (dev->class) { 850 if (dev->class->pm) { 851 info = "class "; 852 callback = pm_op(dev->class->pm, state); 853 goto Driver; 854 } else if (dev->class->resume) { 855 info = "legacy class "; 856 callback = dev->class->resume; 857 goto End; 858 } 859 } 860 861 if (dev->bus) { 862 if (dev->bus->pm) { 863 info = "bus "; 864 callback = pm_op(dev->bus->pm, state); 865 } else if (dev->bus->resume) { 866 info = "legacy bus "; 867 callback = dev->bus->resume; 868 goto End; 869 } 870 } 871 872 Driver: 873 if (!callback && dev->driver && dev->driver->pm) { 874 info = "driver "; 875 callback = pm_op(dev->driver->pm, state); 876 } 877 878 End: 879 error = dpm_run_callback(callback, dev, state, info); 880 dev->power.is_suspended = false; 881 882 Unlock: 883 device_unlock(dev); 884 dpm_watchdog_clear(&wd); 885 886 Complete: 887 complete_all(&dev->power.completion); 888 889 TRACE_RESUME(error); 890 891 return error; 892 } 893 894 static void async_resume(void *data, async_cookie_t cookie) 895 { 896 struct device *dev = (struct device *)data; 897 int error; 898 899 error = device_resume(dev, pm_transition, true); 900 if (error) 901 pm_dev_err(dev, pm_transition, " async", error); 902 put_device(dev); 903 } 904 905 /** 906 * dpm_resume - Execute "resume" callbacks for non-sysdev devices. 907 * @state: PM transition of the system being carried out. 908 * 909 * Execute the appropriate "resume" callback for all devices whose status 910 * indicates that they are suspended. 911 */ 912 void dpm_resume(pm_message_t state) 913 { 914 struct device *dev; 915 ktime_t starttime = ktime_get(); 916 917 trace_suspend_resume(TPS("dpm_resume"), state.event, true); 918 might_sleep(); 919 920 mutex_lock(&dpm_list_mtx); 921 pm_transition = state; 922 async_error = 0; 923 924 list_for_each_entry(dev, &dpm_suspended_list, power.entry) { 925 reinit_completion(&dev->power.completion); 926 if (is_async(dev)) { 927 get_device(dev); 928 async_schedule(async_resume, dev); 929 } 930 } 931 932 while (!list_empty(&dpm_suspended_list)) { 933 dev = to_device(dpm_suspended_list.next); 934 get_device(dev); 935 if (!is_async(dev)) { 936 int error; 937 938 mutex_unlock(&dpm_list_mtx); 939 940 error = device_resume(dev, state, false); 941 if (error) { 942 suspend_stats.failed_resume++; 943 dpm_save_failed_step(SUSPEND_RESUME); 944 dpm_save_failed_dev(dev_name(dev)); 945 pm_dev_err(dev, state, "", error); 946 } 947 948 mutex_lock(&dpm_list_mtx); 949 } 950 if (!list_empty(&dev->power.entry)) 951 list_move_tail(&dev->power.entry, &dpm_prepared_list); 952 put_device(dev); 953 } 954 mutex_unlock(&dpm_list_mtx); 955 async_synchronize_full(); 956 dpm_show_time(starttime, state, 0, NULL); 957 958 cpufreq_resume(); 959 trace_suspend_resume(TPS("dpm_resume"), state.event, false); 960 } 961 962 /** 963 * device_complete - Complete a PM transition for given device. 964 * @dev: Device to handle. 965 * @state: PM transition of the system being carried out. 966 */ 967 static void device_complete(struct device *dev, pm_message_t state) 968 { 969 void (*callback)(struct device *) = NULL; 970 const char *info = NULL; 971 972 if (dev->power.syscore) 973 return; 974 975 device_lock(dev); 976 977 if (dev->pm_domain) { 978 info = "completing power domain "; 979 callback = dev->pm_domain->ops.complete; 980 } else if (dev->type && dev->type->pm) { 981 info = "completing type "; 982 callback = dev->type->pm->complete; 983 } else if (dev->class && dev->class->pm) { 984 info = "completing class "; 985 callback = dev->class->pm->complete; 986 } else if (dev->bus && dev->bus->pm) { 987 info = "completing bus "; 988 callback = dev->bus->pm->complete; 989 } 990 991 if (!callback && dev->driver && dev->driver->pm) { 992 info = "completing driver "; 993 callback = dev->driver->pm->complete; 994 } 995 996 if (callback) { 997 pm_dev_dbg(dev, state, info); 998 callback(dev); 999 } 1000 1001 device_unlock(dev); 1002 1003 pm_runtime_put(dev); 1004 } 1005 1006 /** 1007 * dpm_complete - Complete a PM transition for all non-sysdev devices. 1008 * @state: PM transition of the system being carried out. 1009 * 1010 * Execute the ->complete() callbacks for all devices whose PM status is not 1011 * DPM_ON (this allows new devices to be registered). 1012 */ 1013 void dpm_complete(pm_message_t state) 1014 { 1015 struct list_head list; 1016 1017 trace_suspend_resume(TPS("dpm_complete"), state.event, true); 1018 might_sleep(); 1019 1020 INIT_LIST_HEAD(&list); 1021 mutex_lock(&dpm_list_mtx); 1022 while (!list_empty(&dpm_prepared_list)) { 1023 struct device *dev = to_device(dpm_prepared_list.prev); 1024 1025 get_device(dev); 1026 dev->power.is_prepared = false; 1027 list_move(&dev->power.entry, &list); 1028 mutex_unlock(&dpm_list_mtx); 1029 1030 trace_device_pm_callback_start(dev, "", state.event); 1031 device_complete(dev, state); 1032 trace_device_pm_callback_end(dev, 0); 1033 1034 mutex_lock(&dpm_list_mtx); 1035 put_device(dev); 1036 } 1037 list_splice(&list, &dpm_list); 1038 mutex_unlock(&dpm_list_mtx); 1039 1040 /* Allow device probing and trigger re-probing of deferred devices */ 1041 device_unblock_probing(); 1042 trace_suspend_resume(TPS("dpm_complete"), state.event, false); 1043 } 1044 1045 /** 1046 * dpm_resume_end - Execute "resume" callbacks and complete system transition. 1047 * @state: PM transition of the system being carried out. 1048 * 1049 * Execute "resume" callbacks for all devices and complete the PM transition of 1050 * the system. 1051 */ 1052 void dpm_resume_end(pm_message_t state) 1053 { 1054 dpm_resume(state); 1055 dpm_complete(state); 1056 } 1057 EXPORT_SYMBOL_GPL(dpm_resume_end); 1058 1059 1060 /*------------------------- Suspend routines -------------------------*/ 1061 1062 /** 1063 * resume_event - Return a "resume" message for given "suspend" sleep state. 1064 * @sleep_state: PM message representing a sleep state. 1065 * 1066 * Return a PM message representing the resume event corresponding to given 1067 * sleep state. 1068 */ 1069 static pm_message_t resume_event(pm_message_t sleep_state) 1070 { 1071 switch (sleep_state.event) { 1072 case PM_EVENT_SUSPEND: 1073 return PMSG_RESUME; 1074 case PM_EVENT_FREEZE: 1075 case PM_EVENT_QUIESCE: 1076 return PMSG_RECOVER; 1077 case PM_EVENT_HIBERNATE: 1078 return PMSG_RESTORE; 1079 } 1080 return PMSG_ON; 1081 } 1082 1083 /** 1084 * device_suspend_noirq - Execute a "late suspend" callback for given device. 1085 * @dev: Device to handle. 1086 * @state: PM transition of the system being carried out. 1087 * @async: If true, the device is being suspended asynchronously. 1088 * 1089 * The driver of @dev will not receive interrupts while this function is being 1090 * executed. 1091 */ 1092 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async) 1093 { 1094 pm_callback_t callback = NULL; 1095 const char *info = NULL; 1096 int error = 0; 1097 1098 TRACE_DEVICE(dev); 1099 TRACE_SUSPEND(0); 1100 1101 dpm_wait_for_subordinate(dev, async); 1102 1103 if (async_error) 1104 goto Complete; 1105 1106 if (pm_wakeup_pending()) { 1107 async_error = -EBUSY; 1108 goto Complete; 1109 } 1110 1111 if (dev->power.syscore || dev->power.direct_complete) 1112 goto Complete; 1113 1114 if (dev->pm_domain) { 1115 info = "noirq power domain "; 1116 callback = pm_noirq_op(&dev->pm_domain->ops, state); 1117 } else if (dev->type && dev->type->pm) { 1118 info = "noirq type "; 1119 callback = pm_noirq_op(dev->type->pm, state); 1120 } else if (dev->class && dev->class->pm) { 1121 info = "noirq class "; 1122 callback = pm_noirq_op(dev->class->pm, state); 1123 } else if (dev->bus && dev->bus->pm) { 1124 info = "noirq bus "; 1125 callback = pm_noirq_op(dev->bus->pm, state); 1126 } 1127 1128 if (!callback && dev->driver && dev->driver->pm) { 1129 info = "noirq driver "; 1130 callback = pm_noirq_op(dev->driver->pm, state); 1131 } 1132 1133 error = dpm_run_callback(callback, dev, state, info); 1134 if (!error) 1135 dev->power.is_noirq_suspended = true; 1136 else 1137 async_error = error; 1138 1139 Complete: 1140 complete_all(&dev->power.completion); 1141 TRACE_SUSPEND(error); 1142 return error; 1143 } 1144 1145 static void async_suspend_noirq(void *data, async_cookie_t cookie) 1146 { 1147 struct device *dev = (struct device *)data; 1148 int error; 1149 1150 error = __device_suspend_noirq(dev, pm_transition, true); 1151 if (error) { 1152 dpm_save_failed_dev(dev_name(dev)); 1153 pm_dev_err(dev, pm_transition, " async", error); 1154 } 1155 1156 put_device(dev); 1157 } 1158 1159 static int device_suspend_noirq(struct device *dev) 1160 { 1161 reinit_completion(&dev->power.completion); 1162 1163 if (is_async(dev)) { 1164 get_device(dev); 1165 async_schedule(async_suspend_noirq, dev); 1166 return 0; 1167 } 1168 return __device_suspend_noirq(dev, pm_transition, false); 1169 } 1170 1171 void dpm_noirq_begin(void) 1172 { 1173 cpuidle_pause(); 1174 device_wakeup_arm_wake_irqs(); 1175 suspend_device_irqs(); 1176 } 1177 1178 int dpm_noirq_suspend_devices(pm_message_t state) 1179 { 1180 ktime_t starttime = ktime_get(); 1181 int error = 0; 1182 1183 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); 1184 mutex_lock(&dpm_list_mtx); 1185 pm_transition = state; 1186 async_error = 0; 1187 1188 while (!list_empty(&dpm_late_early_list)) { 1189 struct device *dev = to_device(dpm_late_early_list.prev); 1190 1191 get_device(dev); 1192 mutex_unlock(&dpm_list_mtx); 1193 1194 error = device_suspend_noirq(dev); 1195 1196 mutex_lock(&dpm_list_mtx); 1197 if (error) { 1198 pm_dev_err(dev, state, " noirq", error); 1199 dpm_save_failed_dev(dev_name(dev)); 1200 put_device(dev); 1201 break; 1202 } 1203 if (!list_empty(&dev->power.entry)) 1204 list_move(&dev->power.entry, &dpm_noirq_list); 1205 put_device(dev); 1206 1207 if (async_error) 1208 break; 1209 } 1210 mutex_unlock(&dpm_list_mtx); 1211 async_synchronize_full(); 1212 if (!error) 1213 error = async_error; 1214 1215 if (error) { 1216 suspend_stats.failed_suspend_noirq++; 1217 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); 1218 } 1219 dpm_show_time(starttime, state, error, "noirq"); 1220 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false); 1221 return error; 1222 } 1223 1224 /** 1225 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. 1226 * @state: PM transition of the system being carried out. 1227 * 1228 * Prevent device drivers' interrupt handlers from being called and invoke 1229 * "noirq" suspend callbacks for all non-sysdev devices. 1230 */ 1231 int dpm_suspend_noirq(pm_message_t state) 1232 { 1233 int ret; 1234 1235 dpm_noirq_begin(); 1236 ret = dpm_noirq_suspend_devices(state); 1237 if (ret) 1238 dpm_resume_noirq(resume_event(state)); 1239 1240 return ret; 1241 } 1242 1243 /** 1244 * device_suspend_late - Execute a "late suspend" callback for given device. 1245 * @dev: Device to handle. 1246 * @state: PM transition of the system being carried out. 1247 * @async: If true, the device is being suspended asynchronously. 1248 * 1249 * Runtime PM is disabled for @dev while this function is being executed. 1250 */ 1251 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async) 1252 { 1253 pm_callback_t callback = NULL; 1254 const char *info = NULL; 1255 int error = 0; 1256 1257 TRACE_DEVICE(dev); 1258 TRACE_SUSPEND(0); 1259 1260 __pm_runtime_disable(dev, false); 1261 1262 dpm_wait_for_subordinate(dev, async); 1263 1264 if (async_error) 1265 goto Complete; 1266 1267 if (pm_wakeup_pending()) { 1268 async_error = -EBUSY; 1269 goto Complete; 1270 } 1271 1272 if (dev->power.syscore || dev->power.direct_complete) 1273 goto Complete; 1274 1275 if (dev->pm_domain) { 1276 info = "late power domain "; 1277 callback = pm_late_early_op(&dev->pm_domain->ops, state); 1278 } else if (dev->type && dev->type->pm) { 1279 info = "late type "; 1280 callback = pm_late_early_op(dev->type->pm, state); 1281 } else if (dev->class && dev->class->pm) { 1282 info = "late class "; 1283 callback = pm_late_early_op(dev->class->pm, state); 1284 } else if (dev->bus && dev->bus->pm) { 1285 info = "late bus "; 1286 callback = pm_late_early_op(dev->bus->pm, state); 1287 } 1288 1289 if (!callback && dev->driver && dev->driver->pm) { 1290 info = "late driver "; 1291 callback = pm_late_early_op(dev->driver->pm, state); 1292 } 1293 1294 error = dpm_run_callback(callback, dev, state, info); 1295 if (!error) 1296 dev->power.is_late_suspended = true; 1297 else 1298 async_error = error; 1299 1300 Complete: 1301 TRACE_SUSPEND(error); 1302 complete_all(&dev->power.completion); 1303 return error; 1304 } 1305 1306 static void async_suspend_late(void *data, async_cookie_t cookie) 1307 { 1308 struct device *dev = (struct device *)data; 1309 int error; 1310 1311 error = __device_suspend_late(dev, pm_transition, true); 1312 if (error) { 1313 dpm_save_failed_dev(dev_name(dev)); 1314 pm_dev_err(dev, pm_transition, " async", error); 1315 } 1316 put_device(dev); 1317 } 1318 1319 static int device_suspend_late(struct device *dev) 1320 { 1321 reinit_completion(&dev->power.completion); 1322 1323 if (is_async(dev)) { 1324 get_device(dev); 1325 async_schedule(async_suspend_late, dev); 1326 return 0; 1327 } 1328 1329 return __device_suspend_late(dev, pm_transition, false); 1330 } 1331 1332 /** 1333 * dpm_suspend_late - Execute "late suspend" callbacks for all devices. 1334 * @state: PM transition of the system being carried out. 1335 */ 1336 int dpm_suspend_late(pm_message_t state) 1337 { 1338 ktime_t starttime = ktime_get(); 1339 int error = 0; 1340 1341 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true); 1342 mutex_lock(&dpm_list_mtx); 1343 pm_transition = state; 1344 async_error = 0; 1345 1346 while (!list_empty(&dpm_suspended_list)) { 1347 struct device *dev = to_device(dpm_suspended_list.prev); 1348 1349 get_device(dev); 1350 mutex_unlock(&dpm_list_mtx); 1351 1352 error = device_suspend_late(dev); 1353 1354 mutex_lock(&dpm_list_mtx); 1355 if (!list_empty(&dev->power.entry)) 1356 list_move(&dev->power.entry, &dpm_late_early_list); 1357 1358 if (error) { 1359 pm_dev_err(dev, state, " late", error); 1360 dpm_save_failed_dev(dev_name(dev)); 1361 put_device(dev); 1362 break; 1363 } 1364 put_device(dev); 1365 1366 if (async_error) 1367 break; 1368 } 1369 mutex_unlock(&dpm_list_mtx); 1370 async_synchronize_full(); 1371 if (!error) 1372 error = async_error; 1373 if (error) { 1374 suspend_stats.failed_suspend_late++; 1375 dpm_save_failed_step(SUSPEND_SUSPEND_LATE); 1376 dpm_resume_early(resume_event(state)); 1377 } 1378 dpm_show_time(starttime, state, error, "late"); 1379 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false); 1380 return error; 1381 } 1382 1383 /** 1384 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks. 1385 * @state: PM transition of the system being carried out. 1386 */ 1387 int dpm_suspend_end(pm_message_t state) 1388 { 1389 int error = dpm_suspend_late(state); 1390 if (error) 1391 return error; 1392 1393 error = dpm_suspend_noirq(state); 1394 if (error) { 1395 dpm_resume_early(resume_event(state)); 1396 return error; 1397 } 1398 1399 return 0; 1400 } 1401 EXPORT_SYMBOL_GPL(dpm_suspend_end); 1402 1403 /** 1404 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. 1405 * @dev: Device to suspend. 1406 * @state: PM transition of the system being carried out. 1407 * @cb: Suspend callback to execute. 1408 * @info: string description of caller. 1409 */ 1410 static int legacy_suspend(struct device *dev, pm_message_t state, 1411 int (*cb)(struct device *dev, pm_message_t state), 1412 const char *info) 1413 { 1414 int error; 1415 ktime_t calltime; 1416 1417 calltime = initcall_debug_start(dev); 1418 1419 trace_device_pm_callback_start(dev, info, state.event); 1420 error = cb(dev, state); 1421 trace_device_pm_callback_end(dev, error); 1422 suspend_report_result(cb, error); 1423 1424 initcall_debug_report(dev, calltime, error, state, info); 1425 1426 return error; 1427 } 1428 1429 static void dpm_clear_suppliers_direct_complete(struct device *dev) 1430 { 1431 struct device_link *link; 1432 int idx; 1433 1434 idx = device_links_read_lock(); 1435 1436 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { 1437 spin_lock_irq(&link->supplier->power.lock); 1438 link->supplier->power.direct_complete = false; 1439 spin_unlock_irq(&link->supplier->power.lock); 1440 } 1441 1442 device_links_read_unlock(idx); 1443 } 1444 1445 /** 1446 * device_suspend - Execute "suspend" callbacks for given device. 1447 * @dev: Device to handle. 1448 * @state: PM transition of the system being carried out. 1449 * @async: If true, the device is being suspended asynchronously. 1450 */ 1451 static int __device_suspend(struct device *dev, pm_message_t state, bool async) 1452 { 1453 pm_callback_t callback = NULL; 1454 const char *info = NULL; 1455 int error = 0; 1456 DECLARE_DPM_WATCHDOG_ON_STACK(wd); 1457 1458 TRACE_DEVICE(dev); 1459 TRACE_SUSPEND(0); 1460 1461 dpm_wait_for_subordinate(dev, async); 1462 1463 if (async_error) 1464 goto Complete; 1465 1466 /* 1467 * If a device configured to wake up the system from sleep states 1468 * has been suspended at run time and there's a resume request pending 1469 * for it, this is equivalent to the device signaling wakeup, so the 1470 * system suspend operation should be aborted. 1471 */ 1472 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) 1473 pm_wakeup_event(dev, 0); 1474 1475 if (pm_wakeup_pending()) { 1476 async_error = -EBUSY; 1477 goto Complete; 1478 } 1479 1480 if (dev->power.syscore) 1481 goto Complete; 1482 1483 if (dev->power.direct_complete) { 1484 if (pm_runtime_status_suspended(dev)) { 1485 pm_runtime_disable(dev); 1486 if (pm_runtime_status_suspended(dev)) 1487 goto Complete; 1488 1489 pm_runtime_enable(dev); 1490 } 1491 dev->power.direct_complete = false; 1492 } 1493 1494 dpm_watchdog_set(&wd, dev); 1495 device_lock(dev); 1496 1497 if (dev->pm_domain) { 1498 info = "power domain "; 1499 callback = pm_op(&dev->pm_domain->ops, state); 1500 goto Run; 1501 } 1502 1503 if (dev->type && dev->type->pm) { 1504 info = "type "; 1505 callback = pm_op(dev->type->pm, state); 1506 goto Run; 1507 } 1508 1509 if (dev->class) { 1510 if (dev->class->pm) { 1511 info = "class "; 1512 callback = pm_op(dev->class->pm, state); 1513 goto Run; 1514 } else if (dev->class->suspend) { 1515 pm_dev_dbg(dev, state, "legacy class "); 1516 error = legacy_suspend(dev, state, dev->class->suspend, 1517 "legacy class "); 1518 goto End; 1519 } 1520 } 1521 1522 if (dev->bus) { 1523 if (dev->bus->pm) { 1524 info = "bus "; 1525 callback = pm_op(dev->bus->pm, state); 1526 } else if (dev->bus->suspend) { 1527 pm_dev_dbg(dev, state, "legacy bus "); 1528 error = legacy_suspend(dev, state, dev->bus->suspend, 1529 "legacy bus "); 1530 goto End; 1531 } 1532 } 1533 1534 Run: 1535 if (!callback && dev->driver && dev->driver->pm) { 1536 info = "driver "; 1537 callback = pm_op(dev->driver->pm, state); 1538 } 1539 1540 error = dpm_run_callback(callback, dev, state, info); 1541 1542 End: 1543 if (!error) { 1544 struct device *parent = dev->parent; 1545 1546 dev->power.is_suspended = true; 1547 if (parent) { 1548 spin_lock_irq(&parent->power.lock); 1549 1550 dev->parent->power.direct_complete = false; 1551 if (dev->power.wakeup_path 1552 && !dev->parent->power.ignore_children) 1553 dev->parent->power.wakeup_path = true; 1554 1555 spin_unlock_irq(&parent->power.lock); 1556 } 1557 dpm_clear_suppliers_direct_complete(dev); 1558 } 1559 1560 device_unlock(dev); 1561 dpm_watchdog_clear(&wd); 1562 1563 Complete: 1564 if (error) 1565 async_error = error; 1566 1567 complete_all(&dev->power.completion); 1568 TRACE_SUSPEND(error); 1569 return error; 1570 } 1571 1572 static void async_suspend(void *data, async_cookie_t cookie) 1573 { 1574 struct device *dev = (struct device *)data; 1575 int error; 1576 1577 error = __device_suspend(dev, pm_transition, true); 1578 if (error) { 1579 dpm_save_failed_dev(dev_name(dev)); 1580 pm_dev_err(dev, pm_transition, " async", error); 1581 } 1582 1583 put_device(dev); 1584 } 1585 1586 static int device_suspend(struct device *dev) 1587 { 1588 reinit_completion(&dev->power.completion); 1589 1590 if (is_async(dev)) { 1591 get_device(dev); 1592 async_schedule(async_suspend, dev); 1593 return 0; 1594 } 1595 1596 return __device_suspend(dev, pm_transition, false); 1597 } 1598 1599 /** 1600 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. 1601 * @state: PM transition of the system being carried out. 1602 */ 1603 int dpm_suspend(pm_message_t state) 1604 { 1605 ktime_t starttime = ktime_get(); 1606 int error = 0; 1607 1608 trace_suspend_resume(TPS("dpm_suspend"), state.event, true); 1609 might_sleep(); 1610 1611 cpufreq_suspend(); 1612 1613 mutex_lock(&dpm_list_mtx); 1614 pm_transition = state; 1615 async_error = 0; 1616 while (!list_empty(&dpm_prepared_list)) { 1617 struct device *dev = to_device(dpm_prepared_list.prev); 1618 1619 get_device(dev); 1620 mutex_unlock(&dpm_list_mtx); 1621 1622 error = device_suspend(dev); 1623 1624 mutex_lock(&dpm_list_mtx); 1625 if (error) { 1626 pm_dev_err(dev, state, "", error); 1627 dpm_save_failed_dev(dev_name(dev)); 1628 put_device(dev); 1629 break; 1630 } 1631 if (!list_empty(&dev->power.entry)) 1632 list_move(&dev->power.entry, &dpm_suspended_list); 1633 put_device(dev); 1634 if (async_error) 1635 break; 1636 } 1637 mutex_unlock(&dpm_list_mtx); 1638 async_synchronize_full(); 1639 if (!error) 1640 error = async_error; 1641 if (error) { 1642 suspend_stats.failed_suspend++; 1643 dpm_save_failed_step(SUSPEND_SUSPEND); 1644 } 1645 dpm_show_time(starttime, state, error, NULL); 1646 trace_suspend_resume(TPS("dpm_suspend"), state.event, false); 1647 return error; 1648 } 1649 1650 /** 1651 * device_prepare - Prepare a device for system power transition. 1652 * @dev: Device to handle. 1653 * @state: PM transition of the system being carried out. 1654 * 1655 * Execute the ->prepare() callback(s) for given device. No new children of the 1656 * device may be registered after this function has returned. 1657 */ 1658 static int device_prepare(struct device *dev, pm_message_t state) 1659 { 1660 int (*callback)(struct device *) = NULL; 1661 int ret = 0; 1662 1663 if (dev->power.syscore) 1664 return 0; 1665 1666 /* 1667 * If a device's parent goes into runtime suspend at the wrong time, 1668 * it won't be possible to resume the device. To prevent this we 1669 * block runtime suspend here, during the prepare phase, and allow 1670 * it again during the complete phase. 1671 */ 1672 pm_runtime_get_noresume(dev); 1673 1674 device_lock(dev); 1675 1676 dev->power.wakeup_path = device_may_wakeup(dev); 1677 1678 if (dev->power.no_pm_callbacks) { 1679 ret = 1; /* Let device go direct_complete */ 1680 goto unlock; 1681 } 1682 1683 if (dev->pm_domain) 1684 callback = dev->pm_domain->ops.prepare; 1685 else if (dev->type && dev->type->pm) 1686 callback = dev->type->pm->prepare; 1687 else if (dev->class && dev->class->pm) 1688 callback = dev->class->pm->prepare; 1689 else if (dev->bus && dev->bus->pm) 1690 callback = dev->bus->pm->prepare; 1691 1692 if (!callback && dev->driver && dev->driver->pm) 1693 callback = dev->driver->pm->prepare; 1694 1695 if (callback) 1696 ret = callback(dev); 1697 1698 unlock: 1699 device_unlock(dev); 1700 1701 if (ret < 0) { 1702 suspend_report_result(callback, ret); 1703 pm_runtime_put(dev); 1704 return ret; 1705 } 1706 /* 1707 * A positive return value from ->prepare() means "this device appears 1708 * to be runtime-suspended and its state is fine, so if it really is 1709 * runtime-suspended, you can leave it in that state provided that you 1710 * will do the same thing with all of its descendants". This only 1711 * applies to suspend transitions, however. 1712 */ 1713 spin_lock_irq(&dev->power.lock); 1714 dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND; 1715 spin_unlock_irq(&dev->power.lock); 1716 return 0; 1717 } 1718 1719 /** 1720 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition. 1721 * @state: PM transition of the system being carried out. 1722 * 1723 * Execute the ->prepare() callback(s) for all devices. 1724 */ 1725 int dpm_prepare(pm_message_t state) 1726 { 1727 int error = 0; 1728 1729 trace_suspend_resume(TPS("dpm_prepare"), state.event, true); 1730 might_sleep(); 1731 1732 /* 1733 * Give a chance for the known devices to complete their probes, before 1734 * disable probing of devices. This sync point is important at least 1735 * at boot time + hibernation restore. 1736 */ 1737 wait_for_device_probe(); 1738 /* 1739 * It is unsafe if probing of devices will happen during suspend or 1740 * hibernation and system behavior will be unpredictable in this case. 1741 * So, let's prohibit device's probing here and defer their probes 1742 * instead. The normal behavior will be restored in dpm_complete(). 1743 */ 1744 device_block_probing(); 1745 1746 mutex_lock(&dpm_list_mtx); 1747 while (!list_empty(&dpm_list)) { 1748 struct device *dev = to_device(dpm_list.next); 1749 1750 get_device(dev); 1751 mutex_unlock(&dpm_list_mtx); 1752 1753 trace_device_pm_callback_start(dev, "", state.event); 1754 error = device_prepare(dev, state); 1755 trace_device_pm_callback_end(dev, error); 1756 1757 mutex_lock(&dpm_list_mtx); 1758 if (error) { 1759 if (error == -EAGAIN) { 1760 put_device(dev); 1761 error = 0; 1762 continue; 1763 } 1764 printk(KERN_INFO "PM: Device %s not prepared " 1765 "for power transition: code %d\n", 1766 dev_name(dev), error); 1767 put_device(dev); 1768 break; 1769 } 1770 dev->power.is_prepared = true; 1771 if (!list_empty(&dev->power.entry)) 1772 list_move_tail(&dev->power.entry, &dpm_prepared_list); 1773 put_device(dev); 1774 } 1775 mutex_unlock(&dpm_list_mtx); 1776 trace_suspend_resume(TPS("dpm_prepare"), state.event, false); 1777 return error; 1778 } 1779 1780 /** 1781 * dpm_suspend_start - Prepare devices for PM transition and suspend them. 1782 * @state: PM transition of the system being carried out. 1783 * 1784 * Prepare all non-sysdev devices for system PM transition and execute "suspend" 1785 * callbacks for them. 1786 */ 1787 int dpm_suspend_start(pm_message_t state) 1788 { 1789 int error; 1790 1791 error = dpm_prepare(state); 1792 if (error) { 1793 suspend_stats.failed_prepare++; 1794 dpm_save_failed_step(SUSPEND_PREPARE); 1795 } else 1796 error = dpm_suspend(state); 1797 return error; 1798 } 1799 EXPORT_SYMBOL_GPL(dpm_suspend_start); 1800 1801 void __suspend_report_result(const char *function, void *fn, int ret) 1802 { 1803 if (ret) 1804 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); 1805 } 1806 EXPORT_SYMBOL_GPL(__suspend_report_result); 1807 1808 /** 1809 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete. 1810 * @dev: Device to wait for. 1811 * @subordinate: Device that needs to wait for @dev. 1812 */ 1813 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) 1814 { 1815 dpm_wait(dev, subordinate->power.async_suspend); 1816 return async_error; 1817 } 1818 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); 1819 1820 /** 1821 * dpm_for_each_dev - device iterator. 1822 * @data: data for the callback. 1823 * @fn: function to be called for each device. 1824 * 1825 * Iterate over devices in dpm_list, and call @fn for each device, 1826 * passing it @data. 1827 */ 1828 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)) 1829 { 1830 struct device *dev; 1831 1832 if (!fn) 1833 return; 1834 1835 device_pm_lock(); 1836 list_for_each_entry(dev, &dpm_list, power.entry) 1837 fn(dev, data); 1838 device_pm_unlock(); 1839 } 1840 EXPORT_SYMBOL_GPL(dpm_for_each_dev); 1841 1842 static bool pm_ops_is_empty(const struct dev_pm_ops *ops) 1843 { 1844 if (!ops) 1845 return true; 1846 1847 return !ops->prepare && 1848 !ops->suspend && 1849 !ops->suspend_late && 1850 !ops->suspend_noirq && 1851 !ops->resume_noirq && 1852 !ops->resume_early && 1853 !ops->resume && 1854 !ops->complete; 1855 } 1856 1857 void device_pm_check_callbacks(struct device *dev) 1858 { 1859 spin_lock_irq(&dev->power.lock); 1860 dev->power.no_pm_callbacks = 1861 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && 1862 !dev->bus->suspend && !dev->bus->resume)) && 1863 (!dev->class || (pm_ops_is_empty(dev->class->pm) && 1864 !dev->class->suspend && !dev->class->resume)) && 1865 (!dev->type || pm_ops_is_empty(dev->type->pm)) && 1866 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && 1867 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && 1868 !dev->driver->suspend && !dev->driver->resume)); 1869 spin_unlock_irq(&dev->power.lock); 1870 } 1871