/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		/* The shift by 10 approximates a division by NSEC_PER_USEC. */
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}

	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
				    error);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is being
 * executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device will not receive interrupts while the returned
 * callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	__pm_runtime_disable(dev, false);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: String printed with debug and error messages.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_suspended_if_enabled(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback)
		ret = callback(dev);

	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
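
/*
 * Illustrative sketch only (not used by this file): the callbacks dispatched
 * by the PM core above are normally supplied by individual drivers through a
 * struct dev_pm_ops.  The names foo_suspend(), foo_resume() and foo_driver
 * below are hypothetical and shown purely to illustrate the wiring; the
 * SIMPLE_DEV_PM_OPS() helper comes from <linux/pm.h>, and a real driver would
 * also include <linux/platform_device.h>.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		// Quiesce the hardware; invoked from dpm_suspend().
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		// Re-initialize the hardware; invoked from dpm_resume().
 *		return 0;
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.pm	= &foo_pm_ops,
 *		},
 *	};
 */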