/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after parents, and are
 * inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}
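/*
 * Illustrative sketch (summary, not part of this file's API): per the
 * header comment above, the driver core invokes device_pm_add() while
 * registering a device, roughly:
 *
 *	device_register(dev)
 *	  device_add(dev)
 *	    device_pm_add(dev)		<- dev joins dpm_list
 *
 * device_pm_remove() below is the counterpart on unregistration.
 */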
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
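/*
 * Note on ordering (summary, no new mechanism): every device carries a
 * completion that is reinitialized before a transition and completed when
 * the device's callbacks finish.  device_resume() waits on the parent's
 * completion before touching the child, and __device_suspend() waits on
 * all children via dpm_wait_for_children(), so parent/child ordering is
 * preserved even when callbacks run asynchronously.
 */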
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
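/*
 * Example (illustrative only; "foo" and its callbacks are hypothetical):
 * a driver supplies the callbacks that pm_op() dispatches on through a
 * struct dev_pm_ops, e.g.
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend  = foo_suspend,	- PM_EVENT_SUSPEND
 *		.resume   = foo_resume,		- PM_EVENT_RESUME
 *		.freeze   = foo_freeze,		- PM_EVENT_FREEZE/QUIESCE
 *		.thaw     = foo_thaw,		- PM_EVENT_THAW/RECOVER
 *		.poweroff = foo_poweroff,	- PM_EVENT_HIBERNATE
 *		.restore  = foo_restore,	- PM_EVENT_RESTORE
 *	};
 *
 * Each callback has the signature int (*cb)(struct device *dev).
 */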
/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
		       const struct dev_pm_ops *ops,
		       pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/
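/*
 * Summary of the resume-side sequence (assembled from the functions below;
 * the entry points are called by the sleep core, e.g. code under
 * kernel/power/):
 *
 *	dpm_resume_noirq(state)		"noirq" callbacks, run while device
 *					interrupts are still disabled
 *	dpm_resume_end(state)
 *	  dpm_resume(state)		regular ->resume() callbacks
 *	  dpm_complete(state)		->complete(), reopens registration
 */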
/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
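/*
 * Background (summary; signatures as of this era of the driver core):
 * "legacy" callbacks predate struct dev_pm_ops and live directly in
 * struct bus_type / struct class, roughly:
 *
 *	int (*suspend)(struct device *dev, pm_message_t state);
 *	int (*resume)(struct device *dev);
 *
 * legacy_resume() above and legacy_suspend() below only wrap them with
 * the initcall_debug timing instrumentation.
 */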
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}
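/*
 * Example (illustrative): a driver whose device can safely suspend and
 * resume out of order with unrelated devices may opt into the async path
 * checked by is_async() above, typically at probe time:
 *
 *	device_enable_async_suspend(dev);	- sets power.async_suspend
 *
 * Ordering with respect to the device's own parent and children is still
 * enforced through the completion mechanism.
 */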
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "completing power domain ");
		if (dev->pm_domain->ops.complete)
			dev->pm_domain->ops.complete(dev);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices in dpm_prepared_list
 * (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
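/*
 * For example, when dpm_suspend_noirq(PMSG_SUSPEND) below fails part-way,
 * it unwinds with dpm_resume_noirq(resume_event(PMSG_SUSPEND)), i.e. with
 * PMSG_RESUME, so devices that were already suspended are brought back up.
 */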
/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "LATE power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
		if (error)
			return error;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			return error;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			return error;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			return error;
	}

	return 0;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
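/*
 * Example (illustrative only; "foo_legacy_suspend" and "foo_quiesce_hw"
 * are hypothetical): a legacy suspend callback can distinguish the
 * transitions by inspecting state.event, e.g.
 *
 *	static int foo_legacy_suspend(struct device *dev, pm_message_t state)
 *	{
 *		if (state.event == PM_EVENT_SUSPEND)
 *			return foo_quiesce_hw(dev);
 *		return 0;	- nothing special for freeze/poweroff here
 *	}
 */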
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		return 0;

	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		return 0;
	}

	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path && dev->parent)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		__pm_runtime_disable(dev, false);
	}

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}
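/*
 * Note: an asynchronously suspended device reports failure through the
 * shared async_error variable rather than through device_suspend()'s
 * return value; dpm_suspend() below picks it up after
 * async_synchronize_full().
 */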
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		if (dev->pm_domain->ops.prepare)
			error = dev->pm_domain->ops.prepare(dev);
		suspend_report_result(dev->pm_domain->ops.prepare, error);
		if (error)
			goto End;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
	}

 End:
	device_unlock(dev);

	return error;
}
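/*
 * Note on wakeup_path (summary of the logic in device_prepare() and
 * __device_suspend()): device_prepare() seeds power.wakeup_path from
 * device_may_wakeup(), and __device_suspend() propagates it to the
 * parent, so every ancestor of a wakeup-enabled device ends up flagged
 * as lying on a wakeup path.
 */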
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);