/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
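/*
 * Devices start out on dpm_list and migrate to dpm_prepared_list after
 * ->prepare(), to dpm_suspended_list after ->suspend(), and to
 * dpm_noirq_list after ->suspend_noirq().  Resume walks the lists in the
 * opposite direction, so each phase only ever scans the devices that
 * completed the previous phase.
 */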
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
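/*
 * pm_op() below maps a system PM event to one callback in a subsystem's
 * struct dev_pm_ops: PM_EVENT_SUSPEND to ->suspend(), PM_EVENT_RESUME to
 * ->resume(), PM_EVENT_FREEZE/QUIESCE to ->freeze(), PM_EVENT_HIBERNATE to
 * ->poweroff(), PM_EVENT_THAW/RECOVER to ->thaw() and PM_EVENT_RESTORE to
 * ->restore().  As a rough, hypothetical illustration (the foo_* names do
 * not appear in this file), a driver might supply:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend  = foo_suspend,
 *		.resume   = foo_resume,
 *		.freeze   = foo_freeze,
 *		.thaw     = foo_thaw,
 *		.poweroff = foo_poweroff,
 *		.restore  = foo_restore,
 *	};
 *
 * A callback left NULL is simply skipped; the event is still treated as
 * handled and no error is returned.
 */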
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
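/*
 * The _noirq variants below run after suspend_device_irqs() has disabled
 * device interrupt handlers (and, on resume, before resume_device_irqs()
 * re-enables them), so the callbacks they invoke must not rely on the
 * device's interrupt handler running.
 */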
/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
		       const struct dev_pm_ops *ops,
		       pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/
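/*
 * For every phase the PM core picks exactly one set of callbacks per
 * device, in decreasing order of precedence: the device's PM domain
 * (dev->pm_domain), then its type (dev->type->pm), then its class
 * (dev->class->pm) and finally its bus (dev->bus->pm).  Legacy bus and
 * class ->suspend()/->resume() methods are only used when the respective
 * dev_pm_ops pointer is NULL.
 */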
/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error)
			pm_dev_err(dev, state, " early", error);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
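/*
 * The dpm_* loops drop dpm_list_mtx around every callback so that the
 * callbacks may sleep and new devices may be registered; get_device() is
 * taken first so the device cannot go away while the lock is dropped.
 * device_resume() additionally waits for the parent's resume to finish
 * (dpm_wait() on dev->parent) before touching the child, and signals its
 * own completion so that waiting children can proceed.
 */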
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error)
				pm_dev_err(dev, state, "", error);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}
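/*
 * ->complete() is the mirror image of ->prepare(): it runs with interrupts
 * and runtime PM available again, and dpm_complete() walks
 * dpm_prepared_list in reverse registration order, clearing
 * power.is_prepared so that new children may be registered below the
 * device once more.
 */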
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "completing power domain ");
		if (dev->pm_domain->ops.complete)
			dev->pm_domain->ops.complete(dev);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices that have been prepared
 * (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
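/*
 * resume_event() is used for error recovery: if a "late suspend" fails part
 * way through, dpm_suspend_noirq() rolls the already-suspended devices back
 * by calling dpm_resume_noirq() with the matching resume message (for
 * example PMSG_RECOVER for a failed freeze, PMSG_RESTORE for a failed
 * hibernation power-off).
 */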
/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error;

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "LATE power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
		if (error)
			return error;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			return error;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			return error;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			return error;
	}

	return 0;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
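/*
 * Suspend runs in the opposite order to resume: __device_suspend() first
 * waits (dpm_wait_for_children()) until every child has finished
 * suspending, and an error from any device, or a pending wakeup event, is
 * recorded in async_error so that both the synchronous loop and any
 * asynchronously scheduled suspends bail out early.
 */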
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		return 0;

	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		return 0;
	}

	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	dev->power.is_suspended = !error;

	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		__pm_runtime_disable(dev, false);
	}

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}
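/*
 * The prepare phase runs before any suspend callbacks.  dpm_prepare()
 * moves each device from dpm_list to dpm_prepared_list once its
 * ->prepare() callback has succeeded; -EAGAIN from ->prepare() is not
 * treated as fatal, while any other error aborts the whole transition and
 * is reported to the caller.
 */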
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		if (dev->pm_domain->ops.prepare)
			error = dev->pm_domain->ops.prepare(dev);
		suspend_report_result(dev->pm_domain->ops.prepare, error);
		if (error)
			goto End;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
	}

 End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);