/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_late_early_list);
LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}
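
/*
 * Illustrative sketch of the locking rule stated at the top of this file:
 * a path that needs both a device lock and dpm_list_mtx must take the
 * device lock first,
 *
 *	device_lock(dev);
 *	mutex_lock(&dpm_list_mtx);
 *	...
 *	mutex_unlock(&dpm_list_mtx);
 *	device_unlock(dev);
 *
 * and never the reverse, since device_pm_add() may run with a device lock
 * already held.
 */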

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		/* ">> 10" divides by 1024, cheaply approximating ns-to-us. */
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
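
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * the callbacks selected by pm_op() and friends below are supplied by
 * subsystems and drivers through a dev_pm_ops object, e.g.
 *
 *	static int foo_suspend(struct device *dev) { ... }
 *	static int foo_resume(struct device *dev) { ... }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * foo_* are made-up names; SET_SYSTEM_SLEEP_PM_OPS() from <linux/pm.h>
 * fills in the .suspend/.resume (and freeze/thaw/poweroff/restore) slots
 * that the lookup helpers below dereference.
 */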

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the callbacks returned here
 * are being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the callbacks
 * returned here are being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
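
/*
 * Summary of the dev_pm_ops slots selected by the three helpers above
 * (informational):
 *
 *	phase		suspend direction	resume direction
 *	ordinary	->suspend		->resume
 *	late/early	->suspend_late		->resume_early
 *	noirq		->suspend_noirq		->resume_noirq
 *
 * The hibernation events (freeze/thaw, poweroff/restore) map onto the
 * corresponding _late/_early/_noirq members in the same way.
 */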

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

	TRACE_RESUME(error);
	return error;
}
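
/*
 * Informational: during resume, devices flow back through the PM lists in
 * the reverse of the suspend direction,
 *
 *	dpm_noirq_list -> dpm_late_early_list -> dpm_suspended_list
 *		-> dpm_prepared_list -> dpm_list
 *
 * and each dpm_resume_*() stage below moves a device onto the next list
 * before dropping dpm_list_mtx to run the device's callback.
 */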

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
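
/*
 * Ordering note (informational): device_resume() below first waits for the
 * device's parent via dpm_wait(), runs the resume callbacks, and only then
 * fires power.completion, which is what async children block on.  Parent-
 * before-child ordering is therefore preserved even when some devices
 * resume asynchronously.
 */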

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
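
/*
 * Informational: the power.async_suspend flag consulted by is_async()
 * above is normally set through device_enable_async_suspend(), and the
 * global pm_async_enabled switch is exposed to user space as
 * /sys/power/pm_async.
 */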

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
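
/*
 * Note on dpm_complete() above (informational): devices are moved onto a
 * private list while dpm_list_mtx is dropped, so that ->complete()
 * callbacks may register new devices (which land on dpm_list proper);
 * the private list is spliced back into dpm_list afterwards.
 */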

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}
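
/*
 * Informational: during suspend, devices flow through the PM lists in this
 * direction,
 *
 *	dpm_list -> dpm_prepared_list -> dpm_suspended_list
 *		-> dpm_late_early_list -> dpm_noirq_list
 *
 * The suspend stages walk their source lists from the tail, so children are
 * handled before their parents; dpm_prepare() walks dpm_list from the head,
 * so parents are prepared before their children.
 */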

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);

	return error ? : dpm_suspend_noirq(state);
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
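
/*
 * Informational: the legacy_suspend() wrapper above exists for the old
 * bus->suspend(dev, state) and class->suspend(dev, state) callbacks that
 * predate struct dev_pm_ops; it adds the same initcall_debug timing and
 * error reporting that dpm_run_callback() provides for dev_pm_ops
 * callbacks.
 */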

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		return 0;

	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		return 0;
	}

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		__pm_runtime_disable(dev, false);
	}

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}
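
/*
 * Informational: failures in asynchronously suspended devices reach
 * dpm_suspend() through the file-global async_error (__device_suspend()
 * sets it and returns 0 in cases like a pending wakeup event), and
 * dpm_suspend() below checks async_error both inside its loop and after
 * async_synchronize_full().
 */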

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	return error;
}
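
/*
 * Informational: device_prepare() above seeds power.wakeup_path from
 * device_may_wakeup(); __device_suspend() then propagates a set flag up to
 * the parent (unless the parent ignores its children), so after dpm_suspend()
 * a set wakeup_path marks devices that lie on a wakeup path.
 */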

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
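
/*
 * Illustrative sketch (hypothetical driver code): a device that depends on
 * another device which is not its parent can serialize against it from its
 * own callback with device_pm_wait_for_dev(), e.g.
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_data *data = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, data->supplier);
 *		if (error)
 *			return error;
 *		...
 *	}
 *
 * foo_resume, foo_data and supplier are made-up names for the sketch.
 */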