/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices.  Protected by dpm_list_mtx.
 */
static bool transition_started;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent) {
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
				 dev_name(dev->parent));
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road.
		 */
		dev_WARN(dev, "Parentless device registered during a PM transition\n");
	}

	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
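/*
 * Context sketch (not part of this file): the driver model core calls
 * device_pm_add() from device_add() and device_pm_remove() from device_del()
 * in drivers/base/core.c, so a device sits on dpm_list exactly while it is
 * registered, roughly:
 *
 *	int device_add(struct device *dev)
 *	{
 *		...
 *		device_pm_add(dev);	<-- dev joins dpm_list here
 *		...
 *	}
 */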
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
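/*
 * Ordering sketch (derived from the code in this file): for a hierarchy
 * root -> bridge -> leaf, __device_suspend(bridge) calls
 * dpm_wait_for_children() and so blocks on leaf's power.completion, while
 * device_resume(leaf) calls dpm_wait(leaf->parent, ...) and blocks on
 * bridge's completion.  Children are therefore suspended before, and
 * resumed after, their parents, even on the async paths.
 */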
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
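/*
 * Example (hypothetical driver, illustrative only): pm_op() dispatches on
 * state.event, so a driver that supplies a struct dev_pm_ops such as
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend	= foo_suspend,	<-- PM_EVENT_SUSPEND
 *		.resume		= foo_resume,	<-- PM_EVENT_RESUME
 *		.freeze		= foo_freeze,	<-- PM_EVENT_FREEZE/QUIESCE
 *		.thaw		= foo_thaw,	<-- PM_EVENT_THAW/RECOVER
 *		.poweroff	= foo_poweroff,	<-- PM_EVENT_HIBERNATE
 *		.restore	= foo_restore,	<-- PM_EVENT_RESTORE
 *	};
 *
 * gets each callback invoked by the matching case above.  The foo_* names
 * are assumptions for illustration only.
 */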
/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
		       const struct dev_pm_ops *ops,
		       pm_message_t state)
{
	int error = 0;
	ktime_t calltime, delta, rettime;

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
		       dev_name(dev), error,
		       (unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
	       kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
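/*
 * Worked example (illustrative): if calltime is taken 1,234,567 ns after
 * starttime, do_div() yields usecs64 = 1234 and dpm_show_time() prints
 * "... complete after 1.234 msecs".  The initcall_debug paths above use
 * ">> 10" (i.e. divide by 1024) instead, as a cheap approximation of
 * nanoseconds-to-microseconds.
 */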
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	}

End:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct list_head list;
	ktime_t starttime = ktime_get();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		if (dev->power.status > DPM_OFF) {
			int error;

			dev->power.status = DPM_OFF;
			mutex_unlock(&dpm_list_mtx);

			error = device_resume_noirq(dev, state);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
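/*
 * Example (hypothetical, illustrative only): a bus type that has not been
 * converted to struct dev_pm_ops is still handled, through legacy_resume()
 * above and legacy_suspend() below, via its old-style callbacks:
 *
 *	static int foo_bus_suspend(struct device *dev, pm_message_t state)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_bus_resume(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 * With bus_type.suspend/.resume set to these, device_resume() and
 * __device_suspend() take the "legacy" branches.  The foo_bus_* names are
 * assumptions, not part of this file.
 */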
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	dev->power.status = DPM_RESUMING;

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
		}
	}
End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;
	struct device *dev;
	ktime_t starttime = ktime_get();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_list, power.entry) {
		if (dev->power.status < DPM_OFF)
			continue;

		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_list)) {
		dev = to_device(dpm_list.next);
		get_device(dev);
		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}
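/*
 * Usage sketch (assumptions flagged): a device takes the async path checked
 * by is_async() when its power.async_suspend flag is set, typically by the
 * driver calling device_enable_async_suspend(dev) before registration, and
 * when the global pm_async_enabled switch (exposed as /sys/power/pm_async)
 * is on.  PM tracing disables the async path because pm_trace relies on
 * devices being handled one at a time.
 */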
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);
			pm_runtime_put_sync(dev);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
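/*
 * Call-order sketch (roughly how the system sleep code in kernel/power/
 * uses these entry points; illustrative, not part of this file):
 *
 *	dpm_suspend_start(PMSG_SUSPEND);	<-- prepare + suspend
 *	dpm_suspend_noirq(PMSG_SUSPEND);	<-- late/noirq suspend
 *	  ... platform enters the sleep state ...
 *	dpm_resume_noirq(PMSG_RESUME);		<-- early/noirq resume
 *	dpm_resume_end(PMSG_RESUME);		<-- resume + complete
 */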
/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

End:
	return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	struct list_head list;
	ktime_t starttime = ktime_get();
	int error = 0;

	INIT_LIST_HEAD(&list);
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice_tail(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
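/*
 * Note the mirrored ordering: device_suspend_noirq() above and
 * __device_suspend() below walk class -> type -> bus, the exact reverse of
 * device_resume_noirq() and device_resume(), so callbacks are unwound in
 * the opposite order to the one they were applied in.
 */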
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);
	device_lock(dev);

	if (async_error)
		goto End;

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

	if (!error)
		dev->power.status = DPM_OFF;

End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	struct list_head list;
	ktime_t starttime = ktime_get();
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
		if (async_error)
			break;
	}
	list_splice(&list, dpm_list.prev);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}
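/*
 * Failure propagation (as implemented above): the first device that fails
 * latches its error into async_error; __device_suspend() then returns early
 * for every subsequent device via the async_error check, without touching
 * it, and dpm_suspend() stops dequeueing devices and reports the error
 * after async_synchronize_full().
 */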
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of
 * the device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
			/* Wake-up requested during system sleep transition. */
			pm_runtime_put_sync(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_ERR "PM: Failed to prepare device %s "
			       "for power transition: error %d\n",
			       kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute
 * "suspend" callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
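/*
 * Logging sketch: suspend_report_result() (see include/linux/pm.h) is a
 * macro that passes __func__ to __suspend_report_result(), so a failing
 * callback is reported with its caller and its symbol via %pF.  With a
 * hypothetical foo_suspend() returning -EBUSY from pm_op(), the line would
 * look roughly like:
 *
 *	pm_op(): foo_suspend+0x0/0x40 returns -16
 */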
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
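/*
 * Usage sketch (hypothetical, illustrative only): a driver whose device
 * depends on another device outside its ancestor chain can order its own
 * resume behind that device's:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->supplier);
 *		if (error)
 *			return error;
 *		...
 *	}
 *
 * struct foo and its supplier pointer are assumptions for illustration.
 */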