/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/*
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
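
/*
 * Illustrative sketch (not part of the original file): a driver would
 * typically feed the expiration logic above by updating power.last_busy
 * and dropping its usage count with the autosuspend variant of "put".
 * The "foo" names below are hypothetical.
 *
 *	static void foo_io_done(struct foo_dev *foo)
 *	{
 *		pm_runtime_mark_last_busy(foo->dev);
 *		pm_runtime_put_autosuspend(foo->dev);
 *	}
 *
 * pm_runtime_autosuspend_expiration() then returns a nonzero time in
 * jiffies until the delay set with pm_runtime_set_autosuspend_delay()
 * has elapsed since the last pm_runtime_mark_last_busy() call.
 */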

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/*
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network device
 * drivers, to solve the deadlock problem during runtime resume/suspend:
 *
 *     If a memory allocation with GFP_KERNEL is made inside the runtime
 *     resume/suspend callback of any one of the device's ancestors (or of
 *     the block device itself), a deadlock may be triggered inside the
 *     allocation, since it might not complete until the block device
 *     becomes active and the involved page I/O finishes.  This situation
 *     was first pointed out by Alan Stern.  Network devices are involved
 *     in the iSCSI case.
 *
 * dev_hotplug_mutex is held in this function to handle the hotplug race,
 * because pm_runtime_set_memalloc_noio() may be called in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable the ancestors any more if the device
		 * has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the parent's flag only if none of its children
		 * has the flag set, because an ancestor's flag may have
		 * been set by any one of its descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
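
/*
 * Illustrative sketch (not part of the original file): a block device
 * driver would set the flag once the device is registered and clear it
 * again before the device goes away.  The "foo" names are hypothetical.
 *
 *	ret = device_add(&foo->dev);
 *	if (ret)
 *		return ret;
 *	pm_runtime_set_memalloc_noio(&foo->dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(&foo->dev, false);
 *	device_del(&foo->dev);
 */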

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) < 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_idle;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_idle;
	else
		callback = NULL;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->runtime_idle;

	if (callback)
		__rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval;
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * A deadlock might occur if a memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend or
		 * runtime_resume callback of a block device's ancestor
		 * or of the block device itself.  A network device may
		 * be part of an iSCSI block device, so network devices
		 * and their ancestors should be marked memalloc_noio
		 * too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeded, carry out a deferred resume if one was
 * requested while the callback was running; otherwise notify the parent
 * that it may be idle (unless the parent's power.ignore_children flag or
 * the device's power.irq_safe flag is set).  If ->runtime_suspend()
 * failed with -EAGAIN or -EBUSY, and if the RPM_AUTO flag is set and the
 * next autosuspend-delay expiration time is in the future, schedule
 * another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_suspend;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_suspend;
	else
		callback = NULL;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->runtime_suspend;

	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;

 fail:
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_resume;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_resume;
	else
		callback = NULL;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->runtime_resume;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
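
/*
 * Illustrative sketch (not part of the original file): a driver that does
 * not use autosuspend can still ask for a delayed suspend after finishing
 * a burst of work.  The "foo" names and the 500 ms delay are hypothetical.
 *
 *	static void foo_burst_done(struct foo_dev *foo)
 *	{
 *		pm_runtime_put_noidle(foo->dev);
 *		pm_schedule_suspend(foo->dev, 500);
 *	}
 *
 * The request is asynchronous: once the timer expires,
 * pm_suspend_timer_fn() queues the actual suspend on pm_wq.
 */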

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
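
/*
 * Illustrative sketch (not part of the original file): drivers normally
 * reach the entry points above through the pm_runtime_get_sync() and
 * pm_runtime_put() wrappers from <linux/pm_runtime.h>.  The "foo" names
 * are hypothetical.
 *
 *	static int foo_do_io(struct foo_dev *foo)
 *	{
 *		int ret;
 *
 *		ret = pm_runtime_get_sync(foo->dev);	// resume + usage_count++
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(foo->dev);
 *			return ret;
 *		}
 *		ret = foo_issue_transfer(foo);
 *		pm_runtime_put(foo->dev);		// usage_count--, async idle
 *		return ret;
 *	}
 */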

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
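
/*
 * Illustrative sketch (not part of the original file): a driver whose
 * hardware comes up already powered typically declares that before
 * enabling runtime PM in probe, via the pm_runtime_set_active() wrapper
 * around __pm_runtime_set_status().  The "foo" names are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		foo_hw_power_on(pdev);
 *		pm_runtime_set_active(&pdev->dev);
 *		pm_runtime_enable(&pdev->dev);
 *		return 0;
 *	}
 */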

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
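
/*
 * Illustrative sketch (not part of the original file): the usual probe-time
 * setup for the two autosuspend knobs above, using the wrappers from
 * <linux/pm_runtime.h>.  The "foo" names and the 2000 ms delay are
 * hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
 *		pm_runtime_use_autosuspend(&pdev->dev);
 *		pm_runtime_enable(&pdev->dev);
 *		return 0;
 *	}
 */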

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put(dev->parent);
}