/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
	__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}
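
/*
 * For illustration only: a minimal sketch of where the callback lookup in
 * __rpm_get_callback() above finds its callbacks.  The foo_* names are
 * hypothetical; struct dev_pm_ops and SET_RUNTIME_PM_OPS() are the real
 * API.  A driver filling in its dev_pm_ops this way has its callbacks
 * found through dev->driver->pm, unless a PM domain, type, class or bus
 * provides ops that take precedence in the lookup order above.
 */
static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
	/* Put the (hypothetical) hardware into a low-power state here. */
	return 0;
}

static int __maybe_unused foo_runtime_resume(struct device *dev)
{
	/* Bring the (hypothetical) hardware back to full power here. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops __maybe_unused = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};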

/*
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 last_busy, expires = 0;
	u64 now = ktime_get_mono_fast_ns();

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = READ_ONCE(dev->power.last_busy);

	expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires <= now)
		expires = 0;	/* Already expired. */

out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/*
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by a block device or network
 * device driver, to solve the following deadlock problem during
 * runtime resume/suspend:
 *
 * If a memory allocation with GFP_KERNEL is made inside the runtime
 * resume/suspend callback of any one of the device's ancestors (or of
 * the block device itself), a deadlock may be triggered inside the
 * allocation, since it might not complete until the block device
 * becomes active and the involved page I/O finishes.  This situation
 * was first pointed out by Alan Stern.  Network devices are involved
 * in iSCSI-like situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable ancestors any more if the flag was
		 * already set on this device.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag on the parent device only if none of its
		 * children has it set, because an ancestor's flag may have
		 * been set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
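
/*
 * A minimal sketch of the intended pairing described above, for a block
 * or network device driver.  foo_add_disk_device() and
 * foo_remove_disk_device() are hypothetical; device_add()/device_del()
 * and pm_runtime_set_memalloc_noio() are the real API.
 */
static int __maybe_unused foo_add_disk_device(struct device *dev)
{
	int error = device_add(dev);

	if (error)
		return error;

	/* Runtime PM callbacks of dev and its ancestors now allocate with GFP_NOIO. */
	pm_runtime_set_memalloc_noio(dev, true);
	return 0;
}

static void __maybe_unused foo_remove_disk_device(struct device *dev)
{
	pm_runtime_set_memalloc_noio(dev, false);
	device_del(dev);
}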

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!dev->power.ignore_children &&
		 atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
		  && dev->power.runtime_status == RPM_SUSPENDING)
		 || (dev->power.request_pending
		     && dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
		    READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		refcount_inc(&link->rpm_active);
	}
	return 0;
}

static void rpm_put_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
			continue;

		while (refcount_dec_not_one(&link->rpm_active))
			pm_runtime_put(link->supplier);
	}
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval)
				goto fail;

			device_links_read_unlock(idx);
		}
	}

	retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links
		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
			|| (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

fail:
			rpm_put_suppliers(dev);

			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}
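
/*
 * The supplier handling above is driven by device links.  A minimal
 * sketch of how such a link is created so that rpm_get_suppliers() and
 * rpm_put_suppliers() manage the supplier across the consumer's resumes
 * and suspends.  foo_link_to_supplier() is hypothetical;
 * device_link_add() and DL_FLAG_PM_RUNTIME are the real API.
 */
static int __maybe_unused foo_link_to_supplier(struct device *consumer,
					       struct device *supplier)
{
	struct device_link *link;

	/* DL_FLAG_PM_RUNTIME makes runtime PM of @consumer drag @supplier along. */
	link = device_link_add(consumer, supplier, DL_FLAG_PM_RUNTIME);

	return link ? 0 : -EINVAL;
}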

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
		 dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * A deadlock might occur if a memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend or
		 * runtime_resume callback of a block device's ancestor
		 * or of the block device itself.  A network device may
		 * be regarded as part of an iSCSI block device, so
		 * network devices and their ancestors should be marked
		 * memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}
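
/*
 * A sketch of a ->runtime_idle() callback matching the contract of
 * rpm_idle() above: returning 0 lets the core go on to
 * rpm_suspend(dev, RPM_AUTO), while a negative value vetoes the suspend.
 * foo_runtime_idle() and its veto condition are hypothetical.
 */
static int __maybe_unused foo_runtime_idle(struct device *dev)
{
	/* Example veto condition: unsuspended children still exist. */
	if (atomic_read(&dev->power.child_count) > 0)
		return -EBUSY;

	return 0;	/* Allow the core to carry out an autosuspend. */
}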

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry out that resume; otherwise send an idle
 * notification for the device's parent (provided the suspend succeeded and
 * neither parent->power.ignore_children nor dev->power.irq_safe is set).
 * If ->runtime_suspend() fails with -EAGAIN or -EBUSY, and the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
		 !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		u64 expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires &&
			      dev->power.timer_expires <= expires)) {
				/*
				 * We add a slack of 25% to gather wakeups
				 * without sacrificing the granularity.
				 */
				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
						(NSEC_PER_MSEC >> 2);

				dev->power.timer_expires = expires;
				hrtimer_start_range_ns(&dev->power.suspend_timer,
						       ns_to_ktime(expires),
						       slack,
						       HRTIMER_MODE_ABS);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

fail:
	dev_pm_disable_wake_irq_check(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}
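
/*
 * A minimal sketch of the driver-side idiom that feeds the RPM_AUTO path
 * above: at the end of an I/O request, refresh the inactivity timestamp
 * and drop the usage count with an autosuspend request.  foo_io_done() is
 * hypothetical; pm_runtime_mark_last_busy() and
 * pm_runtime_put_autosuspend() are the real helpers.
 */
static void __maybe_unused foo_io_done(struct device *dev)
{
	/* Restart the autosuspend-delay window from "now". */
	pm_runtime_mark_last_busy(dev);
	/* Queue an RPM_ASYNC | RPM_AUTO suspend request. */
	pm_runtime_put_autosuspend(dev);
}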

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and has not
		 * been set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer armed by pm_schedule_suspend(), embedded in dev->power.
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
	struct device *dev = container_of(timer, struct device, power.suspend_timer);
	unsigned long flags;
	u64 expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * If 'expires' is after the current time, we've been called
	 * too early, so do nothing.
	 */
	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	u64 expires;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
	dev->power.timer_expires = expires;
	dev->power.timer_autosuspends = 0;
	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
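
/*
 * For reference, the public helpers in include/linux/pm_runtime.h are
 * thin static inline wrappers around the three entry points above and
 * below (shown here as a sketch; see that header for the authoritative
 * definitions):
 *
 *	pm_runtime_idle(dev)            -> __pm_runtime_idle(dev, 0)
 *	pm_runtime_put(dev)             -> __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put_sync(dev)        -> __pm_runtime_idle(dev, RPM_GET_PUT)
 *	pm_runtime_suspend(dev)         -> __pm_runtime_suspend(dev, 0)
 *	pm_runtime_autosuspend(dev)     -> __pm_runtime_suspend(dev, RPM_AUTO)
 *	pm_runtime_put_autosuspend(dev) -> __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 *	pm_runtime_resume(dev)          -> __pm_runtime_resume(dev, 0)
 *	pm_runtime_get(dev)             -> __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_get_sync(dev)        -> __pm_runtime_resume(dev, RPM_GET_PUT)
 */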

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
		       dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

/**
 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
 * @dev: Device to handle.
 *
 * Return -EINVAL if runtime PM is disabled for the device.
 *
 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
 * and the runtime PM usage counter is nonzero, increment the counter and
 * return 1.  Otherwise return 0 without changing the counter.
 */
int pm_runtime_get_if_in_use(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = dev->power.disable_depth > 0 ? -EINVAL :
		dev->power.runtime_status == RPM_ACTIVE
			&& atomic_inc_not_zero(&dev->power.usage_count);
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
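
/*
 * A sketch of the typical pm_runtime_get_if_in_use() pattern: touch the
 * hardware only if it is already powered up, without waiting for a
 * resume.  foo_poll_hw() is hypothetical.
 */
static void __maybe_unused foo_poll_hw(struct device *dev)
{
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;	/* Suspended (0) or runtime PM disabled (-EINVAL). */

	/* ... the device is guaranteed to stay active here ... */

	pm_runtime_put(dev);
}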

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 *
 * If @dev has any suppliers (as reflected by device links to them), and @status
 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
 * of the @status value) and the suppliers will be deactivated on exit.  The
 * error returned by the failing supplier activation will be returned in that
 * case.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irq(&dev->power.lock);

	/*
	 * Prevent PM-runtime from being enabled for the device or return an
	 * error if it is enabled already and working.
	 */
	if (dev->power.runtime_error || dev->power.disable_depth)
		dev->power.disable_depth++;
	else
		error = -EAGAIN;

	spin_unlock_irq(&dev->power.lock);

	if (error)
		return error;

	/*
	 * If the new status is RPM_ACTIVE, the suppliers can be activated
	 * upfront regardless of the current status, because next time
	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
	 * involved will be dropped down to one anyway.
	 */
	if (status == RPM_ACTIVE) {
		int idx = device_links_read_lock();

		error = rpm_get_suppliers(dev);
		if (error)
			status = RPM_SUSPENDED;

		device_links_read_unlock(idx);
	}

	spin_lock_irq(&dev->power.lock);

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active while it has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error) {
			status = RPM_SUSPENDED;
			goto out;
		}
	}

out_set:
	__update_runtime_status(dev, status);
	if (!error)
		dev->power.runtime_error = 0;

out:
	spin_unlock_irq(&dev->power.lock);

	if (notify_parent)
		pm_request_idle(parent);

	if (status == RPM_SUSPENDED) {
		int idx = device_links_read_lock();

		rpm_put_suppliers(dev);

		device_links_read_unlock(idx);
	}

	pm_runtime_enable(dev);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
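
/*
 * A minimal probe-time sketch of this function in use, via its
 * pm_runtime_set_active() wrapper: record that the hardware is already
 * powered up before enabling runtime PM.  foo_probe() is hypothetical.
 */
static int __maybe_unused foo_probe(struct device *dev)
{
	int error;

	/* The (hypothetical) hardware comes out of reset powered on. */
	error = pm_runtime_set_active(dev);
	if (error)
		return error;

	pm_runtime_enable(dev);
	return 0;
}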

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	WARN(!dev->power.disable_depth &&
	     dev->power.runtime_status == RPM_SUSPENDED &&
	     !dev->power.ignore_children &&
	     atomic_read(&dev->power.child_count) > 0,
	     "Enabling runtime PM for inactive device (%s) with active children\n",
	     dev_name(dev));

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);

out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
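
/*
 * A sketch of what pm_runtime_irq_safe() enables: synchronous runtime PM
 * calls from atomic context.  foo_handle_event_atomic() is hypothetical
 * and assumes pm_runtime_irq_safe(dev) was called beforehand.
 */
static void __maybe_unused foo_handle_event_atomic(struct device *dev)
{
	/* Legal in atomic context only because the device is irq-safe. */
	pm_runtime_get_sync(dev);

	/* ... program the (hypothetical) hardware ... */

	pm_runtime_put(dev);
}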

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
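
/*
 * A sketch of the usual probe-time autosuspend setup built on the two
 * helpers above.  foo_setup_autosuspend() is hypothetical and the 2000 ms
 * delay is an arbitrary example value.
 */
static void __maybe_unused foo_setup_autosuspend(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* milliseconds */
	pm_runtime_use_autosuspend(dev);	/* wraps __pm_runtime_use_autosuspend(dev, true) */
	pm_runtime_enable(dev);
}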

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	dev->power.suspend_timer.function = pm_suspend_timer_fn;

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
 * @dev: Device whose driver is going to be removed.
 *
 * Check links from this device to any consumers and if any of them have active
 * runtime PM references to the device, drop the usage counter of the device
 * (as many times as needed).
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 *
 * Since the device is guaranteed to be runtime-active at the point this is
 * called, nothing else needs to be done here.
 *
 * Moreover, this is called after device_links_busy() has returned 'false', so
 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
 * therefore rpm_active can't be manipulated concurrently.
 */
void pm_runtime_clean_up_links(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		while (refcount_dec_not_one(&link->rpm_active))
			pm_runtime_put_noidle(dev);
	}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->flags & DL_FLAG_PM_RUNTIME) {
			link->supplier_preactivated = true;
			refcount_inc(&link->rpm_active);
			pm_runtime_get_sync(link->supplier);
		}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->supplier_preactivated) {
			link->supplier_preactivated = false;
			if (refcount_dec_not_one(&link->rpm_active))
				pm_runtime_put(link->supplier);
		}

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

void pm_runtime_drop_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we safely can check the device's runtime PM status and
 * if it is active, invoke its ->runtime_suspend callback to suspend it and
 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
 * usage and children counters don't indicate that the device was in use before
 * the system-wide transition under way, decrement its parent's children counter
 * (if there is a parent).  Keep runtime PM disabled to preserve the state
 * unless we encounter errors.
 *
 * Typically this function is invoked from a system suspend callback to make
 * sure the device is put into a low-power state.  It should only be used
 * during system-wide PM transitions to sleep states, and it assumes that the
 * analogous pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function will be called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev))
		pm_runtime_set_suspended(dev);
	else
		__update_runtime_status(dev, RPM_SUSPENDED);

	return 0;

err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Before invoking this function, the caller is expected to have brought the
 * device into a low-power state by a call to pm_runtime_force_suspend().
 * Here we reverse those actions and bring the device back to full power if it
 * is expected to be used on system resume; otherwise, we defer the resume so
 * that it is managed via runtime PM.
 *
 * Typically this function is invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
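
/*
 * A sketch of the intended pairing: a driver can reuse its runtime PM
 * callbacks for system-wide sleep transitions by pointing its system
 * sleep operations at the two helpers above.  foo_sleep_pm_ops is
 * hypothetical; SET_SYSTEM_SLEEP_PM_OPS() is the real macro.
 */
static const struct dev_pm_ops foo_sleep_pm_ops __maybe_unused = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};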