/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	u64 now, last, delta;

	if (dev->power.disable_depth > 0)
		return;

	last = dev->power.accounting_timestamp;

	now = ktime_get_mono_fast_ns();
	dev->power.accounting_timestamp = now;

	/*
	 * Because ktime_get_mono_fast_ns() is not monotonic during
	 * timekeeping updates, ensure that 'now' is after the last saved
	 * timestamp.
	 */
	if (now < last)
		return;

	delta = now - last;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_time += delta;
	else
		dev->power.active_time += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

u64 pm_runtime_suspended_time(struct device *dev)
{
	u64 time;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	update_pm_runtime_accounting(dev);
	time = dev->power.suspended_time;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return time;
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
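/*
 * Illustration (hypothetical, not part of this file): a driver could use
 * pm_runtime_suspended_time() to report how long its device has spent
 * runtime-suspended, e.g. from a sysfs show handler:
 *
 *	static ssize_t foo_sleep_ns_show(struct device *dev,
 *					 struct device_attribute *attr,
 *					 char *buf)
 *	{
 *		return sprintf(buf, "%llu\n", pm_runtime_suspended_time(dev));
 *	}
 *
 * foo_sleep_ns_show() is a made-up name used for the sketch only.
 */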
/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_try_to_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 expires;

	if (!dev->power.use_autosuspend)
		return 0;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		return 0;

	expires = READ_ONCE(dev->power.last_busy);
	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires > ktime_get_mono_fast_ns())
		return expires;	/* Expires in the future */

	return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
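/*
 * Note for readers (editorial, not part of the original file): the expiration
 * time above is simply power.last_busy + autosuspend_delay converted to
 * nanoseconds.  A hypothetical driver restarts that window with:
 *
 *	pm_runtime_mark_last_busy(dev);
 *
 * which updates power.last_busy from ktime_get_mono_fast_ns(); see
 * include/linux/pm_runtime.h for the canonical definition.
 */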
static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by a block device or network
 * device driver to solve the deadlock problem during runtime
 * resume/suspend:
 *
 *     If memory allocation with GFP_KERNEL happens inside the runtime
 *     resume/suspend callback of any one of the device's ancestors (or
 *     the block device itself), a deadlock may be triggered inside the
 *     memory allocation, since it might not complete until the block
 *     device becomes active and the involved page I/O finishes.  This
 *     situation was first pointed out by Alan Stern.  Network devices
 *     are involved in iSCSI-style situations.
 *
 * The dev_hotplug_mutex lock is held in the function to handle the
 * hotplug race, because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* Hold the power lock, since the bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable the ancestors any more if the device
		 * has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of a parent device only if none of its
		 * children has the flag set, because an ancestor's flag
		 * may have been set by any one of its descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
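/*
 * Usage sketch (hypothetical, not from this file): a block or network
 * driver would typically flag itself right after registration and clear
 * the flag before unregistering:
 *
 *	ret = device_add(dev);
 *	if (!ret)
 *		pm_runtime_set_memalloc_noio(dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(dev, false);
 *	device_del(dev);
 */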
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME))
			continue;

		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
		    link->rpm_active)
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		link->rpm_active = true;
	}
	return 0;
}

static void rpm_put_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->rpm_active &&
		    READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
			pm_runtime_put(link->supplier);
			link->rpm_active = false;
		}
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval)
				goto fail;

			device_links_read_unlock(idx);
		}
	}

	retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links
		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

 fail:
			rpm_put_suppliers(dev);

			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
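/*
 * Aside (illustrative only): given the convention above, a driver's
 * ->runtime_idle() callback usually either returns 0 to let the core proceed
 * to rpm_suspend(), or queues an autosuspend itself and returns a nonzero
 * code to stop the core from suspending immediately, e.g.:
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_autosuspend(dev);
 *		return -EBUSY;
 *	}
 *
 * foo_runtime_idle() is a made-up name; the pattern matches what the
 * rpm_idle()/rpm_suspend() paths above expect.
 */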
/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * A deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself.  A network
		 * device might be regarded as part of an iSCSI block
		 * device, so a network device and its ancestors should
		 * be marked memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}
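/*
 * Note (editorial, hedged): because rpm_callback() stores the callback's
 * return code in power.runtime_error, a single failure blocks further
 * suspends and resumes (rpm_check_suspend_allowed() and rpm_resume() both
 * return -EINVAL while it is nonzero).  A driver that knows it has recovered
 * the hardware would typically clear the error by resetting the status, for
 * instance with:
 *
 *	pm_runtime_set_active(dev);
 *
 * which reaches __pm_runtime_set_status() below and clears runtime_error on
 * success.
 */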
/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry out the resume; otherwise send an idle
 * notification for the device's parent (provided the suspend succeeded
 * and neither parent->power.ignore_children nor dev->power.irq_safe is
 * set).  If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the
 * RPM_AUTO flag is set and the next autosuspend-delay expiration time is
 * in the future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		u64 expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires &&
			    dev->power.timer_expires <= expires)) {
				/*
				 * We add a slack of 25% to gather wakeups
				 * without sacrificing the granularity.
				 */
				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
						    (NSEC_PER_MSEC >> 2);

				dev->power.timer_expires = expires;
				hrtimer_start_range_ns(&dev->power.suspend_timer,
						ns_to_ktime(expires),
						slack,
						HRTIMER_MODE_ABS);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq_check(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}
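/*
 * Illustration (hypothetical callback, not part of this file): the -EAGAIN /
 * -EBUSY handling at the end of rpm_suspend() is what lets a driver veto an
 * autosuspend attempt without leaving a sticky runtime_error behind:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		if (foo_hw_busy(dev)) {
 *			pm_runtime_mark_last_busy(dev);
 *			return -EBUSY;		(rescheduled, error cleared)
 *		}
 *		foo_hw_powerdown(dev);
 *		return 0;
 *	}
 *
 * foo_hw_busy() and foo_hw_powerdown() are invented names for the sketch.
 */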
/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and has not
		 * been set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}
/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
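/*
 * Context note (editorial): the asynchronous entry points queue work on pm_wq
 * and are therefore safe to call where sleeping is not allowed.  A
 * hypothetical interrupt handler could kick off a resume like this:
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo *priv = data;
 *
 *		pm_request_resume(priv->dev);	(async; funnels into
 *						 pm_runtime_work())
 *		return IRQ_HANDLED;
 *	}
 *
 * foo_irq() and struct foo are invented for the sketch; pm_request_resume()
 * is the real asynchronous helper declared in include/linux/pm_runtime.h.
 */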
/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
	struct device *dev = container_of(timer, struct device, power.suspend_timer);
	unsigned long flags;
	u64 expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * If 'expires' is after the current time, we've been called
	 * too early, so do nothing; otherwise clear timer_expires and
	 * queue the suspend request.
	 */
	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return HRTIMER_NORESTART;
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	u64 expires;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
	dev->power.timer_expires = expires;
	dev->power.timer_autosuspends = 0;
	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
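/*
 * Usage sketch (hypothetical): a driver that knows its device will be idle
 * for a while can schedule the suspend attempt itself instead of using the
 * autosuspend machinery:
 *
 *	if (pm_schedule_suspend(dev, 500))	(delay in milliseconds)
 *		dev_dbg(dev, "suspend request not scheduled\n");
 *
 * Note that the request is re-checked by rpm_check_suspend_allowed() when
 * the timer fires, so scheduling it is only a hint.
 */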
/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
			dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

/**
 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
 * @dev: Device to handle.
 *
 * Return -EINVAL if runtime PM is disabled for the device.
 *
 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
 * and the runtime PM usage counter is nonzero, increment the counter and
 * return 1.  Otherwise return 0 without changing the counter.
 */
int pm_runtime_get_if_in_use(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = dev->power.disable_depth > 0 ? -EINVAL :
		dev->power.runtime_status == RPM_ACTIVE
			&& atomic_inc_not_zero(&dev->power.usage_count);
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
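/*
 * For orientation (hedged; see include/linux/pm_runtime.h for the canonical
 * definitions): the familiar driver-facing helpers are thin wrappers around
 * the three entry points above, e.g.:
 *
 *	pm_runtime_get_sync(dev)  -> __pm_runtime_resume(dev, RPM_GET_PUT)
 *	pm_runtime_put(dev)       -> __pm_runtime_idle(dev,
 *						RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put_autosuspend(dev)
 *				  -> __pm_runtime_suspend(dev,
 *						RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 */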
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * nonzero, the status may be changed either to RPM_ACTIVE or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error
 * field and the device parent's counter of unsuspended children is modified
 * to reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}
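/*
 * Typical use (hypothetical probe code): a driver whose hardware comes up
 * already powered tells the core so before enabling runtime PM:
 *
 *	pm_runtime_set_active(dev);	(wraps __pm_runtime_set_status())
 *	pm_runtime_enable(dev);
 *
 * Without the first call the core would keep the RPM_SUSPENDED initial
 * status set up by pm_runtime_init() below.
 */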
/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device
 * up.  Next, make sure that all pending requests for the device have been
 * flushed from pm_wq and wait for all runtime PM operations involving the
 * device in progress to complete.
 *
 * Return value:
 * 1 if there was a resume request pending and the device had to be woken up,
 * 0 otherwise.
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	/* Update time accounting before disabling PM-runtime. */
	update_pm_runtime_accounting(dev);

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
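/*
 * Teardown sketch (hypothetical): pm_runtime_disable() is the
 * __pm_runtime_disable(dev, true) wrapper from include/linux/pm_runtime.h,
 * commonly paired with the enable call from probe in a driver's remove path:
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		pm_runtime_disable(&pdev->dev);	(waits for callbacks in flight)
 *		...
 *		return 0;
 *	}
 *
 * foo_remove() is a made-up name for illustration.
 */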
/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth--;

		/* About to enable runtime PM, set accounting_timestamp to now */
		if (!dev->power.disable_depth)
			dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
	} else {
		dev_warn(dev, "Unbalanced %s!\n", __func__);
	}

	WARN(!dev->power.disable_depth &&
	     dev->power.runtime_status == RPM_SUSPENDED &&
	     !dev->power.ignore_children &&
	     atomic_read(&dev->power.child_count) > 0,
	     "Enabling runtime PM for inactive device (%s) with active children\n",
	     dev_name(dev));

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
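/*
 * Note (editorial): pm_runtime_forbid() and pm_runtime_allow() back the
 * per-device sysfs "control" attribute, so user space toggles them with,
 * for example:
 *
 *	echo on   > /sys/devices/.../power/control	(forbid: stay active)
 *	echo auto > /sys/devices/.../power/control	(allow: runtime PM)
 *
 * The "..." path component depends on the device and is elided here.
 */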
/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
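/*
 * Putting the autosuspend pieces together (hypothetical driver code): the
 * canonical pattern is to configure the delay once at probe time and then
 * bracket I/O with get/put calls:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	(2 s, in ms)
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *	...
 *	pm_runtime_get_sync(dev);
 *	do_io(dev);					(made-up helper)
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 * pm_runtime_use_autosuspend() is the real wrapper around
 * __pm_runtime_use_autosuspend(dev, true) above.
 */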
/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	dev->power.suspend_timer.function = pm_suspend_timer_fn;

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
 * @dev: Device whose driver is going to be removed.
 *
 * Check links from this device to any consumers and if any of them have active
 * runtime PM references to the device, drop the usage counter of the device
 * (once per link).
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 *
 * Since the device is guaranteed to be runtime-active at the point this is
 * called, nothing else needs to be done here.
 *
 * Moreover, this is called after device_links_busy() has returned 'false', so
 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
 * therefore rpm_active can't be manipulated concurrently.
 */
void pm_runtime_clean_up_links(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->rpm_active) {
			pm_runtime_put_noidle(dev);
			link->rpm_active = false;
		}
	}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->flags & DL_FLAG_PM_RUNTIME)
			pm_runtime_get_sync(link->supplier);

	device_links_read_unlock(idx);
}
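/*
 * Background (hedged): the supplier references taken here follow device
 * links created elsewhere, typically by a consumer driver doing something
 * like:
 *
 *	link = device_link_add(consumer_dev, supplier_dev,
 *			       DL_FLAG_PM_RUNTIME);
 *
 * after which runtime-resuming the consumer also resumes the supplier, as
 * implemented by rpm_get_suppliers() above.
 */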
/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->flags & DL_FLAG_PM_RUNTIME)
			pm_runtime_put(link->supplier);

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

void pm_runtime_drop_link(struct device *dev)
{
	rpm_put_suppliers(dev);

	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we can safely check the device's runtime PM status
 * and, if it is active, invoke its ->runtime_suspend callback to suspend it
 * and change its runtime PM status field to RPM_SUSPENDED.  Also, if the
 * device's usage and children counters don't indicate that the device was in
 * use before the system-wide transition under way, decrement its parent's
 * children counter (if there is a parent).  Keep runtime PM disabled to
 * preserve the state unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into a low-power state.  It should only be
 * used during system-wide PM transitions to sleep states, and it assumes
 * that the analogous pm_runtime_force_resume() will be used to resume the
 * device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function will be called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev))
		pm_runtime_set_suspended(dev);
	else
		__update_runtime_status(dev, RPM_SUSPENDED);

	return 0;

 err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
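/*
 * Common wiring (hypothetical driver, real macros): many drivers reuse this
 * pair directly as their system sleep callbacks:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 *
 * foo_pm_ops and the foo_runtime_* callbacks are invented names; the
 * SET_*_PM_OPS macros come from include/linux/pm.h.
 */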
/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function, the caller is expected to have brought
 * the device into a low-power state by a call to pm_runtime_force_suspend().
 * Here we reverse those actions and bring the device back to full power, if
 * it is expected to be used on system resume.  Otherwise, we defer the
 * resume to be managed via runtime PM.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
 out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);