/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
static void update_pm_runtime_accounting(struct device *dev)
{
	u64 now, last, delta;

	if (dev->power.disable_depth > 0)
		return;

	last = dev->power.accounting_timestamp;

	now = ktime_get_mono_fast_ns();
	dev->power.accounting_timestamp = now;

	/*
	 * Because ktime_get_mono_fast_ns() is not monotonic during
	 * timekeeping updates, ensure that 'now' is after the last saved
	 * timestamp.
	 */
	if (now < last)
		return;

	delta = now - last;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_time += delta;
	else
		dev->power.active_time += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
{
	u64 time;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	update_pm_runtime_accounting(dev);
	time = suspended ? dev->power.suspended_time : dev->power.active_time;

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return time;
}

u64 pm_runtime_active_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, false);
}

u64 pm_runtime_suspended_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
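/*
 * Illustrative sketch (not part of the original file): __rpm_get_callback()
 * above prefers subsystem-level dev_pm_ops (PM domain, device type, class,
 * bus) and falls back to the driver's own ops.  A hypothetical driver "foo"
 * would typically supply the callbacks looked up here like this:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume,
 *				   NULL)
 *	};
 *
 * foo_runtime_suspend() and foo_runtime_resume() are made-up names used
 * only for illustration.
 */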
/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		hrtimer_try_to_cancel(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	u64 expires;

	if (!dev->power.use_autosuspend)
		return 0;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		return 0;

	expires = READ_ONCE(dev->power.last_busy);
	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires > ktime_get_mono_fast_ns())
		return expires;	/* Expires in the future */

	return 0;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
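/*
 * Illustrative sketch (not part of the original file): the expiration
 * time computed above is driven by pm_runtime_mark_last_busy().  A
 * driver using autosuspend would typically end an I/O path with:
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 * so that the suspend is deferred until power.autosuspend_delay
 * milliseconds after the last recorded activity.
 */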
static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network device
 * drivers for solving the deadlock problem during runtime resume/suspend:
 *
 *     If memory allocation with GFP_KERNEL is called inside the runtime
 *     resume/suspend callback of any one of the device's ancestors (or the
 *     block device itself), a deadlock may be triggered inside the memory
 *     allocation since it might not complete until the block device becomes
 *     active and the involved page I/O finishes.  This situation was first
 *     pointed out by Alan Stern.  Network devices are involved in iSCSI-type
 *     situations.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * hotplug race because pm_runtime_set_memalloc_noio() may be called
 * in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (block/network device).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* Hold the power lock, since the bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable ancestors any more if the device
		 * has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if none of its
		 * children have the flag set, because an ancestor's flag
		 * may have been set by any one of its descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
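/*
 * Illustrative sketch (not part of the original file): following the rule
 * stated above, a block device driver would set the flag right after
 * device_add() and clear it before device_del().  "foo" is a hypothetical
 * driver-private structure:
 *
 *	device_add(&foo->dev);
 *	pm_runtime_set_memalloc_noio(&foo->dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(&foo->dev, false);
 *	device_del(&foo->dev);
 */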
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME))
			continue;

		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
		    link->rpm_active)
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		link->rpm_active = true;
	}
	return 0;
}

static void rpm_put_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->rpm_active &&
		    READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
			pm_runtime_put(link->supplier);
			link->rpm_active = false;
		}
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval)
				goto fail;

			device_links_read_unlock(idx);
		}
	}

	retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links
		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

fail:
			rpm_put_suppliers(dev);

			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
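/*
 * Illustrative sketch (not part of the original file): a ->runtime_idle()
 * callback can veto the rpm_suspend() call made at the end of rpm_idle()
 * above by returning a nonzero value.  foo_runtime_idle() and struct foo
 * are made-up names:
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return foo->still_in_use ? -EBUSY : 0;
 *	}
 */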
465 */ 466 static int rpm_callback(int (*cb)(struct device *), struct device *dev) 467 { 468 int retval; 469 470 if (!cb) 471 return -ENOSYS; 472 473 if (dev->power.memalloc_noio) { 474 unsigned int noio_flag; 475 476 /* 477 * Deadlock might be caused if memory allocation with 478 * GFP_KERNEL happens inside runtime_suspend and 479 * runtime_resume callbacks of one block device's 480 * ancestor or the block device itself. Network 481 * device might be thought as part of iSCSI block 482 * device, so network device and its ancestor should 483 * be marked as memalloc_noio too. 484 */ 485 noio_flag = memalloc_noio_save(); 486 retval = __rpm_callback(cb, dev); 487 memalloc_noio_restore(noio_flag); 488 } else { 489 retval = __rpm_callback(cb, dev); 490 } 491 492 dev->power.runtime_error = retval; 493 return retval != -EACCES ? retval : -EIO; 494 } 495 496 /** 497 * rpm_suspend - Carry out runtime suspend of given device. 498 * @dev: Device to suspend. 499 * @rpmflags: Flag bits. 500 * 501 * Check if the device's runtime PM status allows it to be suspended. 502 * Cancel a pending idle notification, autosuspend or suspend. If 503 * another suspend has been started earlier, either return immediately 504 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC 505 * flags. If the RPM_ASYNC flag is set then queue a suspend request; 506 * otherwise run the ->runtime_suspend() callback directly. When 507 * ->runtime_suspend succeeded, if a deferred resume was requested while 508 * the callback was running then carry it out, otherwise send an idle 509 * notification for its parent (if the suspend succeeded and both 510 * ignore_children of parent->power and irq_safe of dev->power are not set). 511 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO 512 * flag is set and the next autosuspend-delay expiration time is in the 513 * future, schedule another autosuspend attempt. 514 * 515 * This function must be called under dev->power.lock with interrupts disabled. 516 */ 517 static int rpm_suspend(struct device *dev, int rpmflags) 518 __releases(&dev->power.lock) __acquires(&dev->power.lock) 519 { 520 int (*callback)(struct device *); 521 struct device *parent = NULL; 522 int retval; 523 524 trace_rpm_suspend_rcuidle(dev, rpmflags); 525 526 repeat: 527 retval = rpm_check_suspend_allowed(dev); 528 529 if (retval < 0) 530 ; /* Conditions are wrong. */ 531 532 /* Synchronous suspends are not allowed in the RPM_RESUMING state. */ 533 else if (dev->power.runtime_status == RPM_RESUMING && 534 !(rpmflags & RPM_ASYNC)) 535 retval = -EAGAIN; 536 if (retval) 537 goto out; 538 539 /* If the autosuspend_delay time hasn't expired yet, reschedule. */ 540 if ((rpmflags & RPM_AUTO) 541 && dev->power.runtime_status != RPM_SUSPENDING) { 542 u64 expires = pm_runtime_autosuspend_expiration(dev); 543 544 if (expires != 0) { 545 /* Pending requests need to be canceled. */ 546 dev->power.request = RPM_REQ_NONE; 547 548 /* 549 * Optimization: If the timer is already running and is 550 * set to expire at or before the autosuspend delay, 551 * avoid the overhead of resetting it. Just let it 552 * expire; pm_suspend_timer_fn() will take care of the 553 * rest. 554 */ 555 if (!(dev->power.timer_expires && 556 dev->power.timer_expires <= expires)) { 557 /* 558 * We add a slack of 25% to gather wakeups 559 * without sacrificing the granularity. 
560 */ 561 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) * 562 (NSEC_PER_MSEC >> 2); 563 564 dev->power.timer_expires = expires; 565 hrtimer_start_range_ns(&dev->power.suspend_timer, 566 ns_to_ktime(expires), 567 slack, 568 HRTIMER_MODE_ABS); 569 } 570 dev->power.timer_autosuspends = 1; 571 goto out; 572 } 573 } 574 575 /* Other scheduled or pending requests need to be canceled. */ 576 pm_runtime_cancel_pending(dev); 577 578 if (dev->power.runtime_status == RPM_SUSPENDING) { 579 DEFINE_WAIT(wait); 580 581 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { 582 retval = -EINPROGRESS; 583 goto out; 584 } 585 586 if (dev->power.irq_safe) { 587 spin_unlock(&dev->power.lock); 588 589 cpu_relax(); 590 591 spin_lock(&dev->power.lock); 592 goto repeat; 593 } 594 595 /* Wait for the other suspend running in parallel with us. */ 596 for (;;) { 597 prepare_to_wait(&dev->power.wait_queue, &wait, 598 TASK_UNINTERRUPTIBLE); 599 if (dev->power.runtime_status != RPM_SUSPENDING) 600 break; 601 602 spin_unlock_irq(&dev->power.lock); 603 604 schedule(); 605 606 spin_lock_irq(&dev->power.lock); 607 } 608 finish_wait(&dev->power.wait_queue, &wait); 609 goto repeat; 610 } 611 612 if (dev->power.no_callbacks) 613 goto no_callback; /* Assume success. */ 614 615 /* Carry out an asynchronous or a synchronous suspend. */ 616 if (rpmflags & RPM_ASYNC) { 617 dev->power.request = (rpmflags & RPM_AUTO) ? 618 RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND; 619 if (!dev->power.request_pending) { 620 dev->power.request_pending = true; 621 queue_work(pm_wq, &dev->power.work); 622 } 623 goto out; 624 } 625 626 __update_runtime_status(dev, RPM_SUSPENDING); 627 628 callback = RPM_GET_CALLBACK(dev, runtime_suspend); 629 630 dev_pm_enable_wake_irq_check(dev, true); 631 retval = rpm_callback(callback, dev); 632 if (retval) 633 goto fail; 634 635 no_callback: 636 __update_runtime_status(dev, RPM_SUSPENDED); 637 pm_runtime_deactivate_timer(dev); 638 639 if (dev->parent) { 640 parent = dev->parent; 641 atomic_add_unless(&parent->power.child_count, -1, 0); 642 } 643 wake_up_all(&dev->power.wait_queue); 644 645 if (dev->power.deferred_resume) { 646 dev->power.deferred_resume = false; 647 rpm_resume(dev, 0); 648 retval = -EAGAIN; 649 goto out; 650 } 651 652 /* Maybe the parent is now able to suspend. */ 653 if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { 654 spin_unlock(&dev->power.lock); 655 656 spin_lock(&parent->power.lock); 657 rpm_idle(parent, RPM_ASYNC); 658 spin_unlock(&parent->power.lock); 659 660 spin_lock(&dev->power.lock); 661 } 662 663 out: 664 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); 665 666 return retval; 667 668 fail: 669 dev_pm_disable_wake_irq_check(dev); 670 __update_runtime_status(dev, RPM_ACTIVE); 671 dev->power.deferred_resume = false; 672 wake_up_all(&dev->power.wait_queue); 673 674 if (retval == -EAGAIN || retval == -EBUSY) { 675 dev->power.runtime_error = 0; 676 677 /* 678 * If the callback routine failed an autosuspend, and 679 * if the last_busy time has been updated so that there 680 * is a new autosuspend expiration time, automatically 681 * reschedule another autosuspend. 682 */ 683 if ((rpmflags & RPM_AUTO) && 684 pm_runtime_autosuspend_expiration(dev) != 0) 685 goto repeat; 686 } else { 687 pm_runtime_cancel_pending(dev); 688 } 689 goto out; 690 } 691 692 /** 693 * rpm_resume - Carry out runtime resume of given device. 694 * @dev: Device to resume. 695 * @rpmflags: Flag bits. 696 * 697 * Check if the device's runtime PM status allows it to be resumed. 
/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and not been
		 * set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

out:
	spin_unlock_irq(&dev->power.lock);
}
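/*
 * Illustrative note (an assumption about the companion header, not part of
 * the original file): the requests dispatched by pm_runtime_work() are
 * queued by the asynchronous helpers in include/linux/pm_runtime.h, which
 * call the entry points below with the RPM_ASYNC flag set, for example:
 *
 *	pm_request_idle(dev)        maps to __pm_runtime_idle(dev, RPM_ASYNC)
 *	pm_request_resume(dev)      maps to __pm_runtime_resume(dev, RPM_ASYNC)
 *	pm_request_autosuspend(dev) maps to __pm_runtime_suspend(dev,
 *	                                        RPM_ASYNC | RPM_AUTO)
 */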
931 */ 932 static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) 933 { 934 struct device *dev = container_of(timer, struct device, power.suspend_timer); 935 unsigned long flags; 936 u64 expires; 937 938 spin_lock_irqsave(&dev->power.lock, flags); 939 940 expires = dev->power.timer_expires; 941 /* 942 * If 'expires' is after the current time, we've been called 943 * too early. 944 */ 945 if (expires > 0 && expires < ktime_get_mono_fast_ns()) { 946 dev->power.timer_expires = 0; 947 rpm_suspend(dev, dev->power.timer_autosuspends ? 948 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); 949 } 950 951 spin_unlock_irqrestore(&dev->power.lock, flags); 952 953 return HRTIMER_NORESTART; 954 } 955 956 /** 957 * pm_schedule_suspend - Set up a timer to submit a suspend request in future. 958 * @dev: Device to suspend. 959 * @delay: Time to wait before submitting a suspend request, in milliseconds. 960 */ 961 int pm_schedule_suspend(struct device *dev, unsigned int delay) 962 { 963 unsigned long flags; 964 u64 expires; 965 int retval; 966 967 spin_lock_irqsave(&dev->power.lock, flags); 968 969 if (!delay) { 970 retval = rpm_suspend(dev, RPM_ASYNC); 971 goto out; 972 } 973 974 retval = rpm_check_suspend_allowed(dev); 975 if (retval) 976 goto out; 977 978 /* Other scheduled or pending requests need to be canceled. */ 979 pm_runtime_cancel_pending(dev); 980 981 expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC; 982 dev->power.timer_expires = expires; 983 dev->power.timer_autosuspends = 0; 984 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); 985 986 out: 987 spin_unlock_irqrestore(&dev->power.lock, flags); 988 989 return retval; 990 } 991 EXPORT_SYMBOL_GPL(pm_schedule_suspend); 992 993 /** 994 * __pm_runtime_idle - Entry point for runtime idle operations. 995 * @dev: Device to send idle notification for. 996 * @rpmflags: Flag bits. 997 * 998 * If the RPM_GET_PUT flag is set, decrement the device's usage count and 999 * return immediately if it is larger than zero. Then carry out an idle 1000 * notification, either synchronous or asynchronous. 1001 * 1002 * This routine may be called in atomic context if the RPM_ASYNC flag is set, 1003 * or if pm_runtime_irq_safe() has been called. 1004 */ 1005 int __pm_runtime_idle(struct device *dev, int rpmflags) 1006 { 1007 unsigned long flags; 1008 int retval; 1009 1010 if (rpmflags & RPM_GET_PUT) { 1011 if (!atomic_dec_and_test(&dev->power.usage_count)) 1012 return 0; 1013 } 1014 1015 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); 1016 1017 spin_lock_irqsave(&dev->power.lock, flags); 1018 retval = rpm_idle(dev, rpmflags); 1019 spin_unlock_irqrestore(&dev->power.lock, flags); 1020 1021 return retval; 1022 } 1023 EXPORT_SYMBOL_GPL(__pm_runtime_idle); 1024 1025 /** 1026 * __pm_runtime_suspend - Entry point for runtime put/suspend operations. 1027 * @dev: Device to suspend. 1028 * @rpmflags: Flag bits. 1029 * 1030 * If the RPM_GET_PUT flag is set, decrement the device's usage count and 1031 * return immediately if it is larger than zero. Then carry out a suspend, 1032 * either synchronous or asynchronous. 1033 * 1034 * This routine may be called in atomic context if the RPM_ASYNC flag is set, 1035 * or if pm_runtime_irq_safe() has been called. 
1036 */ 1037 int __pm_runtime_suspend(struct device *dev, int rpmflags) 1038 { 1039 unsigned long flags; 1040 int retval; 1041 1042 if (rpmflags & RPM_GET_PUT) { 1043 if (!atomic_dec_and_test(&dev->power.usage_count)) 1044 return 0; 1045 } 1046 1047 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); 1048 1049 spin_lock_irqsave(&dev->power.lock, flags); 1050 retval = rpm_suspend(dev, rpmflags); 1051 spin_unlock_irqrestore(&dev->power.lock, flags); 1052 1053 return retval; 1054 } 1055 EXPORT_SYMBOL_GPL(__pm_runtime_suspend); 1056 1057 /** 1058 * __pm_runtime_resume - Entry point for runtime resume operations. 1059 * @dev: Device to resume. 1060 * @rpmflags: Flag bits. 1061 * 1062 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then 1063 * carry out a resume, either synchronous or asynchronous. 1064 * 1065 * This routine may be called in atomic context if the RPM_ASYNC flag is set, 1066 * or if pm_runtime_irq_safe() has been called. 1067 */ 1068 int __pm_runtime_resume(struct device *dev, int rpmflags) 1069 { 1070 unsigned long flags; 1071 int retval; 1072 1073 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe && 1074 dev->power.runtime_status != RPM_ACTIVE); 1075 1076 if (rpmflags & RPM_GET_PUT) 1077 atomic_inc(&dev->power.usage_count); 1078 1079 spin_lock_irqsave(&dev->power.lock, flags); 1080 retval = rpm_resume(dev, rpmflags); 1081 spin_unlock_irqrestore(&dev->power.lock, flags); 1082 1083 return retval; 1084 } 1085 EXPORT_SYMBOL_GPL(__pm_runtime_resume); 1086 1087 /** 1088 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter. 1089 * @dev: Device to handle. 1090 * 1091 * Return -EINVAL if runtime PM is disabled for the device. 1092 * 1093 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE 1094 * and the runtime PM usage counter is nonzero, increment the counter and 1095 * return 1. Otherwise return 0 without changing the counter. 1096 */ 1097 int pm_runtime_get_if_in_use(struct device *dev) 1098 { 1099 unsigned long flags; 1100 int retval; 1101 1102 spin_lock_irqsave(&dev->power.lock, flags); 1103 retval = dev->power.disable_depth > 0 ? -EINVAL : 1104 dev->power.runtime_status == RPM_ACTIVE 1105 && atomic_inc_not_zero(&dev->power.usage_count); 1106 spin_unlock_irqrestore(&dev->power.lock, flags); 1107 return retval; 1108 } 1109 EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use); 1110 1111 /** 1112 * __pm_runtime_set_status - Set runtime PM status of a device. 1113 * @dev: Device to handle. 1114 * @status: New runtime PM status of the device. 1115 * 1116 * If runtime PM of the device is disabled or its power.runtime_error field is 1117 * different from zero, the status may be changed either to RPM_ACTIVE, or to 1118 * RPM_SUSPENDED, as long as that reflects the actual state of the device. 1119 * However, if the device has a parent and the parent is not active, and the 1120 * parent's power.ignore_children flag is unset, the device's status cannot be 1121 * set to RPM_ACTIVE, so -EBUSY is returned in that case. 1122 * 1123 * If successful, __pm_runtime_set_status() clears the power.runtime_error field 1124 * and the device parent's counter of unsuspended children is modified to 1125 * reflect the new status. If the new status is RPM_SUSPENDED, an idle 1126 * notification request for the parent is submitted. 
1127 */ 1128 int __pm_runtime_set_status(struct device *dev, unsigned int status) 1129 { 1130 struct device *parent = dev->parent; 1131 unsigned long flags; 1132 bool notify_parent = false; 1133 int error = 0; 1134 1135 if (status != RPM_ACTIVE && status != RPM_SUSPENDED) 1136 return -EINVAL; 1137 1138 spin_lock_irqsave(&dev->power.lock, flags); 1139 1140 if (!dev->power.runtime_error && !dev->power.disable_depth) { 1141 error = -EAGAIN; 1142 goto out; 1143 } 1144 1145 if (dev->power.runtime_status == status || !parent) 1146 goto out_set; 1147 1148 if (status == RPM_SUSPENDED) { 1149 atomic_add_unless(&parent->power.child_count, -1, 0); 1150 notify_parent = !parent->power.ignore_children; 1151 } else { 1152 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING); 1153 1154 /* 1155 * It is invalid to put an active child under a parent that is 1156 * not active, has runtime PM enabled and the 1157 * 'power.ignore_children' flag unset. 1158 */ 1159 if (!parent->power.disable_depth 1160 && !parent->power.ignore_children 1161 && parent->power.runtime_status != RPM_ACTIVE) { 1162 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n", 1163 dev_name(dev), 1164 dev_name(parent)); 1165 error = -EBUSY; 1166 } else if (dev->power.runtime_status == RPM_SUSPENDED) { 1167 atomic_inc(&parent->power.child_count); 1168 } 1169 1170 spin_unlock(&parent->power.lock); 1171 1172 if (error) 1173 goto out; 1174 } 1175 1176 out_set: 1177 __update_runtime_status(dev, status); 1178 dev->power.runtime_error = 0; 1179 out: 1180 spin_unlock_irqrestore(&dev->power.lock, flags); 1181 1182 if (notify_parent) 1183 pm_request_idle(parent); 1184 1185 return error; 1186 } 1187 EXPORT_SYMBOL_GPL(__pm_runtime_set_status); 1188 1189 /** 1190 * __pm_runtime_barrier - Cancel pending requests and wait for completions. 1191 * @dev: Device to handle. 1192 * 1193 * Flush all pending requests for the device from pm_wq and wait for all 1194 * runtime PM operations involving the device in progress to complete. 1195 * 1196 * Should be called under dev->power.lock with interrupts disabled. 1197 */ 1198 static void __pm_runtime_barrier(struct device *dev) 1199 { 1200 pm_runtime_deactivate_timer(dev); 1201 1202 if (dev->power.request_pending) { 1203 dev->power.request = RPM_REQ_NONE; 1204 spin_unlock_irq(&dev->power.lock); 1205 1206 cancel_work_sync(&dev->power.work); 1207 1208 spin_lock_irq(&dev->power.lock); 1209 dev->power.request_pending = false; 1210 } 1211 1212 if (dev->power.runtime_status == RPM_SUSPENDING 1213 || dev->power.runtime_status == RPM_RESUMING 1214 || dev->power.idle_notification) { 1215 DEFINE_WAIT(wait); 1216 1217 /* Suspend, wake-up or idle notification in progress. */ 1218 for (;;) { 1219 prepare_to_wait(&dev->power.wait_queue, &wait, 1220 TASK_UNINTERRUPTIBLE); 1221 if (dev->power.runtime_status != RPM_SUSPENDING 1222 && dev->power.runtime_status != RPM_RESUMING 1223 && !dev->power.idle_notification) 1224 break; 1225 spin_unlock_irq(&dev->power.lock); 1226 1227 schedule(); 1228 1229 spin_lock_irq(&dev->power.lock); 1230 } 1231 finish_wait(&dev->power.wait_queue, &wait); 1232 } 1233 } 1234 1235 /** 1236 * pm_runtime_barrier - Flush pending requests and wait for completions. 1237 * @dev: Device to handle. 1238 * 1239 * Prevent the device from being suspended by incrementing its usage counter and 1240 * if there's a pending resume request for the device, wake the device up. 
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device
 * up.  Next, make sure that all pending requests for the device have been
 * flushed from pm_wq and wait for all runtime PM operations involving the
 * device in progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	/* Update time accounting before disabling PM-runtime. */
	update_pm_runtime_accounting(dev);

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1326 */ 1327 void pm_runtime_enable(struct device *dev) 1328 { 1329 unsigned long flags; 1330 1331 spin_lock_irqsave(&dev->power.lock, flags); 1332 1333 if (dev->power.disable_depth > 0) { 1334 dev->power.disable_depth--; 1335 1336 /* About to enable runtime pm, set accounting_timestamp to now */ 1337 if (!dev->power.disable_depth) 1338 dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); 1339 } else { 1340 dev_warn(dev, "Unbalanced %s!\n", __func__); 1341 } 1342 1343 WARN(!dev->power.disable_depth && 1344 dev->power.runtime_status == RPM_SUSPENDED && 1345 !dev->power.ignore_children && 1346 atomic_read(&dev->power.child_count) > 0, 1347 "Enabling runtime PM for inactive device (%s) with active children\n", 1348 dev_name(dev)); 1349 1350 spin_unlock_irqrestore(&dev->power.lock, flags); 1351 } 1352 EXPORT_SYMBOL_GPL(pm_runtime_enable); 1353 1354 /** 1355 * pm_runtime_forbid - Block runtime PM of a device. 1356 * @dev: Device to handle. 1357 * 1358 * Increase the device's usage count and clear its power.runtime_auto flag, 1359 * so that it cannot be suspended at run time until pm_runtime_allow() is called 1360 * for it. 1361 */ 1362 void pm_runtime_forbid(struct device *dev) 1363 { 1364 spin_lock_irq(&dev->power.lock); 1365 if (!dev->power.runtime_auto) 1366 goto out; 1367 1368 dev->power.runtime_auto = false; 1369 atomic_inc(&dev->power.usage_count); 1370 rpm_resume(dev, 0); 1371 1372 out: 1373 spin_unlock_irq(&dev->power.lock); 1374 } 1375 EXPORT_SYMBOL_GPL(pm_runtime_forbid); 1376 1377 /** 1378 * pm_runtime_allow - Unblock runtime PM of a device. 1379 * @dev: Device to handle. 1380 * 1381 * Decrease the device's usage count and set its power.runtime_auto flag. 1382 */ 1383 void pm_runtime_allow(struct device *dev) 1384 { 1385 spin_lock_irq(&dev->power.lock); 1386 if (dev->power.runtime_auto) 1387 goto out; 1388 1389 dev->power.runtime_auto = true; 1390 if (atomic_dec_and_test(&dev->power.usage_count)) 1391 rpm_idle(dev, RPM_AUTO | RPM_ASYNC); 1392 1393 out: 1394 spin_unlock_irq(&dev->power.lock); 1395 } 1396 EXPORT_SYMBOL_GPL(pm_runtime_allow); 1397 1398 /** 1399 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device. 1400 * @dev: Device to handle. 1401 * 1402 * Set the power.no_callbacks flag, which tells the PM core that this 1403 * device is power-managed through its parent and has no runtime PM 1404 * callbacks of its own. The runtime sysfs attributes will be removed. 1405 */ 1406 void pm_runtime_no_callbacks(struct device *dev) 1407 { 1408 spin_lock_irq(&dev->power.lock); 1409 dev->power.no_callbacks = 1; 1410 spin_unlock_irq(&dev->power.lock); 1411 if (device_is_registered(dev)) 1412 rpm_sysfs_remove(dev); 1413 } 1414 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks); 1415 1416 /** 1417 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks. 1418 * @dev: Device to handle 1419 * 1420 * Set the power.irq_safe flag, which tells the PM core that the 1421 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should 1422 * always be invoked with the spinlock held and interrupts disabled. It also 1423 * causes the parent's usage counter to be permanently incremented, preventing 1424 * the parent from runtime suspending -- otherwise an irq-safe child might have 1425 * to wait for a non-irq-safe parent. 
1426 */ 1427 void pm_runtime_irq_safe(struct device *dev) 1428 { 1429 if (dev->parent) 1430 pm_runtime_get_sync(dev->parent); 1431 spin_lock_irq(&dev->power.lock); 1432 dev->power.irq_safe = 1; 1433 spin_unlock_irq(&dev->power.lock); 1434 } 1435 EXPORT_SYMBOL_GPL(pm_runtime_irq_safe); 1436 1437 /** 1438 * update_autosuspend - Handle a change to a device's autosuspend settings. 1439 * @dev: Device to handle. 1440 * @old_delay: The former autosuspend_delay value. 1441 * @old_use: The former use_autosuspend value. 1442 * 1443 * Prevent runtime suspend if the new delay is negative and use_autosuspend is 1444 * set; otherwise allow it. Send an idle notification if suspends are allowed. 1445 * 1446 * This function must be called under dev->power.lock with interrupts disabled. 1447 */ 1448 static void update_autosuspend(struct device *dev, int old_delay, int old_use) 1449 { 1450 int delay = dev->power.autosuspend_delay; 1451 1452 /* Should runtime suspend be prevented now? */ 1453 if (dev->power.use_autosuspend && delay < 0) { 1454 1455 /* If it used to be allowed then prevent it. */ 1456 if (!old_use || old_delay >= 0) { 1457 atomic_inc(&dev->power.usage_count); 1458 rpm_resume(dev, 0); 1459 } 1460 } 1461 1462 /* Runtime suspend should be allowed now. */ 1463 else { 1464 1465 /* If it used to be prevented then allow it. */ 1466 if (old_use && old_delay < 0) 1467 atomic_dec(&dev->power.usage_count); 1468 1469 /* Maybe we can autosuspend now. */ 1470 rpm_idle(dev, RPM_AUTO); 1471 } 1472 } 1473 1474 /** 1475 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value. 1476 * @dev: Device to handle. 1477 * @delay: Value of the new delay in milliseconds. 1478 * 1479 * Set the device's power.autosuspend_delay value. If it changes to negative 1480 * and the power.use_autosuspend flag is set, prevent runtime suspends. If it 1481 * changes the other way, allow runtime suspends. 1482 */ 1483 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) 1484 { 1485 int old_delay, old_use; 1486 1487 spin_lock_irq(&dev->power.lock); 1488 old_delay = dev->power.autosuspend_delay; 1489 old_use = dev->power.use_autosuspend; 1490 dev->power.autosuspend_delay = delay; 1491 update_autosuspend(dev, old_delay, old_use); 1492 spin_unlock_irq(&dev->power.lock); 1493 } 1494 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay); 1495 1496 /** 1497 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag. 1498 * @dev: Device to handle. 1499 * @use: New value for use_autosuspend. 1500 * 1501 * Set the device's power.use_autosuspend flag, and allow or prevent runtime 1502 * suspends as needed. 1503 */ 1504 void __pm_runtime_use_autosuspend(struct device *dev, bool use) 1505 { 1506 int old_delay, old_use; 1507 1508 spin_lock_irq(&dev->power.lock); 1509 old_delay = dev->power.autosuspend_delay; 1510 old_use = dev->power.use_autosuspend; 1511 dev->power.use_autosuspend = use; 1512 update_autosuspend(dev, old_delay, old_use); 1513 spin_unlock_irq(&dev->power.lock); 1514 } 1515 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend); 1516 1517 /** 1518 * pm_runtime_init - Initialize runtime PM fields in given device object. 1519 * @dev: Device object to initialize. 
1520 */ 1521 void pm_runtime_init(struct device *dev) 1522 { 1523 dev->power.runtime_status = RPM_SUSPENDED; 1524 dev->power.idle_notification = false; 1525 1526 dev->power.disable_depth = 1; 1527 atomic_set(&dev->power.usage_count, 0); 1528 1529 dev->power.runtime_error = 0; 1530 1531 atomic_set(&dev->power.child_count, 0); 1532 pm_suspend_ignore_children(dev, false); 1533 dev->power.runtime_auto = true; 1534 1535 dev->power.request_pending = false; 1536 dev->power.request = RPM_REQ_NONE; 1537 dev->power.deferred_resume = false; 1538 INIT_WORK(&dev->power.work, pm_runtime_work); 1539 1540 dev->power.timer_expires = 0; 1541 hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 1542 dev->power.suspend_timer.function = pm_suspend_timer_fn; 1543 1544 init_waitqueue_head(&dev->power.wait_queue); 1545 } 1546 1547 /** 1548 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object. 1549 * @dev: Device object to re-initialize. 1550 */ 1551 void pm_runtime_reinit(struct device *dev) 1552 { 1553 if (!pm_runtime_enabled(dev)) { 1554 if (dev->power.runtime_status == RPM_ACTIVE) 1555 pm_runtime_set_suspended(dev); 1556 if (dev->power.irq_safe) { 1557 spin_lock_irq(&dev->power.lock); 1558 dev->power.irq_safe = 0; 1559 spin_unlock_irq(&dev->power.lock); 1560 if (dev->parent) 1561 pm_runtime_put(dev->parent); 1562 } 1563 } 1564 } 1565 1566 /** 1567 * pm_runtime_remove - Prepare for removing a device from device hierarchy. 1568 * @dev: Device object being removed from device hierarchy. 1569 */ 1570 void pm_runtime_remove(struct device *dev) 1571 { 1572 __pm_runtime_disable(dev, false); 1573 pm_runtime_reinit(dev); 1574 } 1575 1576 /** 1577 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal. 1578 * @dev: Device whose driver is going to be removed. 1579 * 1580 * Check links from this device to any consumers and if any of them have active 1581 * runtime PM references to the device, drop the usage counter of the device 1582 * (once per link). 1583 * 1584 * Links with the DL_FLAG_STATELESS flag set are ignored. 1585 * 1586 * Since the device is guaranteed to be runtime-active at the point this is 1587 * called, nothing else needs to be done here. 1588 * 1589 * Moreover, this is called after device_links_busy() has returned 'false', so 1590 * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and 1591 * therefore rpm_active can't be manipulated concurrently. 1592 */ 1593 void pm_runtime_clean_up_links(struct device *dev) 1594 { 1595 struct device_link *link; 1596 int idx; 1597 1598 idx = device_links_read_lock(); 1599 1600 list_for_each_entry_rcu(link, &dev->links.consumers, s_node) { 1601 if (link->flags & DL_FLAG_STATELESS) 1602 continue; 1603 1604 if (link->rpm_active) { 1605 pm_runtime_put_noidle(dev); 1606 link->rpm_active = false; 1607 } 1608 } 1609 1610 device_links_read_unlock(idx); 1611 } 1612 1613 /** 1614 * pm_runtime_get_suppliers - Resume and reference-count supplier devices. 1615 * @dev: Consumer device. 1616 */ 1617 void pm_runtime_get_suppliers(struct device *dev) 1618 { 1619 struct device_link *link; 1620 int idx; 1621 1622 idx = device_links_read_lock(); 1623 1624 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) 1625 if (link->flags & DL_FLAG_PM_RUNTIME) 1626 pm_runtime_get_sync(link->supplier); 1627 1628 device_links_read_unlock(idx); 1629 } 1630 1631 /** 1632 * pm_runtime_put_suppliers - Drop references to supplier devices. 1633 * @dev: Consumer device. 
1634 */ 1635 void pm_runtime_put_suppliers(struct device *dev) 1636 { 1637 struct device_link *link; 1638 int idx; 1639 1640 idx = device_links_read_lock(); 1641 1642 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) 1643 if (link->flags & DL_FLAG_PM_RUNTIME) 1644 pm_runtime_put(link->supplier); 1645 1646 device_links_read_unlock(idx); 1647 } 1648 1649 void pm_runtime_new_link(struct device *dev) 1650 { 1651 spin_lock_irq(&dev->power.lock); 1652 dev->power.links_count++; 1653 spin_unlock_irq(&dev->power.lock); 1654 } 1655 1656 void pm_runtime_drop_link(struct device *dev) 1657 { 1658 rpm_put_suppliers(dev); 1659 1660 spin_lock_irq(&dev->power.lock); 1661 WARN_ON(dev->power.links_count == 0); 1662 dev->power.links_count--; 1663 spin_unlock_irq(&dev->power.lock); 1664 } 1665 1666 static bool pm_runtime_need_not_resume(struct device *dev) 1667 { 1668 return atomic_read(&dev->power.usage_count) <= 1 && 1669 (atomic_read(&dev->power.child_count) == 0 || 1670 dev->power.ignore_children); 1671 } 1672 1673 /** 1674 * pm_runtime_force_suspend - Force a device into suspend state if needed. 1675 * @dev: Device to suspend. 1676 * 1677 * Disable runtime PM so we safely can check the device's runtime PM status and 1678 * if it is active, invoke its ->runtime_suspend callback to suspend it and 1679 * change its runtime PM status field to RPM_SUSPENDED. Also, if the device's 1680 * usage and children counters don't indicate that the device was in use before 1681 * the system-wide transition under way, decrement its parent's children counter 1682 * (if there is a parent). Keep runtime PM disabled to preserve the state 1683 * unless we encounter errors. 1684 * 1685 * Typically this function may be invoked from a system suspend callback to make 1686 * sure the device is put into low power state and it should only be used during 1687 * system-wide PM transitions to sleep states. It assumes that the analogous 1688 * pm_runtime_force_resume() will be used to resume the device. 1689 */ 1690 int pm_runtime_force_suspend(struct device *dev) 1691 { 1692 int (*callback)(struct device *); 1693 int ret; 1694 1695 pm_runtime_disable(dev); 1696 if (pm_runtime_status_suspended(dev)) 1697 return 0; 1698 1699 callback = RPM_GET_CALLBACK(dev, runtime_suspend); 1700 1701 ret = callback ? callback(dev) : 0; 1702 if (ret) 1703 goto err; 1704 1705 /* 1706 * If the device can stay in suspend after the system-wide transition 1707 * to the working state that will follow, drop the children counter of 1708 * its parent, but set its status to RPM_SUSPENDED anyway in case this 1709 * function will be called again for it in the meantime. 1710 */ 1711 if (pm_runtime_need_not_resume(dev)) 1712 pm_runtime_set_suspended(dev); 1713 else 1714 __update_runtime_status(dev, RPM_SUSPENDED); 1715 1716 return 0; 1717 1718 err: 1719 pm_runtime_enable(dev); 1720 return ret; 1721 } 1722 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend); 1723 1724 /** 1725 * pm_runtime_force_resume - Force a device into resume state if needed. 1726 * @dev: Device to resume. 1727 * 1728 * Prior invoking this function we expect the user to have brought the device 1729 * into low power state by a call to pm_runtime_force_suspend(). Here we reverse 1730 * those actions and bring the device into full power, if it is expected to be 1731 * used on system resume. In the other case, we defer the resume to be managed 1732 * via runtime PM. 1733 * 1734 * Typically this function may be invoked from a system resume callback. 
1735 */ 1736 int pm_runtime_force_resume(struct device *dev) 1737 { 1738 int (*callback)(struct device *); 1739 int ret = 0; 1740 1741 if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev)) 1742 goto out; 1743 1744 /* 1745 * The value of the parent's children counter is correct already, so 1746 * just update the status of the device. 1747 */ 1748 __update_runtime_status(dev, RPM_ACTIVE); 1749 1750 callback = RPM_GET_CALLBACK(dev, runtime_resume); 1751 1752 ret = callback ? callback(dev) : 0; 1753 if (ret) { 1754 pm_runtime_set_suspended(dev); 1755 goto out; 1756 } 1757 1758 pm_runtime_mark_last_busy(dev); 1759 out: 1760 pm_runtime_enable(dev); 1761 return ret; 1762 } 1763 EXPORT_SYMBOL_GPL(pm_runtime_force_resume); 1764