/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>

static int __pm_runtime_resume(struct device *dev, bool from_wq);
static int __pm_request_idle(struct device *dev);
static int __pm_request_resume(struct device *dev);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                del_timer(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/**
 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_idle(struct device *dev)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;
        else if (atomic_read(&dev->power.usage_count) > 0
            || dev->power.disable_depth > 0
            || dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;
        if (retval)
                goto out;

        if (dev->power.request_pending) {
                /*
                 * If an idle notification request is pending, cancel it. Any
                 * other pending request takes precedence over us.
                 */
                if (dev->power.request == RPM_REQ_IDLE) {
                        dev->power.request = RPM_REQ_NONE;
                } else if (dev->power.request != RPM_REQ_NONE) {
                        retval = -EAGAIN;
                        goto out;
                }
        }

        dev->power.idle_notification = true;

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
                spin_unlock_irq(&dev->power.lock);

                dev->bus->pm->runtime_idle(dev);

                spin_lock_irq(&dev->power.lock);
        } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
                spin_unlock_irq(&dev->power.lock);

                dev->type->pm->runtime_idle(dev);

                spin_lock_irq(&dev->power.lock);
        } else if (dev->class && dev->class->pm
            && dev->class->pm->runtime_idle) {
                spin_unlock_irq(&dev->power.lock);

                dev->class->pm->runtime_idle(dev);

                spin_lock_irq(&dev->power.lock);
        }

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

 out:
        return retval;
}

/**
 * pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 */
int pm_runtime_idle(struct device *dev)
{
        int retval;

        spin_lock_irq(&dev->power.lock);
        retval = __pm_runtime_idle(dev);
        spin_unlock_irq(&dev->power.lock);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_idle);
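/*
 * Example: how an idle notification is typically consumed. This is an
 * illustrative sketch only, not part of this file; the "foo" names are
 * hypothetical. A bus type's ->runtime_idle() callback, which
 * __pm_runtime_idle() above invokes with dev->power.lock dropped, usually
 * just decides whether a suspend should be queued:
 *
 *	static int foo_bus_runtime_idle(struct device *dev)
 *	{
 *		// The device looks idle: queue an asynchronous suspend
 *		// request instead of suspending synchronously in the
 *		// notification itself.
 *		return pm_schedule_suspend(dev, 100);	// 100 ms grace time
 *	}
 */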
/**
 * __pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be suspended and run the ->runtime_suspend() callback
 * provided by its bus type. If another suspend has been started earlier, wait
 * for it to finish. If an idle notification or suspend request is pending or
 * scheduled, cancel it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_suspend(struct device *dev, bool from_wq)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        struct device *parent = NULL;
        bool notify = false;
        int retval = 0;

        dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
                from_wq ? " from workqueue" : "");

 repeat:
        if (dev->power.runtime_error) {
                retval = -EINVAL;
                goto out;
        }

        /* Pending resume requests take precedence over us. */
        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                retval = -EAGAIN;
                goto out;
        }

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;
        else if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.disable_depth > 0
            || atomic_read(&dev->power.usage_count) > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;
        if (retval)
                goto out;

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (from_wq) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        dev->power.runtime_status = RPM_SUSPENDING;
        dev->power.deferred_resume = false;

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->bus->pm->runtime_suspend(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->type && dev->type->pm
            && dev->type->pm->runtime_suspend) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->type->pm->runtime_suspend(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->class && dev->class->pm
            && dev->class->pm->runtime_suspend) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->class->pm->runtime_suspend(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else {
                retval = -ENOSYS;
        }

        if (retval) {
                dev->power.runtime_status = RPM_ACTIVE;
                pm_runtime_cancel_pending(dev);

                if (retval == -EAGAIN || retval == -EBUSY) {
                        notify = true;
                        dev->power.runtime_error = 0;
                }
        } else {
                dev->power.runtime_status = RPM_SUSPENDED;

                if (dev->parent) {
                        parent = dev->parent;
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                }
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                __pm_runtime_resume(dev, false);
                retval = -EAGAIN;
                goto out;
        }

        if (notify)
                __pm_runtime_idle(dev);

        if (parent && !parent->power.ignore_children) {
                spin_unlock_irq(&dev->power.lock);

                pm_request_idle(parent);

                spin_lock_irq(&dev->power.lock);
        }

 out:
        dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);

        return retval;
}

/**
 * pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 */
int pm_runtime_suspend(struct device *dev)
{
        int retval;

        spin_lock_irq(&dev->power.lock);
        retval = __pm_runtime_suspend(dev, false);
        spin_unlock_irq(&dev->power.lock);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_suspend);
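/*
 * Example usage (an illustrative sketch, not taken from a real driver;
 * "foo" is hypothetical): a driver that knows its device is idle can
 * suspend it synchronously, but should treat the non-zero return codes
 * documented above as non-fatal:
 *
 *	int ret = pm_runtime_suspend(&foo->dev);
 *
 *	if (ret == 1)
 *		;	// already suspended, nothing to do
 *	else if (ret == -EAGAIN || ret == -EBUSY)
 *		;	// held active by usage count or unsuspended children
 *	else if (ret < 0)
 *		dev_err(&foo->dev, "suspend failed: %d\n", ret);
 */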
/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be woken up and run the ->runtime_resume() callback
 * provided by its bus type. If another resume has been started earlier, wait
 * for it to finish. If there's a suspend running in parallel with this
 * function, wait for it to finish and resume the device. Cancel any scheduled
 * or pending requests.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_resume(struct device *dev, bool from_wq)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        struct device *parent = NULL;
        int retval = 0;

        dev_dbg(dev, "__pm_runtime_resume()%s!\n",
                from_wq ? " from workqueue" : "");

 repeat:
        if (dev->power.runtime_error) {
                retval = -EINVAL;
                goto out;
        }

        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_ACTIVE)
                retval = 1;
        else if (dev->power.disable_depth > 0)
                retval = -EAGAIN;
        if (retval)
                goto out;

        if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (from_wq) {
                        if (dev->power.runtime_status == RPM_SUSPENDING)
                                dev->power.deferred_resume = true;
                        retval = -EINPROGRESS;
                        goto out;
                }

                /* Wait for the operation carried out in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_RESUMING
                            && dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's resume counter and resume it if
                 * necessary.
                 */
                parent = dev->parent;
                spin_unlock(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock(&parent->power.lock);
                /*
                 * We can resume if the parent's run-time PM is disabled or it
                 * is set to ignore children.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children) {
                        __pm_runtime_resume(parent, false);
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
                if (retval)
                        goto out;
                goto repeat;
        }

        dev->power.runtime_status = RPM_RESUMING;

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->bus->pm->runtime_resume(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->type && dev->type->pm
            && dev->type->pm->runtime_resume) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->type->pm->runtime_resume(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->class && dev->class->pm
            && dev->class->pm->runtime_resume) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->class->pm->runtime_resume(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else {
                retval = -ENOSYS;
        }

        if (retval) {
                dev->power.runtime_status = RPM_SUSPENDED;
                pm_runtime_cancel_pending(dev);
        } else {
                dev->power.runtime_status = RPM_ACTIVE;
                if (parent)
                        atomic_inc(&parent->power.child_count);
        }
        wake_up_all(&dev->power.wait_queue);

        if (!retval)
                __pm_request_idle(dev);

 out:
        if (parent) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);

        return retval;
}

/**
 * pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 */
int pm_runtime_resume(struct device *dev)
{
        int retval;

        spin_lock_irq(&dev->power.lock);
        retval = __pm_runtime_resume(dev, false);
        spin_unlock_irq(&dev->power.lock);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_resume);

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
        struct device *dev = container_of(work, struct device, power.work);
        enum rpm_request req;

        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

        switch (req) {
        case RPM_REQ_NONE:
                break;
        case RPM_REQ_IDLE:
                __pm_runtime_idle(dev);
                break;
        case RPM_REQ_SUSPEND:
                __pm_runtime_suspend(dev, true);
                break;
        case RPM_REQ_RESUME:
                __pm_runtime_resume(dev, true);
                break;
        }

 out:
        spin_unlock_irq(&dev->power.lock);
}
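/*
 * Example: pm_runtime_work() above is what makes the request-based
 * interface safe in atomic context. Illustrative sketch (hypothetical
 * "foo" driver): an interrupt handler must not call pm_runtime_resume(),
 * which may sleep in the bus callback, but it may queue a resume request
 * that pm_wq will execute later:
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_device *foo = data;
 *
 *		pm_request_resume(&foo->dev);	// only queues work, no sleep
 *		return IRQ_HANDLED;
 *	}
 */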
/**
 * __pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 *
 * Check if the device's run-time PM status is correct for suspending the device
 * and queue up a request to run __pm_runtime_idle() for it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_idle(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (atomic_read(&dev->power.usage_count) > 0
            || dev->power.disable_depth > 0
            || dev->power.runtime_status == RPM_SUSPENDED
            || dev->power.runtime_status == RPM_SUSPENDING)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;
        if (retval)
                return retval;

        if (dev->power.request_pending) {
                /* Any requests other than RPM_REQ_IDLE take precedence. */
                if (dev->power.request == RPM_REQ_NONE)
                        dev->power.request = RPM_REQ_IDLE;
                else if (dev->power.request != RPM_REQ_IDLE)
                        retval = -EAGAIN;
                return retval;
        }

        dev->power.request = RPM_REQ_IDLE;
        dev->power.request_pending = true;
        queue_work(pm_wq, &dev->power.work);

        return retval;
}

/**
 * pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 */
int pm_request_idle(struct device *dev)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = __pm_request_idle(dev);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_request_idle);

/**
 * __pm_request_suspend - Submit a suspend request for given device.
 * @dev: Device to suspend.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_suspend(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                return -EINVAL;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;
        else if (atomic_read(&dev->power.usage_count) > 0
            || dev->power.disable_depth > 0)
                retval = -EAGAIN;
        else if (dev->power.runtime_status == RPM_SUSPENDING)
                retval = -EINPROGRESS;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;
        if (retval < 0)
                return retval;

        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                /*
                 * Pending resume requests take precedence over us, but we can
                 * overtake any other pending request.
                 */
                if (dev->power.request == RPM_REQ_RESUME)
                        retval = -EAGAIN;
                else if (dev->power.request != RPM_REQ_SUSPEND)
                        dev->power.request = retval ?
                                        RPM_REQ_NONE : RPM_REQ_SUSPEND;
                return retval;
        } else if (retval) {
                return retval;
        }

        dev->power.request = RPM_REQ_SUSPEND;
        dev->power.request_pending = true;
        queue_work(pm_wq, &dev->power.work);

        return 0;
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and execute __pm_request_suspend() in that case.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
        struct device *dev = (struct device *)data;
        unsigned long flags;
        unsigned long expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /* If 'expires' is after 'jiffies' we've been called too early. */
        if (expires > 0 && !time_after(expires, jiffies)) {
                dev->power.timer_expires = 0;
                __pm_request_suspend(dev);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
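/*
 * Example: scheduling a delayed suspend. Illustrative sketch (hypothetical
 * "foo" driver): after completing I/O, give the device a grace period
 * before the suspend request is queued; renewed activity simply
 * reschedules the timer via another pm_schedule_suspend() call:
 *
 *	foo_complete_io(foo);
 *	ret = pm_schedule_suspend(&foo->dev, 500);	// delay in ms
 *	if (ret < 0 && ret != -EAGAIN && ret != -EBUSY)
 *		dev_err(&foo->dev, "cannot schedule suspend: %d\n", ret);
 *
 * A timer_expires value of zero means "no timer armed", which is why
 * pm_schedule_suspend() below bumps a computed expiration of 0 to 1.
 */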
/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
        unsigned long flags;
        int retval = 0;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.runtime_error) {
                retval = -EINVAL;
                goto out;
        }

        if (!delay) {
                retval = __pm_request_suspend(dev);
                goto out;
        }

        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                /*
                 * Pending resume requests take precedence over us, but any
                 * other pending requests have to be canceled.
                 */
                if (dev->power.request == RPM_REQ_RESUME) {
                        retval = -EAGAIN;
                        goto out;
                }
                dev->power.request = RPM_REQ_NONE;
        }

        if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;
        else if (dev->power.runtime_status == RPM_SUSPENDING)
                retval = -EINPROGRESS;
        else if (atomic_read(&dev->power.usage_count) > 0
            || dev->power.disable_depth > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;
        if (retval)
                goto out;

        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
        if (!dev->power.timer_expires)
                dev->power.timer_expires = 1;
        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

/**
 * __pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_resume(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                return -EINVAL;

        if (dev->power.runtime_status == RPM_ACTIVE)
                retval = 1;
        else if (dev->power.runtime_status == RPM_RESUMING)
                retval = -EINPROGRESS;
        else if (dev->power.disable_depth > 0)
                retval = -EAGAIN;
        if (retval < 0)
                return retval;

        pm_runtime_deactivate_timer(dev);

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                dev->power.deferred_resume = true;
                return retval;
        }
        if (dev->power.request_pending) {
                /* If non-resume request is pending, we can overtake it. */
                dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
                return retval;
        }
        if (retval)
                return retval;

        dev->power.request = RPM_REQ_RESUME;
        dev->power.request_pending = true;
        queue_work(pm_wq, &dev->power.work);

        return retval;
}

/**
 * pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 */
int pm_request_resume(struct device *dev)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = __pm_request_resume(dev);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_request_resume);

/**
 * __pm_runtime_get - Reference count a device and wake it up, if necessary.
 * @dev: Device to handle.
 * @sync: If set and the device is suspended, resume it synchronously.
 *
 * Increment the usage count of the device and resume it or submit a resume
 * request for it, depending on the value of @sync.
 */
int __pm_runtime_get(struct device *dev, bool sync)
{
        int retval;

        atomic_inc(&dev->power.usage_count);
        retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_get);

/**
 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
 * @dev: Device to handle.
 * @sync: If the device's bus type is to be notified, do that synchronously.
 *
 * Decrement the usage count of the device and if it reaches zero, carry out a
 * synchronous idle notification or submit an idle notification request for it,
 * depending on the value of @sync.
 */
int __pm_runtime_put(struct device *dev, bool sync)
{
        int retval = 0;

        if (atomic_dec_and_test(&dev->power.usage_count))
                retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_put);

/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
        struct device *parent = dev->parent;
        unsigned long flags;
        bool notify_parent = false;
        int error = 0;

        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                return -EINVAL;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!dev->power.runtime_error && !dev->power.disable_depth) {
                error = -EAGAIN;
                goto out;
        }

        if (dev->power.runtime_status == status)
                goto out_set;

        if (status == RPM_SUSPENDED) {
                /* It always is possible to set the status to 'suspended'. */
                if (parent) {
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                        notify_parent = !parent->power.ignore_children;
                }
                goto out_set;
        }

        if (parent) {
                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

                /*
                 * It is invalid to put an active child under a parent that is
                 * not active, has run-time PM enabled and the
                 * 'power.ignore_children' flag unset.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children
                    && parent->power.runtime_status != RPM_ACTIVE)
                        error = -EBUSY;
                else if (dev->power.runtime_status == RPM_SUSPENDED)
                        atomic_inc(&parent->power.child_count);

                spin_unlock(&parent->power.lock);

                if (error)
                        goto out;
        }

 out_set:
        dev->power.runtime_status = status;
        dev->power.runtime_error = 0;
 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (notify_parent)
                pm_request_idle(parent);

        return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
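/*
 * Example: the get/put helpers above are normally used through the
 * wrappers in <linux/pm_runtime.h> (pm_runtime_get_sync() maps to
 * __pm_runtime_get(dev, true), pm_runtime_put() to
 * __pm_runtime_put(dev, false), and so on). Illustrative sketch
 * (hypothetical "foo" driver) keeping the device awake across an I/O
 * operation:
 *
 *	pm_runtime_get_sync(&foo->dev);	// resume now if suspended
 *	foo_do_io(foo);			// device is guaranteed active
 *	pm_runtime_put(&foo->dev);	// idle notification at count == 0
 */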
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING
            || dev->power.runtime_status == RPM_RESUMING
            || dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
        int retval = 0;

        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                __pm_runtime_resume(dev, false);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
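/*
 * Example: pm_runtime_barrier() is meant for code that must not race with
 * asynchronous run-time PM activity, such as a driver's remove path.
 * Illustrative sketch (hypothetical "foo" driver):
 *
 *	static int foo_remove(struct device *dev)
 *	{
 *		pm_runtime_barrier(dev);	// flush pm_wq requests and
 *						// wait for transitions
 *		foo_teardown(dev);		// no PM callbacks run now
 *		return 0;
 *	}
 */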
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete. The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because that
         * means there probably is some I/O to process and disabling run-time
         * PM shouldn't prevent the device from processing the I/O.
         */
        if (check_resume && dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                /*
                 * Prevent suspends and idle notifications from being carried
                 * out after we have woken up the device.
                 */
                pm_runtime_get_noresume(dev);

                __pm_runtime_resume(dev, false);

                pm_runtime_put_noidle(dev);
        }

        if (!dev->power.disable_depth++)
                __pm_runtime_barrier(dev);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.disable_depth > 0)
                dev->power.disable_depth--;
        else
                dev_warn(dev, "Unbalanced %s!\n", __func__);

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        spin_lock_init(&dev->power.lock);

        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
                        (unsigned long)dev);

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);

        /* Change the status back to 'suspended' to match the initial status. */
        if (dev->power.runtime_status == RPM_ACTIVE)
                pm_runtime_set_suspended(dev);
}
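/*
 * Example: pm_runtime_init() above leaves every device RPM_SUSPENDED with
 * disable_depth == 1, so a driver or bus type has to declare the real
 * hardware state and enable run-time PM explicitly. Illustrative sketch
 * (hypothetical "foo" probe/remove paths), using the pm_runtime_set_active()
 * and pm_runtime_disable() wrappers from <linux/pm_runtime.h>:
 *
 *	// probe: hardware is powered up at this point
 *	pm_runtime_set_active(&foo->dev);	// status = RPM_ACTIVE
 *	pm_runtime_enable(&foo->dev);		// balance the initial disable
 *
 *	// remove:
 *	pm_runtime_disable(&foo->dev);
 */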