/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_on_latency_ns)
		return ret;

	genpd->power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_off_latency_ns)
		return ret;

	genpd->power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_poweroff() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_sd_counter_inc(master);

		mutex_lock_nested(&master->lock, depth + 1);
		ret = genpd_poweron(master, depth + 1);
		mutex_unlock(&master->lock);

		if (ret) {
			genpd_sd_counter_dec(master);
			goto err;
		}
	}

	ret = genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return ret;
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
}

static int genpd_restore_dev(struct generic_pm_domain *genpd,
			struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @is_async: PM domain is powered down from a scheduled work
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && is_async))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_poweron() for the master yet after
		 * incrementing it.  In that case genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return 0;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, true);
	mutex_unlock(&genpd->lock);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (runtime_pm && stop_ok && !stop_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (runtime_pm)
		time_start = ktime_get();

	ret = genpd_save_dev(genpd, dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		genpd_restore_dev(genpd, dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, false);
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe) {
		timed = false;
		goto out;
	}

	mutex_lock(&genpd->lock);
	ret = genpd_poweron(genpd, 0);
	mutex_unlock(&genpd->lock);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	if (timed && runtime_pm)
		time_start = ktime_get();

	genpd_start_dev(genpd, dev);
	genpd_restore_dev(genpd, dev);

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_poweroff_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @timed: True if latency measurements are allowed.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
				   bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	genpd_power_off(genpd, timed);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master, timed);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @timed: True if latency measurements are allowed.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
				  bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_ACTIVE)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master, timed);
		genpd_sd_counter_inc(link->master);
	}

	genpd_power_on(genpd, timed);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	mutex_unlock(&genpd->lock);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd, true);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd, true);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ?
		0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			genpd_power_off(genpd, true);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd, true);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_request_idle(dev);
	}
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd, false);
	} else {
		pm_genpd_sync_poweron(genpd, false);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);

#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_late		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_early		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_late		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_early		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct generic_pm_domain *genpd,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	dev_pm_domain_set(dev, &genpd->domain);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	dev_pm_domain_set(dev, NULL);

	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	mutex_unlock(&genpd->lock);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}
EXPORT_SYMBOL_GPL(__pm_genpd_add_device);

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (!genpd || genpd != pm_genpd_lookup_dev(dev))
		return -EINVAL;

	/* The above validation also means we have existing domain_data. */
	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	list_del_init(&pdd->list_node);

	mutex_unlock(&genpd->lock);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	mutex_unlock(&genpd->lock);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	mutex_lock(&subdomain->lock);
	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

	if (genpd->status == GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&genpd->lock);
	mutex_unlock(&subdomain->lock);
	if (ret)
		kfree(link);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	mutex_lock(&subdomain->lock);
	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
			subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

 out:
	mutex_unlock(&genpd->lock);
	mutex_unlock(&subdomain->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default PM domains "restore device state".
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
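
/*
 * Illustrative sketch (not part of this file): how a SoC power controller
 * driver might use pm_genpd_init(), pm_genpd_add_subdomain() and
 * pm_genpd_add_device() (the latter is a wrapper around
 * __pm_genpd_add_device() with a NULL timing-data argument, as used by
 * genpd_dev_pm_attach() below).  The callback names soc_pd_power_on(),
 * soc_pd_power_off() and the two domain objects are hypothetical, and error
 * handling is omitted:
 *
 *	static struct generic_pm_domain soc_top_pd = {
 *		.name = "soc-top",
 *		.power_on = soc_pd_power_on,
 *		.power_off = soc_pd_power_off,
 *	};
 *
 *	static struct generic_pm_domain soc_gpu_pd = {
 *		.name = "soc-gpu",
 *		.power_on = soc_pd_power_on,
 *		.power_off = soc_pd_power_off,
 *	};
 *
 *	pm_genpd_init(&soc_top_pd, NULL, true);	// register, initially off
 *	pm_genpd_init(&soc_gpu_pd, NULL, true);
 *	pm_genpd_add_subdomain(&soc_top_pd, &soc_gpu_pd); // gpu under top
 *	pm_genpd_add_device(&soc_gpu_pd, dev);	// bind a struct device
 */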

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
struct generic_pm_domain *__of_genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	if (genpdspec->args_count != 0)
		return ERR_PTR(-EINVAL);
	return data;
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);

/**
 * __of_genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
struct generic_pm_domain *__of_genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
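
/*
 * Illustrative sketch (not part of this file): a provider whose single device
 * tree node exposes several domains can be registered with the onecell xlate
 * function roughly as follows.  The array, the genpd_onecell_data instance
 * and "np" are hypothetical, and the domains reuse the ones from the earlier
 * sketch:
 *
 *	static struct generic_pm_domain *soc_domains[] = {
 *		&soc_top_pd,
 *		&soc_gpu_pd,
 *	};
 *
 *	static struct genpd_onecell_data soc_pd_data = {
 *		.domains = soc_domains,
 *		.num_domains = ARRAY_SIZE(soc_domains),
 *	};
 *
 *	__of_genpd_add_provider(np, __of_genpd_xlate_onecell, &soc_pd_data);
 *
 * A consumer then references a domain by index in its device tree node, e.g.
 * "power-domains = <&soc_pd 1>;", which genpd_dev_pm_attach() resolves via
 * of_genpd_get_from_provider() below.
 */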

/**
 * __of_genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %s\n", np->full_name);

	return 0;
}
EXPORT_SYMBOL_GPL(__of_genpd_add_provider);

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp;

	mutex_lock(&of_genpd_mutex);
	list_for_each_entry(cp, &of_genpd_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * of_genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
struct generic_pm_domain *of_genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = pm_genpd_lookup_dev(dev);
	if (!pd)
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Both generic and legacy Samsung-specific DT bindings are supported to keep
 * backwards compatibility with existing DTBs.
 *
 * Returns 0 on successfully attached PM domain or negative error code. Note
 * that if a power-domain exists for the device, but it cannot be found or
 * turned on, then return -EPROBE_DEFER to ensure that the device is not
 * probed and to re-try again later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret;

	if (!dev->of_node)
		return -ENODEV;

	if (dev->pm_domain)
		return -EEXIST;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					"#power-domain-cells", 0, &pd_args);
	if (ret < 0) {
		if (ret != -ENOENT)
			return ret;

		/*
		 * Try legacy Samsung-specific bindings
		 * (for backwards compatibility of DT ABI)
		 */
		pd_args.args_count = 0;
		pd_args.np = of_parse_phandle(dev->of_node,
						"samsung,power-domain", 0);
		if (!pd_args.np)
			return -ENOENT;
	}

	pd = of_genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return -EPROBE_DEFER;
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_add_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to add to PM domain %s: %d",
			pd->name, ret);
		goto out;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	mutex_lock(&pd->lock);
	ret = genpd_poweron(pd, 0);
	mutex_unlock(&pd->lock);
out:
	return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/***        debugfs support        ***/

#ifdef CONFIG_PM_ADVANCED_DEBUG
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *pm_genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}

static int pm_genpd_summary_one(struct seq_file *s,
				struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	int ret;

	ret = mutex_lock_interruptible(&genpd->lock);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	seq_printf(s, "%-30s  %-15s ", genpd->name, status_lookup[genpd->status]);

	/*
	 * Modifications on the list require holding locks on both
	 * master and slave, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		seq_printf(s, "%s", link->slave->name);
		if (!list_is_last(&link->master_node, &genpd->master_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	mutex_unlock(&genpd->lock);

	return 0;
}

static int pm_genpd_summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          slaves\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = pm_genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int pm_genpd_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, pm_genpd_summary_show, NULL);
}

static const struct file_operations pm_genpd_summary_fops = {
	.open = pm_genpd_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init pm_genpd_debug_init(void)
{
	struct dentry *d;

	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	if (!pm_genpd_debugfs_dir)
		return -ENOMEM;

	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
	if (!d)
		return -ENOMEM;

	return 0;
}
late_initcall(pm_genpd_debug_init);

static void __exit pm_genpd_debug_exit(void)
{
	debugfs_remove_recursive(pm_genpd_debugfs_dir);
}
__exitcall(pm_genpd_debug_exit);
#endif /* CONFIG_PM_ADVANCED_DEBUG */