/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine)						\
		__ret = __routine(dev);				\
	__ret;							\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}
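/*
 * Editorial note (not upstream text): for a callback taking no extra
 * arguments, GENPD_DEV_CALLBACK(genpd, int, stop, dev) expands roughly to
 * the statement expression:
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = 0;
 *	if (__routine)
 *		__ret = __routine(dev);
 *	__ret;	// value of the whole ({ ... }) expression
 *
 * i.e. a missing per-device operation is treated as success.
 */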
static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_on_latency_ns)
		return ret;

	genpd->power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_off_latency_ns)
		return ret;

	genpd->power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_poweroff() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

static int genpd_poweron(struct generic_pm_domain *genpd);

/**
 * __genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __genpd_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);

		ret = genpd_poweron(link->master);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	ret = genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return ret;
}

/**
 * genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
static int genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}
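/*
 * Worked example (editorial note, not upstream text): take three domains
 * where C lists B as a master and B lists A as a master.  A call to
 * __genpd_poweron(C) bumps B's sd_count and recurses via genpd_poweron(B),
 * which in turn bumps A's sd_count and powers A on first; only then are B
 * and finally C powered on.  The incremented sd_count values keep
 * genpd_poweroff() from switching a master off while a slave is active.
 */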
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
}

static int genpd_restore_dev(struct generic_pm_domain *genpd,
			     struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @is_async: PM domain is powered down from a scheduled work
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && is_async))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_poweron() for the master yet after
		 * incrementing it.  In that case genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return 0;
}
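/*
 * Illustrative sketch (editorial; the request variable below is
 * hypothetical, the calls are the generic dev_pm_qos API): a driver can
 * veto the power-off decision made in genpd_poweroff() above by setting
 * one of the flags it checks:
 *
 *	static struct dev_pm_qos_request my_req;
 *
 *	ret = dev_pm_qos_add_request(dev, &my_req, DEV_PM_QOS_FLAGS,
 *				     PM_QOS_FLAG_NO_POWER_OFF);
 *
 * While the request is in effect, dev_pm_qos_flags() reports a status
 * above PM_QOS_FLAGS_NONE for that device and genpd_poweroff() returns
 * -EBUSY without invoking the domain's .power_off() callback.
 */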
/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, true);
	mutex_unlock(&genpd->lock);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (runtime_pm && stop_ok && !stop_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (runtime_pm)
		time_start = ktime_get();

	ret = genpd_save_dev(genpd, dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		genpd_restore_dev(genpd, dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, false);
	mutex_unlock(&genpd->lock);

	return 0;
}
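/*
 * Usage note (editorial): a driver for a device in a genpd never calls
 * pm_genpd_runtime_suspend() directly; it just uses runtime PM and the
 * core routes the callback through the domain, e.g.:
 *
 *	pm_runtime_enable(dev);		// in probe
 *	...
 *	pm_runtime_put(dev);		// may end up here, and may power
 *					// the whole domain off if this was
 *					// its last active device
 */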
/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe) {
		timed = false;
		goto out;
	}

	mutex_lock(&genpd->lock);
	ret = __genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	if (timed && runtime_pm)
		time_start = ktime_get();

	genpd_start_dev(genpd, dev);
	genpd_restore_dev(genpd, dev);

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_poweroff_unused);
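/*
 * Editorial note: booting with "pd_ignore_unused" on the kernel command
 * line sets the flag above, e.g.:
 *
 *	... console=ttyS0 pd_ignore_unused
 *
 * which keeps otherwise-unused domains powered.  This is useful when
 * debugging a platform whose power-off callbacks are suspected of
 * breaking boot.
 */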
#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @timed: True if latency measurements are allowed.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
				   bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	genpd_power_off(genpd, timed);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master, timed);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @timed: True if latency measurements are allowed.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
				  bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_ACTIVE)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master, timed);
		genpd_sd_counter_inc(link->master);
	}

	genpd_power_on(genpd, timed);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
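/*
 * Decision table for resume_needed() (editorial note), assuming the
 * device is wakeup-capable at all:
 *
 *	device_may_wakeup()	active_wakeup	resume?
 *	true			true		yes (case (1) above)
 *	true			false		no
 *	false			true		no
 *	false			false		yes (case (2) above)
 */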
/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	mutex_unlock(&genpd->lock);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}
/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd, true);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd, true);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}
/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out the "noirq" phase of freezing a device under the assumption that
 * its pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ?
		0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}
/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			genpd_power_off(genpd, true);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd, true);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_request_idle(dev);
	}
}
/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to power the domain off (true) or on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd, false);
	} else {
		pm_genpd_sync_poweron(genpd, false);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
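/*
 * Illustrative use (hypothetical driver, editorial note): an "always on"
 * timekeeping device may still switch its domain around the syscore stage
 * from its own suspend/resume hooks:
 *
 *	static void my_timer_suspend(struct my_timer *t)
 *	{
 *		// ... quiesce the hardware ...
 *		pm_genpd_syscore_poweroff(t->dev);
 *	}
 *
 *	static void my_timer_resume(struct my_timer *t)
 *	{
 *		pm_genpd_syscore_poweron(t->dev);
 *		// ... reprogram the hardware ...
 *	}
 */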
#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_prepare	NULL
#define pm_genpd_suspend	NULL
#define pm_genpd_suspend_late	NULL
#define pm_genpd_suspend_noirq	NULL
#define pm_genpd_resume_early	NULL
#define pm_genpd_resume_noirq	NULL
#define pm_genpd_resume		NULL
#define pm_genpd_freeze		NULL
#define pm_genpd_freeze_late	NULL
#define pm_genpd_freeze_noirq	NULL
#define pm_genpd_thaw_early	NULL
#define pm_genpd_thaw_noirq	NULL
#define pm_genpd_thaw		NULL
#define pm_genpd_restore_noirq	NULL
#define pm_genpd_complete	NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct generic_pm_domain *genpd,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;
	dev->pm_domain = &genpd->domain;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = NULL;
	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	mutex_unlock(&genpd->lock);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}
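/*
 * Typical call sequence from platform code (sketch, device names
 * hypothetical); the pm_genpd_add_device() wrapper from pm_domain.h calls
 * this function with td == NULL, so default timing data is used:
 *
 *	pm_genpd_init(&my_pd, NULL, true);
 *	ret = pm_genpd_add_device(&my_pd, &my_pdev->dev);
 *	if (ret)
 *		// -EAGAIN if a system suspend is in progress
 *		// (prepared_count > 0), or an allocation/argument error
 */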
/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (!genpd || genpd != pm_genpd_lookup_dev(dev))
		return -EINVAL;

	/* The above validation also means we have existing domain_data. */
	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	list_del_init(&pdd->list_node);

	mutex_unlock(&genpd->lock);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	mutex_unlock(&genpd->lock);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	mutex_lock(&genpd->lock);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (genpd->status == GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	mutex_unlock(&genpd->lock);
	if (ret)
		kfree(link);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
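/*
 * Sketch (editorial, domain names hypothetical): nesting a peripheral
 * domain inside an SoC-level domain so the SoC domain stays on for as
 * long as the peripheral domain has active devices or subdomains:
 *
 *	pm_genpd_init(&soc_pd, NULL, false);
 *	pm_genpd_init(&periph_pd, NULL, false);
 *	ret = pm_genpd_add_subdomain(&soc_pd, &periph_pd);
 */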
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
			subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

 out:
	mutex_unlock(&genpd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default PM domains "restore device state".
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's status field (true means "off").
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
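/*
 * Minimal initialization sketch (editorial; all "my_" names are
 * hypothetical, simple_qos_governor is the stock governor from
 * domain_governor.c):
 *
 *	static int my_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		return my_soc_pmu_enable(MY_PD_ID);	// hypothetical
 *	}
 *
 *	static int my_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		return my_soc_pmu_disable(MY_PD_ID);	// hypothetical
 *	}
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my-pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *		.flags = GENPD_FLAG_PM_CLK,	// optional: use pm_clk for
 *						// the stop/start hooks
 *	};
 *
 *	pm_genpd_init(&my_pd, &simple_qos_governor, true);  // starts off
 */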
#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
struct generic_pm_domain *__of_genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	if (genpdspec->args_count != 0)
		return ERR_PTR(-EINVAL);
	return data;
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);

/**
 * __of_genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
struct generic_pm_domain *__of_genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
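/*
 * Registration sketch (editorial, names hypothetical): a controller with
 * one DT node exposing two domains uses the onecell xlate together with
 * __of_genpd_add_provider(), defined below:
 *
 *	static struct generic_pm_domain *my_domains[] = { &pd_a, &pd_b };
 *	static struct genpd_onecell_data my_onecell_data = {
 *		.domains = my_domains,
 *		.num_domains = ARRAY_SIZE(my_domains),
 *	};
 *
 *	ret = __of_genpd_add_provider(np, __of_genpd_xlate_onecell,
 *				      &my_onecell_data);
 *
 * Consumers then reference the domains as "power-domains = <&provider 0>"
 * or "power-domains = <&provider 1>", with #power-domain-cells = <1> in
 * the provider node.
 */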
/**
 * __of_genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			    void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %s\n", np->full_name);

	return 0;
}
EXPORT_SYMBOL_GPL(__of_genpd_add_provider);

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp;

	mutex_lock(&of_genpd_mutex);
	list_for_each_entry(cp, &of_genpd_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * of_genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
struct generic_pm_domain *of_genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = pm_genpd_lookup_dev(dev);
	if (!pd)
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);
}
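/*
 * Editorial note on the retry loop above (also used in
 * genpd_dev_pm_attach() below): with i doubling from 1, the mdelay()
 * calls add up to 1 + 2 + 4 + ... + 128 = 255 ms before i reaches 256
 * and the i < GENPD_RETRY_MAX_MS (250) test stops the loop, matching the
 * "approximate" upper bound in the definition of that constant.
 */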
static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Both generic and legacy Samsung-specific DT bindings are supported to keep
 * backwards compatibility with existing DTBs.
 *
 * Returns 0 on successfully attached PM domain or negative error code. Note
 * that if a power-domain exists for the device, but it cannot be found or
 * turned on, then return -EPROBE_DEFER to ensure that the device is not
 * probed and to re-try again later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret;

	if (!dev->of_node)
		return -ENODEV;

	if (dev->pm_domain)
		return -EEXIST;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					 "#power-domain-cells", 0, &pd_args);
	if (ret < 0) {
		if (ret != -ENOENT)
			return ret;

		/*
		 * Try legacy Samsung-specific bindings
		 * (for backwards compatibility of DT ABI)
		 */
		pd_args.args_count = 0;
		pd_args.np = of_parse_phandle(dev->of_node,
					      "samsung,power-domain", 0);
		if (!pd_args.np)
			return -ENOENT;
	}

	pd = of_genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return -EPROBE_DEFER;
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_add_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to add to PM domain %s: %d\n",
			pd->name, ret);
		goto out;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;
	ret = genpd_poweron(pd);

 out:
	return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
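/*
 * Sketch of the generic DT binding parsed by genpd_dev_pm_attach()
 * (editorial; node names and addresses are hypothetical):
 *
 *	power: power-controller@12340000 {
 *		compatible = "vendor,soc-power-controller";
 *		reg = <0x12340000 0x1000>;
 *		#power-domain-cells = <1>;
 *	};
 *
 *	serial@12350000 {
 *		compatible = "vendor,soc-uart";
 *		reg = <0x12350000 0x100>;
 *		power-domains = <&power 0>;
 *	};
 */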
/*** debugfs support ***/

#ifdef CONFIG_PM_ADVANCED_DEBUG
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *pm_genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}

static int pm_genpd_summary_one(struct seq_file *s,
				struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	int ret;

	ret = mutex_lock_interruptible(&genpd->lock);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);

	/*
	 * Modifications on the list require holding locks on both
	 * master and slave, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		seq_printf(s, "%s", link->slave->name);
		if (!list_is_last(&link->master_node, &genpd->master_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	mutex_unlock(&genpd->lock);

	return 0;
}

static int pm_genpd_summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                         status          slaves\n");
	seq_puts(s, "    /device                                            runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = pm_genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int pm_genpd_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, pm_genpd_summary_show, NULL);
}

static const struct file_operations pm_genpd_summary_fops = {
	.open = pm_genpd_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
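/*
 * Illustrative output of /sys/kernel/debug/pm_genpd/pm_genpd_summary
 * (editorial; domain and device names hypothetical, column layout per
 * the format strings above):
 *
 *	domain                         status          slaves
 *	    /device                                            runtime status
 *	----------------------------------------------------------------------
 *	my-soc-pd                      on              my-periph-pd
 *	    /devices/platform/dev0                             suspended
 */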
static int __init pm_genpd_debug_init(void)
{
	struct dentry *d;

	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	if (!pm_genpd_debugfs_dir)
		return -ENOMEM;

	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
	if (!d)
		return -ENOMEM;

	return 0;
}
late_initcall(pm_genpd_debug_init);

static void __exit pm_genpd_debug_exit(void)
{
	debugfs_remove_recursive(pm_genpd_debugfs_dir);
}
__exitcall(pm_genpd_debug_exit);
#endif /* CONFIG_PM_ADVANCED_DEBUG */