/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d); 			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback; 			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		return ret;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		return ret;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_poweroff() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_sd_counter_inc(master);

		mutex_lock_nested(&master->lock, depth + 1);
		ret = genpd_poweron(master, depth + 1);
		mutex_unlock(&master->lock);

		if (ret) {
			genpd_sd_counter_dec(master);
			goto err;
		}
	}

	ret = genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return ret;
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @is_async: PM domain is powered down from a scheduled work
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && is_async))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_poweron() for the master yet after
		 * incrementing it.  In that case genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return 0;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, true);
	mutex_unlock(&genpd->lock);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, false);
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe) {
		timed = false;
		goto out;
	}

	mutex_lock(&genpd->lock);
	ret = genpd_poweron(genpd, 0);
	mutex_unlock(&genpd->lock);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!dev->power.irq_safe) {
		mutex_lock(&genpd->lock);
		genpd_poweroff(genpd, 0);
		mutex_unlock(&genpd->lock);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_poweroff_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @timed: True if latency measurements are allowed.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
				   bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	genpd_power_off(genpd, timed);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master, timed);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @timed: True if latency measurements are allowed.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
				  bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_ACTIVE)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master, timed);
		genpd_sd_counter_inc(link->master);
	}

	genpd_power_on(genpd, timed);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	mutex_unlock(&genpd->lock);

	if (genpd->suspend_power_off)
		return 0;

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd, true);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd, true);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			genpd_power_off(genpd, true);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd, true);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_request_idle(dev);
	}
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd, false);
	} else {
		pm_genpd_sync_poweron(genpd, false);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);

#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_late		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_early		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_late		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_early		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct generic_pm_domain *genpd,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	dev_pm_domain_set(dev, &genpd->domain);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	dev_pm_domain_set(dev, NULL);

	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	mutex_unlock(&genpd->lock);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}
EXPORT_SYMBOL_GPL(__pm_genpd_add_device);

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (!genpd || genpd != pm_genpd_lookup_dev(dev))
		return -EINVAL;

	/* The above validation also means we have existing domain_data. */
	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	list_del_init(&pdd->list_node);

	mutex_unlock(&genpd->lock);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	mutex_unlock(&genpd->lock);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	mutex_lock(&subdomain->lock);
	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

	if (genpd->status == GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&genpd->lock);
	mutex_unlock(&subdomain->lock);
	if (ret)
		kfree(link);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	mutex_lock(&subdomain->lock);
	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
			subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

 out:
	mutex_unlock(&genpd->lock);
	mutex_unlock(&subdomain->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	if (genpd->state_idx >= GENPD_MAX_NUM_STATES) {
		pr_warn("Initial state index out of bounds.\n");
		genpd->state_idx = GENPD_MAX_NUM_STATES - 1;
	}

	if (genpd->state_count > GENPD_MAX_NUM_STATES) {
		pr_warn("Limiting states to %d\n", GENPD_MAX_NUM_STATES);
		genpd->state_count = GENPD_MAX_NUM_STATES;
	}

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0)
		genpd->state_count = 1;

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
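
/*
 * Example (illustrative sketch only, not part of the genpd core): a SoC
 * power domain driver typically fills in a struct generic_pm_domain with
 * its own power_on/power_off callbacks and registers it with
 * pm_genpd_init().  All "foo" names below are hypothetical.
 */
#if 0
static int foo_pd_power_on(struct generic_pm_domain *domain)
{
	/* Ungate the hypothetical "foo" power island here. */
	return 0;
}

static int foo_pd_power_off(struct generic_pm_domain *domain)
{
	/* Gate the hypothetical "foo" power island here. */
	return 0;
}

static struct generic_pm_domain foo_pd = {
	.name = "foo",
	.power_on = foo_pd_power_on,
	.power_off = foo_pd_power_off,
	.flags = GENPD_FLAG_PM_CLK,	/* optional: use pm_clk for start/stop */
};

static int __init foo_pd_init(void)
{
	/* Register the domain; is_off == true means it starts powered off. */
	pm_genpd_init(&foo_pd, NULL, true);
	return 0;
}
#endif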

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */
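
/*
 * Example (illustrative sketch only): a provider with a single device tree
 * node exposing two domains would typically pm_genpd_init() each domain,
 * fill in a struct genpd_onecell_data and register it with
 * __of_genpd_xlate_onecell() as the xlate callback.  All "bar" names below
 * are hypothetical.
 */
#if 0
static struct generic_pm_domain *bar_domains[2];

static struct genpd_onecell_data bar_onecell_data = {
	.domains = bar_domains,
	.num_domains = ARRAY_SIZE(bar_domains),
};

static int bar_register_provider(struct device_node *np)
{
	/* bar_domains[] entries are assumed to be initialized already. */
	return __of_genpd_add_provider(np, __of_genpd_xlate_onecell,
				       &bar_onecell_data);
}
#endif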

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
struct generic_pm_domain *__of_genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	if (genpdspec->args_count != 0)
		return ERR_PTR(-EINVAL);
	return data;
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);

/**
 * __of_genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
struct generic_pm_domain *__of_genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);

/**
 * __of_genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			    void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %s\n", np->full_name);

	return 0;
}
EXPORT_SYMBOL_GPL(__of_genpd_add_provider);

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp;

	mutex_lock(&of_genpd_mutex);
	list_for_each_entry(cp, &of_genpd_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * of_genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
struct generic_pm_domain *of_genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = pm_genpd_lookup_dev(dev);
	if (!pd)
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Both generic and legacy Samsung-specific DT bindings are supported to keep
 * backwards compatibility with existing DTBs.
 *
 * Returns 0 on successfully attached PM domain or negative error code. Note
 * that if a power-domain exists for the device, but it cannot be found or
 * turned on, then return -EPROBE_DEFER to ensure that the device is not
 * probed and to re-try again later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret;

	if (!dev->of_node)
		return -ENODEV;

	if (dev->pm_domain)
		return -EEXIST;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					"#power-domain-cells", 0, &pd_args);
	if (ret < 0) {
		if (ret != -ENOENT)
			return ret;

		/*
		 * Try legacy Samsung-specific bindings
		 * (for backwards compatibility of DT ABI)
		 */
		pd_args.args_count = 0;
		pd_args.np = of_parse_phandle(dev->of_node,
						"samsung,power-domain", 0);
		if (!pd_args.np)
			return -ENOENT;
	}

	pd = of_genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return -EPROBE_DEFER;
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_add_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to add to PM domain %s: %d",
			pd->name, ret);
		goto out;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	mutex_lock(&pd->lock);
	ret = genpd_poweron(pd, 0);
	mutex_unlock(&pd->lock);
out:
	return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
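
/*
 * Illustrative device tree fragment (hypothetical nodes) for the generic
 * binding parsed by genpd_dev_pm_attach() above: the consumer references
 * the provider through a "power-domains" phandle and the provider declares
 * "#power-domain-cells" to match its xlate callback (0 for
 * __of_genpd_xlate_simple(), 1 for __of_genpd_xlate_onecell()):
 *
 *	power: power-controller@12340000 {
 *		compatible = "vendor,power-controller";
 *		#power-domain-cells = <1>;
 *	};
 *
 *	serial@12350000 {
 *		compatible = "vendor,uart";
 *		power-domains = <&power 2>;
 *	};
 */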
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/***        debugfs support        ***/

#ifdef CONFIG_PM_ADVANCED_DEBUG
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *pm_genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}

static int pm_genpd_summary_one(struct seq_file *s,
				struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = mutex_lock_interruptible(&genpd->lock);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	if (genpd->status == GPD_STATE_POWER_OFF)
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-15s ", genpd->name, state);

	/*
	 * Modifications on the list require holding locks on both
	 * master and slave, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		seq_printf(s, "%s", link->slave->name);
		if (!list_is_last(&link->master_node, &genpd->master_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	mutex_unlock(&genpd->lock);

	return 0;
}

static int pm_genpd_summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          slaves\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = pm_genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int pm_genpd_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, pm_genpd_summary_show, NULL);
}

static const struct file_operations pm_genpd_summary_fops = {
	.open = pm_genpd_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init pm_genpd_debug_init(void)
{
	struct dentry *d;

	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	if (!pm_genpd_debugfs_dir)
		return -ENOMEM;

	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
	if (!d)
		return -ENOMEM;

	return 0;
}
late_initcall(pm_genpd_debug_init);

static void __exit pm_genpd_debug_exit(void)
{
	debugfs_remove_recursive(pm_genpd_debugfs_dir);
}
__exitcall(pm_genpd_debug_exit);
#endif /* CONFIG_PM_ADVANCED_DEBUG */