1 /* 2 * drivers/base/power/domain.c - Common code related to device power domains. 3 * 4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. 5 * 6 * This file is released under the GPLv2. 7 */ 8 9 #include <linux/delay.h> 10 #include <linux/kernel.h> 11 #include <linux/io.h> 12 #include <linux/platform_device.h> 13 #include <linux/pm_runtime.h> 14 #include <linux/pm_domain.h> 15 #include <linux/pm_qos.h> 16 #include <linux/pm_clock.h> 17 #include <linux/slab.h> 18 #include <linux/err.h> 19 #include <linux/sched.h> 20 #include <linux/suspend.h> 21 #include <linux/export.h> 22 23 #define GENPD_RETRY_MAX_MS 250 /* Approximate */ 24 25 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ 26 ({ \ 27 type (*__routine)(struct device *__d); \ 28 type __ret = (type)0; \ 29 \ 30 __routine = genpd->dev_ops.callback; \ 31 if (__routine) { \ 32 __ret = __routine(dev); \ 33 } \ 34 __ret; \ 35 }) 36 37 #define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \ 38 ({ \ 39 ktime_t __start = ktime_get(); \ 40 type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \ 41 s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \ 42 struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \ 43 if (!__retval && __elapsed > __td->field) { \ 44 __td->field = __elapsed; \ 45 dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \ 46 __elapsed); \ 47 genpd->max_off_time_changed = true; \ 48 __td->constraint_changed = true; \ 49 } \ 50 __retval; \ 51 }) 52 53 static LIST_HEAD(gpd_list); 54 static DEFINE_MUTEX(gpd_list_lock); 55 56 static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name) 57 { 58 struct generic_pm_domain *genpd = NULL, *gpd; 59 60 if (IS_ERR_OR_NULL(domain_name)) 61 return NULL; 62 63 mutex_lock(&gpd_list_lock); 64 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 65 if (!strcmp(gpd->name, domain_name)) { 66 genpd = gpd; 67 break; 68 } 69 } 70 mutex_unlock(&gpd_list_lock); 71 return genpd; 72 } 73 74 /* 75 * Get the generic PM domain for a particular struct device. 76 * This validates the struct device pointer, the PM domain pointer, 77 * and checks that the PM domain pointer is a real generic PM domain. 78 * Any failure results in NULL being returned. 79 */ 80 struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev) 81 { 82 struct generic_pm_domain *genpd = NULL, *gpd; 83 84 if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain)) 85 return NULL; 86 87 mutex_lock(&gpd_list_lock); 88 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 89 if (&gpd->domain == dev->pm_domain) { 90 genpd = gpd; 91 break; 92 } 93 } 94 mutex_unlock(&gpd_list_lock); 95 96 return genpd; 97 } 98 99 /* 100 * This should only be used where we are certain that the pm_domain 101 * attached to the device is a genpd domain. 
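 *
 * A minimal usage sketch (illustrative; pd_to_genpd() is the container_of()
 * helper from linux/pm_domain.h), mirroring how callers in this file use it:
 *
 *	struct generic_pm_domain *genpd = dev_to_genpd(dev);
 *
 *	if (IS_ERR(genpd))
 *		return -EINVAL;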
102 */ 103 static struct generic_pm_domain *dev_to_genpd(struct device *dev) 104 { 105 if (IS_ERR_OR_NULL(dev->pm_domain)) 106 return ERR_PTR(-EINVAL); 107 108 return pd_to_genpd(dev->pm_domain); 109 } 110 111 static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) 112 { 113 return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev, 114 stop_latency_ns, "stop"); 115 } 116 117 static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev, 118 bool timed) 119 { 120 if (!timed) 121 return GENPD_DEV_CALLBACK(genpd, int, start, dev); 122 123 return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, 124 start_latency_ns, "start"); 125 } 126 127 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) 128 { 129 bool ret = false; 130 131 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0)) 132 ret = !!atomic_dec_and_test(&genpd->sd_count); 133 134 return ret; 135 } 136 137 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) 138 { 139 atomic_inc(&genpd->sd_count); 140 smp_mb__after_atomic(); 141 } 142 143 static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) 144 { 145 s64 usecs64; 146 147 if (!genpd->cpuidle_data) 148 return; 149 150 usecs64 = genpd->power_on_latency_ns; 151 do_div(usecs64, NSEC_PER_USEC); 152 usecs64 += genpd->cpuidle_data->saved_exit_latency; 153 genpd->cpuidle_data->idle_state->exit_latency = usecs64; 154 } 155 156 static int genpd_power_on(struct generic_pm_domain *genpd, bool timed) 157 { 158 ktime_t time_start; 159 s64 elapsed_ns; 160 int ret; 161 162 if (!genpd->power_on) 163 return 0; 164 165 if (!timed) 166 return genpd->power_on(genpd); 167 168 time_start = ktime_get(); 169 ret = genpd->power_on(genpd); 170 if (ret) 171 return ret; 172 173 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 174 if (elapsed_ns <= genpd->power_on_latency_ns) 175 return ret; 176 177 genpd->power_on_latency_ns = elapsed_ns; 178 genpd->max_off_time_changed = true; 179 genpd_recalc_cpu_exit_latency(genpd); 180 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", 181 genpd->name, "on", elapsed_ns); 182 183 return ret; 184 } 185 186 static int genpd_power_off(struct generic_pm_domain *genpd, bool timed) 187 { 188 ktime_t time_start; 189 s64 elapsed_ns; 190 int ret; 191 192 if (!genpd->power_off) 193 return 0; 194 195 if (!timed) 196 return genpd->power_off(genpd); 197 198 time_start = ktime_get(); 199 ret = genpd->power_off(genpd); 200 if (ret == -EBUSY) 201 return ret; 202 203 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 204 if (elapsed_ns <= genpd->power_off_latency_ns) 205 return ret; 206 207 genpd->power_off_latency_ns = elapsed_ns; 208 genpd->max_off_time_changed = true; 209 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", 210 genpd->name, "off", elapsed_ns); 211 212 return ret; 213 } 214 215 /** 216 * __pm_genpd_poweron - Restore power to a given PM domain and its masters. 217 * @genpd: PM domain to power up. 218 * 219 * Restore power to @genpd and all of its masters so that it is possible to 220 * resume a device belonging to it. 
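 *
 * Must be called with genpd->lock held; pm_genpd_poweron() below is the
 * locked wrapper most callers go through:
 *
 *	mutex_lock(&genpd->lock);
 *	ret = __pm_genpd_poweron(genpd);
 *	mutex_unlock(&genpd->lock);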
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->cpuidle_data) {
		cpuidle_pause_and_lock();
		genpd->cpuidle_data->idle_state->disabled = true;
		cpuidle_resume_and_unlock();
		goto out;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);

		ret = pm_genpd_poweron(link->master);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	ret = genpd_power_on(genpd, true);
	if (ret)
		goto err;

 out:
	genpd->status = GPD_STATE_ACTIVE;
	return 0;

 err:
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
	struct generic_pm_domain *genpd;

	genpd = pm_genpd_lookup_name(domain_name);
	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd,
			     struct device *dev, bool timed)
{
	if (!timed)
		return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);

	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
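 *
 * The work item runs genpd_power_off_work_fn(), which takes genpd->lock and
 * retries pm_genpd_poweroff() from the pm_wq workqueue.  Deferring to a work
 * item lets callers that already hold another domain's lock (e.g. a slave
 * queueing its master's power-off) avoid acquiring this domain's lock here.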
357 */ 358 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) 359 { 360 queue_work(pm_wq, &genpd->power_off_work); 361 } 362 363 /** 364 * pm_genpd_poweroff - Remove power from a given PM domain. 365 * @genpd: PM domain to power down. 366 * 367 * If all of the @genpd's devices have been suspended and all of its subdomains 368 * have been powered down, remove power from @genpd. 369 */ 370 static int pm_genpd_poweroff(struct generic_pm_domain *genpd) 371 { 372 struct pm_domain_data *pdd; 373 struct gpd_link *link; 374 unsigned int not_suspended = 0; 375 376 /* 377 * Do not try to power off the domain in the following situations: 378 * (1) The domain is already in the "power off" state. 379 * (2) System suspend is in progress. 380 */ 381 if (genpd->status == GPD_STATE_POWER_OFF 382 || genpd->prepared_count > 0) 383 return 0; 384 385 if (atomic_read(&genpd->sd_count) > 0) 386 return -EBUSY; 387 388 list_for_each_entry(pdd, &genpd->dev_list, list_node) { 389 enum pm_qos_flags_status stat; 390 391 stat = dev_pm_qos_flags(pdd->dev, 392 PM_QOS_FLAG_NO_POWER_OFF 393 | PM_QOS_FLAG_REMOTE_WAKEUP); 394 if (stat > PM_QOS_FLAGS_NONE) 395 return -EBUSY; 396 397 if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) 398 || pdd->dev->power.irq_safe)) 399 not_suspended++; 400 } 401 402 if (not_suspended > genpd->in_progress) 403 return -EBUSY; 404 405 if (genpd->gov && genpd->gov->power_down_ok) { 406 if (!genpd->gov->power_down_ok(&genpd->domain)) 407 return -EAGAIN; 408 } 409 410 if (genpd->cpuidle_data) { 411 /* 412 * If cpuidle_data is set, cpuidle should turn the domain off 413 * when the CPU in it is idle. In that case we don't decrement 414 * the subdomain counts of the master domains, so that power is 415 * not removed from the current domain prematurely as a result 416 * of cutting off the masters' power. 417 */ 418 genpd->status = GPD_STATE_POWER_OFF; 419 cpuidle_pause_and_lock(); 420 genpd->cpuidle_data->idle_state->disabled = false; 421 cpuidle_resume_and_unlock(); 422 return 0; 423 } 424 425 if (genpd->power_off) { 426 int ret; 427 428 if (atomic_read(&genpd->sd_count) > 0) 429 return -EBUSY; 430 431 /* 432 * If sd_count > 0 at this point, one of the subdomains hasn't 433 * managed to call pm_genpd_poweron() for the master yet after 434 * incrementing it. In that case pm_genpd_poweron() will wait 435 * for us to drop the lock, so we can call .power_off() and let 436 * the pm_genpd_poweron() restore power for us (this shouldn't 437 * happen very often). 438 */ 439 ret = genpd_power_off(genpd, true); 440 if (ret) 441 return ret; 442 } 443 444 genpd->status = GPD_STATE_POWER_OFF; 445 446 list_for_each_entry(link, &genpd->slave_links, slave_node) { 447 genpd_sd_counter_dec(link->master); 448 genpd_queue_power_off_work(link->master); 449 } 450 451 return 0; 452 } 453 454 /** 455 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0. 456 * @work: Work structure used for scheduling the execution of this function. 457 */ 458 static void genpd_power_off_work_fn(struct work_struct *work) 459 { 460 struct generic_pm_domain *genpd; 461 462 genpd = container_of(work, struct generic_pm_domain, power_off_work); 463 464 mutex_lock(&genpd->lock); 465 pm_genpd_poweroff(genpd); 466 mutex_unlock(&genpd->lock); 467 } 468 469 /** 470 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain. 471 * @dev: Device to suspend. 
472 * 473 * Carry out a runtime suspend of a device under the assumption that its 474 * pm_domain field points to the domain member of an object of type 475 * struct generic_pm_domain representing a PM domain consisting of I/O devices. 476 */ 477 static int pm_genpd_runtime_suspend(struct device *dev) 478 { 479 struct generic_pm_domain *genpd; 480 bool (*stop_ok)(struct device *__dev); 481 int ret; 482 483 dev_dbg(dev, "%s()\n", __func__); 484 485 genpd = dev_to_genpd(dev); 486 if (IS_ERR(genpd)) 487 return -EINVAL; 488 489 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; 490 if (stop_ok && !stop_ok(dev)) 491 return -EBUSY; 492 493 ret = genpd_save_dev(genpd, dev); 494 if (ret) 495 return ret; 496 497 ret = genpd_stop_dev(genpd, dev); 498 if (ret) { 499 genpd_restore_dev(genpd, dev, true); 500 return ret; 501 } 502 503 /* 504 * If power.irq_safe is set, this routine will be run with interrupts 505 * off, so it can't use mutexes. 506 */ 507 if (dev->power.irq_safe) 508 return 0; 509 510 mutex_lock(&genpd->lock); 511 genpd->in_progress++; 512 pm_genpd_poweroff(genpd); 513 genpd->in_progress--; 514 mutex_unlock(&genpd->lock); 515 516 return 0; 517 } 518 519 /** 520 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. 521 * @dev: Device to resume. 522 * 523 * Carry out a runtime resume of a device under the assumption that its 524 * pm_domain field points to the domain member of an object of type 525 * struct generic_pm_domain representing a PM domain consisting of I/O devices. 526 */ 527 static int pm_genpd_runtime_resume(struct device *dev) 528 { 529 struct generic_pm_domain *genpd; 530 int ret; 531 bool timed = true; 532 533 dev_dbg(dev, "%s()\n", __func__); 534 535 genpd = dev_to_genpd(dev); 536 if (IS_ERR(genpd)) 537 return -EINVAL; 538 539 /* If power.irq_safe, the PM domain is never powered off. */ 540 if (dev->power.irq_safe) { 541 timed = false; 542 goto out; 543 } 544 545 mutex_lock(&genpd->lock); 546 ret = __pm_genpd_poweron(genpd); 547 mutex_unlock(&genpd->lock); 548 549 if (ret) 550 return ret; 551 552 out: 553 genpd_start_dev(genpd, dev, timed); 554 genpd_restore_dev(genpd, dev, timed); 555 556 return 0; 557 } 558 559 static bool pd_ignore_unused; 560 static int __init pd_ignore_unused_setup(char *__unused) 561 { 562 pd_ignore_unused = true; 563 return 1; 564 } 565 __setup("pd_ignore_unused", pd_ignore_unused_setup); 566 567 /** 568 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. 569 */ 570 void pm_genpd_poweroff_unused(void) 571 { 572 struct generic_pm_domain *genpd; 573 574 if (pd_ignore_unused) { 575 pr_warn("genpd: Not disabling unused power domains\n"); 576 return; 577 } 578 579 mutex_lock(&gpd_list_lock); 580 581 list_for_each_entry(genpd, &gpd_list, gpd_list_node) 582 genpd_queue_power_off_work(genpd); 583 584 mutex_unlock(&gpd_list_lock); 585 } 586 587 static int __init genpd_poweroff_unused(void) 588 { 589 pm_genpd_poweroff_unused(); 590 return 0; 591 } 592 late_initcall(genpd_poweroff_unused); 593 594 #ifdef CONFIG_PM_SLEEP 595 596 /** 597 * pm_genpd_present - Check if the given PM domain has been initialized. 598 * @genpd: PM domain to check. 
599 */ 600 static bool pm_genpd_present(const struct generic_pm_domain *genpd) 601 { 602 const struct generic_pm_domain *gpd; 603 604 if (IS_ERR_OR_NULL(genpd)) 605 return false; 606 607 list_for_each_entry(gpd, &gpd_list, gpd_list_node) 608 if (gpd == genpd) 609 return true; 610 611 return false; 612 } 613 614 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, 615 struct device *dev) 616 { 617 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev); 618 } 619 620 /** 621 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. 622 * @genpd: PM domain to power off, if possible. 623 * @timed: True if latency measurements are allowed. 624 * 625 * Check if the given PM domain can be powered off (during system suspend or 626 * hibernation) and do that if so. Also, in that case propagate to its masters. 627 * 628 * This function is only called in "noirq" and "syscore" stages of system power 629 * transitions, so it need not acquire locks (all of the "noirq" callbacks are 630 * executed sequentially, so it is guaranteed that it will never run twice in 631 * parallel). 632 */ 633 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd, 634 bool timed) 635 { 636 struct gpd_link *link; 637 638 if (genpd->status == GPD_STATE_POWER_OFF) 639 return; 640 641 if (genpd->suspended_count != genpd->device_count 642 || atomic_read(&genpd->sd_count) > 0) 643 return; 644 645 genpd_power_off(genpd, timed); 646 647 genpd->status = GPD_STATE_POWER_OFF; 648 649 list_for_each_entry(link, &genpd->slave_links, slave_node) { 650 genpd_sd_counter_dec(link->master); 651 pm_genpd_sync_poweroff(link->master, timed); 652 } 653 } 654 655 /** 656 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters. 657 * @genpd: PM domain to power on. 658 * @timed: True if latency measurements are allowed. 659 * 660 * This function is only called in "noirq" and "syscore" stages of system power 661 * transitions, so it need not acquire locks (all of the "noirq" callbacks are 662 * executed sequentially, so it is guaranteed that it will never run twice in 663 * parallel). 664 */ 665 static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd, 666 bool timed) 667 { 668 struct gpd_link *link; 669 670 if (genpd->status == GPD_STATE_ACTIVE) 671 return; 672 673 list_for_each_entry(link, &genpd->slave_links, slave_node) { 674 pm_genpd_sync_poweron(link->master, timed); 675 genpd_sd_counter_inc(link->master); 676 } 677 678 genpd_power_on(genpd, timed); 679 680 genpd->status = GPD_STATE_ACTIVE; 681 } 682 683 /** 684 * resume_needed - Check whether to resume a device before system suspend. 685 * @dev: Device to check. 686 * @genpd: PM domain the device belongs to. 687 * 688 * There are two cases in which a device that can wake up the system from sleep 689 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled 690 * to wake up the system and it has to remain active for this purpose while the 691 * system is in the sleep state and (2) if the device is not enabled to wake up 692 * the system from sleep states and it generally doesn't generate wakeup signals 693 * by itself (those signals are generated on its behalf by other parts of the 694 * system). In the latter case it may be necessary to reconfigure the device's 695 * wakeup settings during system suspend, because it may have been set up to 696 * signal remote wakeup from the system's working state as needed by runtime PM. 697 * Return 'true' in either of the above cases. 
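 *
 * Assuming device_can_wakeup() is true (otherwise false is returned right
 * away), the decision reduces to the following table, derived from the
 * return expression below:
 *
 *	device_may_wakeup()	active_wakeup	resume before suspend?
 *	true			true		yes (case 1)
 *	true			false		no
 *	false			true		no
 *	false			false		yes (case 2)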
698 */ 699 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd) 700 { 701 bool active_wakeup; 702 703 if (!device_can_wakeup(dev)) 704 return false; 705 706 active_wakeup = genpd_dev_active_wakeup(genpd, dev); 707 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; 708 } 709 710 /** 711 * pm_genpd_prepare - Start power transition of a device in a PM domain. 712 * @dev: Device to start the transition of. 713 * 714 * Start a power transition of a device (during a system-wide power transition) 715 * under the assumption that its pm_domain field points to the domain member of 716 * an object of type struct generic_pm_domain representing a PM domain 717 * consisting of I/O devices. 718 */ 719 static int pm_genpd_prepare(struct device *dev) 720 { 721 struct generic_pm_domain *genpd; 722 int ret; 723 724 dev_dbg(dev, "%s()\n", __func__); 725 726 genpd = dev_to_genpd(dev); 727 if (IS_ERR(genpd)) 728 return -EINVAL; 729 730 /* 731 * If a wakeup request is pending for the device, it should be woken up 732 * at this point and a system wakeup event should be reported if it's 733 * set up to wake up the system from sleep states. 734 */ 735 pm_runtime_get_noresume(dev); 736 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) 737 pm_wakeup_event(dev, 0); 738 739 if (pm_wakeup_pending()) { 740 pm_runtime_put(dev); 741 return -EBUSY; 742 } 743 744 if (resume_needed(dev, genpd)) 745 pm_runtime_resume(dev); 746 747 mutex_lock(&genpd->lock); 748 749 if (genpd->prepared_count++ == 0) { 750 genpd->suspended_count = 0; 751 genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; 752 } 753 754 mutex_unlock(&genpd->lock); 755 756 if (genpd->suspend_power_off) { 757 pm_runtime_put_noidle(dev); 758 return 0; 759 } 760 761 /* 762 * The PM domain must be in the GPD_STATE_ACTIVE state at this point, 763 * so pm_genpd_poweron() will return immediately, but if the device 764 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need 765 * to make it operational. 766 */ 767 pm_runtime_resume(dev); 768 __pm_runtime_disable(dev, false); 769 770 ret = pm_generic_prepare(dev); 771 if (ret) { 772 mutex_lock(&genpd->lock); 773 774 if (--genpd->prepared_count == 0) 775 genpd->suspend_power_off = false; 776 777 mutex_unlock(&genpd->lock); 778 pm_runtime_enable(dev); 779 } 780 781 pm_runtime_put(dev); 782 return ret; 783 } 784 785 /** 786 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain. 787 * @dev: Device to suspend. 788 * 789 * Suspend a device under the assumption that its pm_domain field points to the 790 * domain member of an object of type struct generic_pm_domain representing 791 * a PM domain consisting of I/O devices. 792 */ 793 static int pm_genpd_suspend(struct device *dev) 794 { 795 struct generic_pm_domain *genpd; 796 797 dev_dbg(dev, "%s()\n", __func__); 798 799 genpd = dev_to_genpd(dev); 800 if (IS_ERR(genpd)) 801 return -EINVAL; 802 803 return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev); 804 } 805 806 /** 807 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain. 808 * @dev: Device to suspend. 809 * 810 * Carry out a late suspend of a device under the assumption that its 811 * pm_domain field points to the domain member of an object of type 812 * struct generic_pm_domain representing a PM domain consisting of I/O devices. 
813 */ 814 static int pm_genpd_suspend_late(struct device *dev) 815 { 816 struct generic_pm_domain *genpd; 817 818 dev_dbg(dev, "%s()\n", __func__); 819 820 genpd = dev_to_genpd(dev); 821 if (IS_ERR(genpd)) 822 return -EINVAL; 823 824 return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev); 825 } 826 827 /** 828 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain. 829 * @dev: Device to suspend. 830 * 831 * Stop the device and remove power from the domain if all devices in it have 832 * been stopped. 833 */ 834 static int pm_genpd_suspend_noirq(struct device *dev) 835 { 836 struct generic_pm_domain *genpd; 837 838 dev_dbg(dev, "%s()\n", __func__); 839 840 genpd = dev_to_genpd(dev); 841 if (IS_ERR(genpd)) 842 return -EINVAL; 843 844 if (genpd->suspend_power_off 845 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) 846 return 0; 847 848 genpd_stop_dev(genpd, dev); 849 850 /* 851 * Since all of the "noirq" callbacks are executed sequentially, it is 852 * guaranteed that this function will never run twice in parallel for 853 * the same PM domain, so it is not necessary to use locking here. 854 */ 855 genpd->suspended_count++; 856 pm_genpd_sync_poweroff(genpd, true); 857 858 return 0; 859 } 860 861 /** 862 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain. 863 * @dev: Device to resume. 864 * 865 * Restore power to the device's PM domain, if necessary, and start the device. 866 */ 867 static int pm_genpd_resume_noirq(struct device *dev) 868 { 869 struct generic_pm_domain *genpd; 870 871 dev_dbg(dev, "%s()\n", __func__); 872 873 genpd = dev_to_genpd(dev); 874 if (IS_ERR(genpd)) 875 return -EINVAL; 876 877 if (genpd->suspend_power_off 878 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) 879 return 0; 880 881 /* 882 * Since all of the "noirq" callbacks are executed sequentially, it is 883 * guaranteed that this function will never run twice in parallel for 884 * the same PM domain, so it is not necessary to use locking here. 885 */ 886 pm_genpd_sync_poweron(genpd, true); 887 genpd->suspended_count--; 888 889 return genpd_start_dev(genpd, dev, true); 890 } 891 892 /** 893 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain. 894 * @dev: Device to resume. 895 * 896 * Carry out an early resume of a device under the assumption that its 897 * pm_domain field points to the domain member of an object of type 898 * struct generic_pm_domain representing a power domain consisting of I/O 899 * devices. 900 */ 901 static int pm_genpd_resume_early(struct device *dev) 902 { 903 struct generic_pm_domain *genpd; 904 905 dev_dbg(dev, "%s()\n", __func__); 906 907 genpd = dev_to_genpd(dev); 908 if (IS_ERR(genpd)) 909 return -EINVAL; 910 911 return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev); 912 } 913 914 /** 915 * pm_genpd_resume - Resume of device in an I/O PM domain. 916 * @dev: Device to resume. 917 * 918 * Resume a device under the assumption that its pm_domain field points to the 919 * domain member of an object of type struct generic_pm_domain representing 920 * a power domain consisting of I/O devices. 921 */ 922 static int pm_genpd_resume(struct device *dev) 923 { 924 struct generic_pm_domain *genpd; 925 926 dev_dbg(dev, "%s()\n", __func__); 927 928 genpd = dev_to_genpd(dev); 929 if (IS_ERR(genpd)) 930 return -EINVAL; 931 932 return genpd->suspend_power_off ? 0 : pm_generic_resume(dev); 933 } 934 935 /** 936 * pm_genpd_freeze - Freezing a device in an I/O PM domain. 
937 * @dev: Device to freeze. 938 * 939 * Freeze a device under the assumption that its pm_domain field points to the 940 * domain member of an object of type struct generic_pm_domain representing 941 * a power domain consisting of I/O devices. 942 */ 943 static int pm_genpd_freeze(struct device *dev) 944 { 945 struct generic_pm_domain *genpd; 946 947 dev_dbg(dev, "%s()\n", __func__); 948 949 genpd = dev_to_genpd(dev); 950 if (IS_ERR(genpd)) 951 return -EINVAL; 952 953 return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev); 954 } 955 956 /** 957 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain. 958 * @dev: Device to freeze. 959 * 960 * Carry out a late freeze of a device under the assumption that its 961 * pm_domain field points to the domain member of an object of type 962 * struct generic_pm_domain representing a power domain consisting of I/O 963 * devices. 964 */ 965 static int pm_genpd_freeze_late(struct device *dev) 966 { 967 struct generic_pm_domain *genpd; 968 969 dev_dbg(dev, "%s()\n", __func__); 970 971 genpd = dev_to_genpd(dev); 972 if (IS_ERR(genpd)) 973 return -EINVAL; 974 975 return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev); 976 } 977 978 /** 979 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain. 980 * @dev: Device to freeze. 981 * 982 * Carry out a late freeze of a device under the assumption that its 983 * pm_domain field points to the domain member of an object of type 984 * struct generic_pm_domain representing a power domain consisting of I/O 985 * devices. 986 */ 987 static int pm_genpd_freeze_noirq(struct device *dev) 988 { 989 struct generic_pm_domain *genpd; 990 991 dev_dbg(dev, "%s()\n", __func__); 992 993 genpd = dev_to_genpd(dev); 994 if (IS_ERR(genpd)) 995 return -EINVAL; 996 997 return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev); 998 } 999 1000 /** 1001 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain. 1002 * @dev: Device to thaw. 1003 * 1004 * Start the device, unless power has been removed from the domain already 1005 * before the system transition. 1006 */ 1007 static int pm_genpd_thaw_noirq(struct device *dev) 1008 { 1009 struct generic_pm_domain *genpd; 1010 1011 dev_dbg(dev, "%s()\n", __func__); 1012 1013 genpd = dev_to_genpd(dev); 1014 if (IS_ERR(genpd)) 1015 return -EINVAL; 1016 1017 return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev, true); 1018 } 1019 1020 /** 1021 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain. 1022 * @dev: Device to thaw. 1023 * 1024 * Carry out an early thaw of a device under the assumption that its 1025 * pm_domain field points to the domain member of an object of type 1026 * struct generic_pm_domain representing a power domain consisting of I/O 1027 * devices. 1028 */ 1029 static int pm_genpd_thaw_early(struct device *dev) 1030 { 1031 struct generic_pm_domain *genpd; 1032 1033 dev_dbg(dev, "%s()\n", __func__); 1034 1035 genpd = dev_to_genpd(dev); 1036 if (IS_ERR(genpd)) 1037 return -EINVAL; 1038 1039 return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev); 1040 } 1041 1042 /** 1043 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain. 1044 * @dev: Device to thaw. 1045 * 1046 * Thaw a device under the assumption that its pm_domain field points to the 1047 * domain member of an object of type struct generic_pm_domain representing 1048 * a power domain consisting of I/O devices. 
1049 */ 1050 static int pm_genpd_thaw(struct device *dev) 1051 { 1052 struct generic_pm_domain *genpd; 1053 1054 dev_dbg(dev, "%s()\n", __func__); 1055 1056 genpd = dev_to_genpd(dev); 1057 if (IS_ERR(genpd)) 1058 return -EINVAL; 1059 1060 return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev); 1061 } 1062 1063 /** 1064 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain. 1065 * @dev: Device to resume. 1066 * 1067 * Make sure the domain will be in the same power state as before the 1068 * hibernation the system is resuming from and start the device if necessary. 1069 */ 1070 static int pm_genpd_restore_noirq(struct device *dev) 1071 { 1072 struct generic_pm_domain *genpd; 1073 1074 dev_dbg(dev, "%s()\n", __func__); 1075 1076 genpd = dev_to_genpd(dev); 1077 if (IS_ERR(genpd)) 1078 return -EINVAL; 1079 1080 /* 1081 * Since all of the "noirq" callbacks are executed sequentially, it is 1082 * guaranteed that this function will never run twice in parallel for 1083 * the same PM domain, so it is not necessary to use locking here. 1084 * 1085 * At this point suspended_count == 0 means we are being run for the 1086 * first time for the given domain in the present cycle. 1087 */ 1088 if (genpd->suspended_count++ == 0) { 1089 /* 1090 * The boot kernel might put the domain into arbitrary state, 1091 * so make it appear as powered off to pm_genpd_sync_poweron(), 1092 * so that it tries to power it on in case it was really off. 1093 */ 1094 genpd->status = GPD_STATE_POWER_OFF; 1095 if (genpd->suspend_power_off) { 1096 /* 1097 * If the domain was off before the hibernation, make 1098 * sure it will be off going forward. 1099 */ 1100 genpd_power_off(genpd, true); 1101 1102 return 0; 1103 } 1104 } 1105 1106 if (genpd->suspend_power_off) 1107 return 0; 1108 1109 pm_genpd_sync_poweron(genpd, true); 1110 1111 return genpd_start_dev(genpd, dev, true); 1112 } 1113 1114 /** 1115 * pm_genpd_complete - Complete power transition of a device in a power domain. 1116 * @dev: Device to complete the transition of. 1117 * 1118 * Complete a power transition of a device (during a system-wide power 1119 * transition) under the assumption that its pm_domain field points to the 1120 * domain member of an object of type struct generic_pm_domain representing 1121 * a power domain consisting of I/O devices. 1122 */ 1123 static void pm_genpd_complete(struct device *dev) 1124 { 1125 struct generic_pm_domain *genpd; 1126 bool run_complete; 1127 1128 dev_dbg(dev, "%s()\n", __func__); 1129 1130 genpd = dev_to_genpd(dev); 1131 if (IS_ERR(genpd)) 1132 return; 1133 1134 mutex_lock(&genpd->lock); 1135 1136 run_complete = !genpd->suspend_power_off; 1137 if (--genpd->prepared_count == 0) 1138 genpd->suspend_power_off = false; 1139 1140 mutex_unlock(&genpd->lock); 1141 1142 if (run_complete) { 1143 pm_generic_complete(dev); 1144 pm_runtime_set_active(dev); 1145 pm_runtime_enable(dev); 1146 pm_request_idle(dev); 1147 } 1148 } 1149 1150 /** 1151 * genpd_syscore_switch - Switch power during system core suspend or resume. 1152 * @dev: Device that normally is marked as "always on" to switch power for. 1153 * 1154 * This routine may only be called during the system core (syscore) suspend or 1155 * resume phase for devices whose "always on" flags are set. 
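 *
 * Power is removed when @suspend is true and restored when it is false; the
 * exported pm_genpd_syscore_poweroff()/pm_genpd_syscore_poweron() wrappers
 * below select that for the caller.  A hedged sketch of how a timekeeping
 * driver might use them from its suspend path (the driver structure, the
 * ced_to_foo_timer() helper and the stored struct device pointer are all
 * hypothetical):
 *
 *	static void foo_timer_suspend(struct clock_event_device *ced)
 *	{
 *		struct foo_timer *timer = ced_to_foo_timer(ced);
 *
 *		foo_timer_stop(timer);
 *		pm_genpd_syscore_poweroff(timer->dev);
 *	}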
1156 */ 1157 static void genpd_syscore_switch(struct device *dev, bool suspend) 1158 { 1159 struct generic_pm_domain *genpd; 1160 1161 genpd = dev_to_genpd(dev); 1162 if (!pm_genpd_present(genpd)) 1163 return; 1164 1165 if (suspend) { 1166 genpd->suspended_count++; 1167 pm_genpd_sync_poweroff(genpd, false); 1168 } else { 1169 pm_genpd_sync_poweron(genpd, false); 1170 genpd->suspended_count--; 1171 } 1172 } 1173 1174 void pm_genpd_syscore_poweroff(struct device *dev) 1175 { 1176 genpd_syscore_switch(dev, true); 1177 } 1178 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff); 1179 1180 void pm_genpd_syscore_poweron(struct device *dev) 1181 { 1182 genpd_syscore_switch(dev, false); 1183 } 1184 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron); 1185 1186 #else /* !CONFIG_PM_SLEEP */ 1187 1188 #define pm_genpd_prepare NULL 1189 #define pm_genpd_suspend NULL 1190 #define pm_genpd_suspend_late NULL 1191 #define pm_genpd_suspend_noirq NULL 1192 #define pm_genpd_resume_early NULL 1193 #define pm_genpd_resume_noirq NULL 1194 #define pm_genpd_resume NULL 1195 #define pm_genpd_freeze NULL 1196 #define pm_genpd_freeze_late NULL 1197 #define pm_genpd_freeze_noirq NULL 1198 #define pm_genpd_thaw_early NULL 1199 #define pm_genpd_thaw_noirq NULL 1200 #define pm_genpd_thaw NULL 1201 #define pm_genpd_restore_noirq NULL 1202 #define pm_genpd_complete NULL 1203 1204 #endif /* CONFIG_PM_SLEEP */ 1205 1206 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, 1207 struct generic_pm_domain *genpd, 1208 struct gpd_timing_data *td) 1209 { 1210 struct generic_pm_domain_data *gpd_data; 1211 int ret; 1212 1213 ret = dev_pm_get_subsys_data(dev); 1214 if (ret) 1215 return ERR_PTR(ret); 1216 1217 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); 1218 if (!gpd_data) { 1219 ret = -ENOMEM; 1220 goto err_put; 1221 } 1222 1223 if (td) 1224 gpd_data->td = *td; 1225 1226 gpd_data->base.dev = dev; 1227 gpd_data->td.constraint_changed = true; 1228 gpd_data->td.effective_constraint_ns = -1; 1229 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; 1230 1231 spin_lock_irq(&dev->power.lock); 1232 1233 if (dev->power.subsys_data->domain_data) { 1234 ret = -EINVAL; 1235 goto err_free; 1236 } 1237 1238 dev->power.subsys_data->domain_data = &gpd_data->base; 1239 dev->pm_domain = &genpd->domain; 1240 1241 spin_unlock_irq(&dev->power.lock); 1242 1243 return gpd_data; 1244 1245 err_free: 1246 spin_unlock_irq(&dev->power.lock); 1247 kfree(gpd_data); 1248 err_put: 1249 dev_pm_put_subsys_data(dev); 1250 return ERR_PTR(ret); 1251 } 1252 1253 static void genpd_free_dev_data(struct device *dev, 1254 struct generic_pm_domain_data *gpd_data) 1255 { 1256 spin_lock_irq(&dev->power.lock); 1257 1258 dev->pm_domain = NULL; 1259 dev->power.subsys_data->domain_data = NULL; 1260 1261 spin_unlock_irq(&dev->power.lock); 1262 1263 kfree(gpd_data); 1264 dev_pm_put_subsys_data(dev); 1265 } 1266 1267 /** 1268 * __pm_genpd_add_device - Add a device to an I/O PM domain. 1269 * @genpd: PM domain to add the device to. 1270 * @dev: Device to be added. 1271 * @td: Set of PM QoS timing parameters to attach to the device. 
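 *
 * Most callers go through the pm_genpd_add_device() wrapper from
 * linux/pm_domain.h, which passes td == NULL.  An illustrative call from
 * platform code ("foo_pd" and the error handling are made up):
 *
 *	ret = pm_genpd_add_device(&foo_pd.genpd, &pdev->dev);
 *	if (ret)
 *		dev_warn(&pdev->dev, "failed to add to PM domain: %d\n", ret);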
1272 */ 1273 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, 1274 struct gpd_timing_data *td) 1275 { 1276 struct generic_pm_domain_data *gpd_data; 1277 int ret = 0; 1278 1279 dev_dbg(dev, "%s()\n", __func__); 1280 1281 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) 1282 return -EINVAL; 1283 1284 gpd_data = genpd_alloc_dev_data(dev, genpd, td); 1285 if (IS_ERR(gpd_data)) 1286 return PTR_ERR(gpd_data); 1287 1288 mutex_lock(&genpd->lock); 1289 1290 if (genpd->prepared_count > 0) { 1291 ret = -EAGAIN; 1292 goto out; 1293 } 1294 1295 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; 1296 if (ret) 1297 goto out; 1298 1299 genpd->device_count++; 1300 genpd->max_off_time_changed = true; 1301 1302 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1303 1304 out: 1305 mutex_unlock(&genpd->lock); 1306 1307 if (ret) 1308 genpd_free_dev_data(dev, gpd_data); 1309 else 1310 dev_pm_qos_add_notifier(dev, &gpd_data->nb); 1311 1312 return ret; 1313 } 1314 1315 /** 1316 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it. 1317 * @domain_name: Name of the PM domain to add the device to. 1318 * @dev: Device to be added. 1319 * @td: Set of PM QoS timing parameters to attach to the device. 1320 */ 1321 int __pm_genpd_name_add_device(const char *domain_name, struct device *dev, 1322 struct gpd_timing_data *td) 1323 { 1324 return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td); 1325 } 1326 1327 /** 1328 * pm_genpd_remove_device - Remove a device from an I/O PM domain. 1329 * @genpd: PM domain to remove the device from. 1330 * @dev: Device to be removed. 1331 */ 1332 int pm_genpd_remove_device(struct generic_pm_domain *genpd, 1333 struct device *dev) 1334 { 1335 struct generic_pm_domain_data *gpd_data; 1336 struct pm_domain_data *pdd; 1337 int ret = 0; 1338 1339 dev_dbg(dev, "%s()\n", __func__); 1340 1341 if (!genpd || genpd != pm_genpd_lookup_dev(dev)) 1342 return -EINVAL; 1343 1344 /* The above validation also means we have existing domain_data. */ 1345 pdd = dev->power.subsys_data->domain_data; 1346 gpd_data = to_gpd_data(pdd); 1347 dev_pm_qos_remove_notifier(dev, &gpd_data->nb); 1348 1349 mutex_lock(&genpd->lock); 1350 1351 if (genpd->prepared_count > 0) { 1352 ret = -EAGAIN; 1353 goto out; 1354 } 1355 1356 genpd->device_count--; 1357 genpd->max_off_time_changed = true; 1358 1359 if (genpd->detach_dev) 1360 genpd->detach_dev(genpd, dev); 1361 1362 list_del_init(&pdd->list_node); 1363 1364 mutex_unlock(&genpd->lock); 1365 1366 genpd_free_dev_data(dev, gpd_data); 1367 1368 return 0; 1369 1370 out: 1371 mutex_unlock(&genpd->lock); 1372 dev_pm_qos_add_notifier(dev, &gpd_data->nb); 1373 1374 return ret; 1375 } 1376 1377 /** 1378 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 1379 * @genpd: Master PM domain to add the subdomain to. 1380 * @subdomain: Subdomain to be added. 
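 *
 * A hedged example of building a two-level hierarchy from platform setup
 * code (the "parent_pd"/"child_pd" names are made up); once the child is
 * powered on it holds the parent's subdomain count, so the parent is only
 * powered off after the child:
 *
 *	pm_genpd_init(&parent_pd.genpd, NULL, true);
 *	pm_genpd_init(&child_pd.genpd, NULL, true);
 *	pm_genpd_add_subdomain(&parent_pd.genpd, &child_pd.genpd);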
1381 */ 1382 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, 1383 struct generic_pm_domain *subdomain) 1384 { 1385 struct gpd_link *link; 1386 int ret = 0; 1387 1388 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) 1389 || genpd == subdomain) 1390 return -EINVAL; 1391 1392 mutex_lock(&genpd->lock); 1393 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); 1394 1395 if (genpd->status == GPD_STATE_POWER_OFF 1396 && subdomain->status != GPD_STATE_POWER_OFF) { 1397 ret = -EINVAL; 1398 goto out; 1399 } 1400 1401 list_for_each_entry(link, &genpd->master_links, master_node) { 1402 if (link->slave == subdomain && link->master == genpd) { 1403 ret = -EINVAL; 1404 goto out; 1405 } 1406 } 1407 1408 link = kzalloc(sizeof(*link), GFP_KERNEL); 1409 if (!link) { 1410 ret = -ENOMEM; 1411 goto out; 1412 } 1413 link->master = genpd; 1414 list_add_tail(&link->master_node, &genpd->master_links); 1415 link->slave = subdomain; 1416 list_add_tail(&link->slave_node, &subdomain->slave_links); 1417 if (subdomain->status != GPD_STATE_POWER_OFF) 1418 genpd_sd_counter_inc(genpd); 1419 1420 out: 1421 mutex_unlock(&subdomain->lock); 1422 mutex_unlock(&genpd->lock); 1423 1424 return ret; 1425 } 1426 1427 /** 1428 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain. 1429 * @master_name: Name of the master PM domain to add the subdomain to. 1430 * @subdomain_name: Name of the subdomain to be added. 1431 */ 1432 int pm_genpd_add_subdomain_names(const char *master_name, 1433 const char *subdomain_name) 1434 { 1435 struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd; 1436 1437 if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name)) 1438 return -EINVAL; 1439 1440 mutex_lock(&gpd_list_lock); 1441 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 1442 if (!master && !strcmp(gpd->name, master_name)) 1443 master = gpd; 1444 1445 if (!subdomain && !strcmp(gpd->name, subdomain_name)) 1446 subdomain = gpd; 1447 1448 if (master && subdomain) 1449 break; 1450 } 1451 mutex_unlock(&gpd_list_lock); 1452 1453 return pm_genpd_add_subdomain(master, subdomain); 1454 } 1455 1456 /** 1457 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 1458 * @genpd: Master PM domain to remove the subdomain from. 1459 * @subdomain: Subdomain to be removed. 1460 */ 1461 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, 1462 struct generic_pm_domain *subdomain) 1463 { 1464 struct gpd_link *link; 1465 int ret = -EINVAL; 1466 1467 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) 1468 return -EINVAL; 1469 1470 mutex_lock(&genpd->lock); 1471 1472 list_for_each_entry(link, &genpd->master_links, master_node) { 1473 if (link->slave != subdomain) 1474 continue; 1475 1476 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); 1477 1478 list_del(&link->master_node); 1479 list_del(&link->slave_node); 1480 kfree(link); 1481 if (subdomain->status != GPD_STATE_POWER_OFF) 1482 genpd_sd_counter_dec(genpd); 1483 1484 mutex_unlock(&subdomain->lock); 1485 1486 ret = 0; 1487 break; 1488 } 1489 1490 mutex_unlock(&genpd->lock); 1491 1492 return ret; 1493 } 1494 1495 /** 1496 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle. 1497 * @genpd: PM domain to be connected with cpuidle. 1498 * @state: cpuidle state this domain can disable/enable. 
1499 * 1500 * Make a PM domain behave as though it contained a CPU core, that is, instead 1501 * of calling its power down routine it will enable the given cpuidle state so 1502 * that the cpuidle subsystem can power it down (if possible and desirable). 1503 */ 1504 int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) 1505 { 1506 struct cpuidle_driver *cpuidle_drv; 1507 struct gpd_cpuidle_data *cpuidle_data; 1508 struct cpuidle_state *idle_state; 1509 int ret = 0; 1510 1511 if (IS_ERR_OR_NULL(genpd) || state < 0) 1512 return -EINVAL; 1513 1514 mutex_lock(&genpd->lock); 1515 1516 if (genpd->cpuidle_data) { 1517 ret = -EEXIST; 1518 goto out; 1519 } 1520 cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL); 1521 if (!cpuidle_data) { 1522 ret = -ENOMEM; 1523 goto out; 1524 } 1525 cpuidle_drv = cpuidle_driver_ref(); 1526 if (!cpuidle_drv) { 1527 ret = -ENODEV; 1528 goto err_drv; 1529 } 1530 if (cpuidle_drv->state_count <= state) { 1531 ret = -EINVAL; 1532 goto err; 1533 } 1534 idle_state = &cpuidle_drv->states[state]; 1535 if (!idle_state->disabled) { 1536 ret = -EAGAIN; 1537 goto err; 1538 } 1539 cpuidle_data->idle_state = idle_state; 1540 cpuidle_data->saved_exit_latency = idle_state->exit_latency; 1541 genpd->cpuidle_data = cpuidle_data; 1542 genpd_recalc_cpu_exit_latency(genpd); 1543 1544 out: 1545 mutex_unlock(&genpd->lock); 1546 return ret; 1547 1548 err: 1549 cpuidle_driver_unref(); 1550 1551 err_drv: 1552 kfree(cpuidle_data); 1553 goto out; 1554 } 1555 1556 /** 1557 * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it. 1558 * @name: Name of the domain to connect to cpuidle. 1559 * @state: cpuidle state this domain can manipulate. 1560 */ 1561 int pm_genpd_name_attach_cpuidle(const char *name, int state) 1562 { 1563 return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state); 1564 } 1565 1566 /** 1567 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain. 1568 * @genpd: PM domain to remove the cpuidle connection from. 1569 * 1570 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the 1571 * given PM domain. 1572 */ 1573 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) 1574 { 1575 struct gpd_cpuidle_data *cpuidle_data; 1576 struct cpuidle_state *idle_state; 1577 int ret = 0; 1578 1579 if (IS_ERR_OR_NULL(genpd)) 1580 return -EINVAL; 1581 1582 mutex_lock(&genpd->lock); 1583 1584 cpuidle_data = genpd->cpuidle_data; 1585 if (!cpuidle_data) { 1586 ret = -ENODEV; 1587 goto out; 1588 } 1589 idle_state = cpuidle_data->idle_state; 1590 if (!idle_state->disabled) { 1591 ret = -EAGAIN; 1592 goto out; 1593 } 1594 idle_state->exit_latency = cpuidle_data->saved_exit_latency; 1595 cpuidle_driver_unref(); 1596 genpd->cpuidle_data = NULL; 1597 kfree(cpuidle_data); 1598 1599 out: 1600 mutex_unlock(&genpd->lock); 1601 return ret; 1602 } 1603 1604 /** 1605 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it. 1606 * @name: Name of the domain to disconnect cpuidle from. 1607 */ 1608 int pm_genpd_name_detach_cpuidle(const char *name) 1609 { 1610 return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name)); 1611 } 1612 1613 /* Default device callbacks for generic PM domains. */ 1614 1615 /** 1616 * pm_genpd_default_save_state - Default "save device state" for PM domains. 1617 * @dev: Device to handle. 
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default PM domains "restore device state".
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial power state of the domain (true if it starts powered off).
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ?
GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; 1682 genpd->device_count = 0; 1683 genpd->max_off_time_ns = -1; 1684 genpd->max_off_time_changed = true; 1685 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; 1686 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; 1687 genpd->domain.ops.prepare = pm_genpd_prepare; 1688 genpd->domain.ops.suspend = pm_genpd_suspend; 1689 genpd->domain.ops.suspend_late = pm_genpd_suspend_late; 1690 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; 1691 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; 1692 genpd->domain.ops.resume_early = pm_genpd_resume_early; 1693 genpd->domain.ops.resume = pm_genpd_resume; 1694 genpd->domain.ops.freeze = pm_genpd_freeze; 1695 genpd->domain.ops.freeze_late = pm_genpd_freeze_late; 1696 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; 1697 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; 1698 genpd->domain.ops.thaw_early = pm_genpd_thaw_early; 1699 genpd->domain.ops.thaw = pm_genpd_thaw; 1700 genpd->domain.ops.poweroff = pm_genpd_suspend; 1701 genpd->domain.ops.poweroff_late = pm_genpd_suspend_late; 1702 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; 1703 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; 1704 genpd->domain.ops.restore_early = pm_genpd_resume_early; 1705 genpd->domain.ops.restore = pm_genpd_resume; 1706 genpd->domain.ops.complete = pm_genpd_complete; 1707 genpd->dev_ops.save_state = pm_genpd_default_save_state; 1708 genpd->dev_ops.restore_state = pm_genpd_default_restore_state; 1709 1710 if (genpd->flags & GENPD_FLAG_PM_CLK) { 1711 genpd->dev_ops.stop = pm_clk_suspend; 1712 genpd->dev_ops.start = pm_clk_resume; 1713 } 1714 1715 mutex_lock(&gpd_list_lock); 1716 list_add(&genpd->gpd_list_node, &gpd_list); 1717 mutex_unlock(&gpd_list_lock); 1718 } 1719 EXPORT_SYMBOL_GPL(pm_genpd_init); 1720 1721 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF 1722 /* 1723 * Device Tree based PM domain providers. 1724 * 1725 * The code below implements generic device tree based PM domain providers that 1726 * bind device tree nodes with generic PM domains registered in the system. 1727 * 1728 * Any driver that registers generic PM domains and needs to support binding of 1729 * devices to these domains is supposed to register a PM domain provider, which 1730 * maps a PM domain specifier retrieved from the device tree to a PM domain. 1731 * 1732 * Two simple mapping functions have been provided for convenience: 1733 * - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping. 1734 * - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by 1735 * index. 1736 */ 1737 1738 /** 1739 * struct of_genpd_provider - PM domain provider registration structure 1740 * @link: Entry in global list of PM domain providers 1741 * @node: Pointer to device tree node of PM domain provider 1742 * @xlate: Provider-specific xlate callback mapping a set of specifier cells 1743 * into a PM domain. 1744 * @data: context pointer to be passed into @xlate callback 1745 */ 1746 struct of_genpd_provider { 1747 struct list_head link; 1748 struct device_node *node; 1749 genpd_xlate_t xlate; 1750 void *data; 1751 }; 1752 1753 /* List of registered PM domain providers. */ 1754 static LIST_HEAD(of_genpd_providers); 1755 /* Mutex to protect the list above. 
*/ 1756 static DEFINE_MUTEX(of_genpd_mutex); 1757 1758 /** 1759 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping 1760 * @genpdspec: OF phandle args to map into a PM domain 1761 * @data: xlate function private data - pointer to struct generic_pm_domain 1762 * 1763 * This is a generic xlate function that can be used to model PM domains that 1764 * have their own device tree nodes. The private data of xlate function needs 1765 * to be a valid pointer to struct generic_pm_domain. 1766 */ 1767 struct generic_pm_domain *__of_genpd_xlate_simple( 1768 struct of_phandle_args *genpdspec, 1769 void *data) 1770 { 1771 if (genpdspec->args_count != 0) 1772 return ERR_PTR(-EINVAL); 1773 return data; 1774 } 1775 EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple); 1776 1777 /** 1778 * __of_genpd_xlate_onecell() - Xlate function using a single index. 1779 * @genpdspec: OF phandle args to map into a PM domain 1780 * @data: xlate function private data - pointer to struct genpd_onecell_data 1781 * 1782 * This is a generic xlate function that can be used to model simple PM domain 1783 * controllers that have one device tree node and provide multiple PM domains. 1784 * A single cell is used as an index into an array of PM domains specified in 1785 * the genpd_onecell_data struct when registering the provider. 1786 */ 1787 struct generic_pm_domain *__of_genpd_xlate_onecell( 1788 struct of_phandle_args *genpdspec, 1789 void *data) 1790 { 1791 struct genpd_onecell_data *genpd_data = data; 1792 unsigned int idx = genpdspec->args[0]; 1793 1794 if (genpdspec->args_count != 1) 1795 return ERR_PTR(-EINVAL); 1796 1797 if (idx >= genpd_data->num_domains) { 1798 pr_err("%s: invalid domain index %u\n", __func__, idx); 1799 return ERR_PTR(-EINVAL); 1800 } 1801 1802 if (!genpd_data->domains[idx]) 1803 return ERR_PTR(-ENOENT); 1804 1805 return genpd_data->domains[idx]; 1806 } 1807 EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell); 1808 1809 /** 1810 * __of_genpd_add_provider() - Register a PM domain provider for a node 1811 * @np: Device node pointer associated with the PM domain provider. 1812 * @xlate: Callback for decoding PM domain from phandle arguments. 1813 * @data: Context pointer for @xlate callback. 
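 *
 * Providers normally register through the of_genpd_add_provider_simple() or
 * of_genpd_add_provider_onecell() wrappers from linux/pm_domain.h, which plug
 * in the two xlate helpers above.  An illustrative one-cell registration
 * ("foo_domains" is a made-up array of already initialized domains):
 *
 *	static struct genpd_onecell_data foo_genpd_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	of_genpd_add_provider_onecell(pdev->dev.of_node, &foo_genpd_data);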
1814 */ 1815 int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, 1816 void *data) 1817 { 1818 struct of_genpd_provider *cp; 1819 1820 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 1821 if (!cp) 1822 return -ENOMEM; 1823 1824 cp->node = of_node_get(np); 1825 cp->data = data; 1826 cp->xlate = xlate; 1827 1828 mutex_lock(&of_genpd_mutex); 1829 list_add(&cp->link, &of_genpd_providers); 1830 mutex_unlock(&of_genpd_mutex); 1831 pr_debug("Added domain provider from %s\n", np->full_name); 1832 1833 return 0; 1834 } 1835 EXPORT_SYMBOL_GPL(__of_genpd_add_provider); 1836 1837 /** 1838 * of_genpd_del_provider() - Remove a previously registered PM domain provider 1839 * @np: Device node pointer associated with the PM domain provider 1840 */ 1841 void of_genpd_del_provider(struct device_node *np) 1842 { 1843 struct of_genpd_provider *cp; 1844 1845 mutex_lock(&of_genpd_mutex); 1846 list_for_each_entry(cp, &of_genpd_providers, link) { 1847 if (cp->node == np) { 1848 list_del(&cp->link); 1849 of_node_put(cp->node); 1850 kfree(cp); 1851 break; 1852 } 1853 } 1854 mutex_unlock(&of_genpd_mutex); 1855 } 1856 EXPORT_SYMBOL_GPL(of_genpd_del_provider); 1857 1858 /** 1859 * of_genpd_get_from_provider() - Look-up PM domain 1860 * @genpdspec: OF phandle args to use for look-up 1861 * 1862 * Looks for a PM domain provider under the node specified by @genpdspec and if 1863 * found, uses xlate function of the provider to map phandle args to a PM 1864 * domain. 1865 * 1866 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR() 1867 * on failure. 1868 */ 1869 struct generic_pm_domain *of_genpd_get_from_provider( 1870 struct of_phandle_args *genpdspec) 1871 { 1872 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); 1873 struct of_genpd_provider *provider; 1874 1875 mutex_lock(&of_genpd_mutex); 1876 1877 /* Check if we have such a provider in our array */ 1878 list_for_each_entry(provider, &of_genpd_providers, link) { 1879 if (provider->node == genpdspec->np) 1880 genpd = provider->xlate(genpdspec, provider->data); 1881 if (!IS_ERR(genpd)) 1882 break; 1883 } 1884 1885 mutex_unlock(&of_genpd_mutex); 1886 1887 return genpd; 1888 } 1889 EXPORT_SYMBOL_GPL(of_genpd_get_from_provider); 1890 1891 /** 1892 * genpd_dev_pm_detach - Detach a device from its PM domain. 1893 * @dev: Device to detach. 1894 * @power_off: Currently not used 1895 * 1896 * Try to locate a corresponding generic PM domain, which the device was 1897 * attached to previously. If such is found, the device is detached from it. 1898 */ 1899 static void genpd_dev_pm_detach(struct device *dev, bool power_off) 1900 { 1901 struct generic_pm_domain *pd; 1902 unsigned int i; 1903 int ret = 0; 1904 1905 pd = pm_genpd_lookup_dev(dev); 1906 if (!pd) 1907 return; 1908 1909 dev_dbg(dev, "removing from PM domain %s\n", pd->name); 1910 1911 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) { 1912 ret = pm_genpd_remove_device(pd, dev); 1913 if (ret != -EAGAIN) 1914 break; 1915 1916 mdelay(i); 1917 cond_resched(); 1918 } 1919 1920 if (ret < 0) { 1921 dev_err(dev, "failed to remove from PM domain %s: %d", 1922 pd->name, ret); 1923 return; 1924 } 1925 1926 /* Check if PM domain can be powered off after removing this device. 
*/ 1927 genpd_queue_power_off_work(pd); 1928 } 1929 1930 static void genpd_dev_pm_sync(struct device *dev) 1931 { 1932 struct generic_pm_domain *pd; 1933 1934 pd = dev_to_genpd(dev); 1935 if (IS_ERR(pd)) 1936 return; 1937 1938 genpd_queue_power_off_work(pd); 1939 } 1940 1941 /** 1942 * genpd_dev_pm_attach - Attach a device to its PM domain using DT. 1943 * @dev: Device to attach. 1944 * 1945 * Parse device's OF node to find a PM domain specifier. If such is found, 1946 * attaches the device to retrieved pm_domain ops. 1947 * 1948 * Both generic and legacy Samsung-specific DT bindings are supported to keep 1949 * backwards compatibility with existing DTBs. 1950 * 1951 * Returns 0 on successfully attached PM domain or negative error code. Note 1952 * that if a power-domain exists for the device, but it cannot be found or 1953 * turned on, then return -EPROBE_DEFER to ensure that the device is not 1954 * probed and to re-try again later. 1955 */ 1956 int genpd_dev_pm_attach(struct device *dev) 1957 { 1958 struct of_phandle_args pd_args; 1959 struct generic_pm_domain *pd; 1960 unsigned int i; 1961 int ret; 1962 1963 if (!dev->of_node) 1964 return -ENODEV; 1965 1966 if (dev->pm_domain) 1967 return -EEXIST; 1968 1969 ret = of_parse_phandle_with_args(dev->of_node, "power-domains", 1970 "#power-domain-cells", 0, &pd_args); 1971 if (ret < 0) { 1972 if (ret != -ENOENT) 1973 return ret; 1974 1975 /* 1976 * Try legacy Samsung-specific bindings 1977 * (for backwards compatibility of DT ABI) 1978 */ 1979 pd_args.args_count = 0; 1980 pd_args.np = of_parse_phandle(dev->of_node, 1981 "samsung,power-domain", 0); 1982 if (!pd_args.np) 1983 return -ENOENT; 1984 } 1985 1986 pd = of_genpd_get_from_provider(&pd_args); 1987 if (IS_ERR(pd)) { 1988 dev_dbg(dev, "%s() failed to find PM domain: %ld\n", 1989 __func__, PTR_ERR(pd)); 1990 of_node_put(dev->of_node); 1991 return -EPROBE_DEFER; 1992 } 1993 1994 dev_dbg(dev, "adding to PM domain %s\n", pd->name); 1995 1996 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) { 1997 ret = pm_genpd_add_device(pd, dev); 1998 if (ret != -EAGAIN) 1999 break; 2000 2001 mdelay(i); 2002 cond_resched(); 2003 } 2004 2005 if (ret < 0) { 2006 dev_err(dev, "failed to add to PM domain %s: %d", 2007 pd->name, ret); 2008 of_node_put(dev->of_node); 2009 goto out; 2010 } 2011 2012 dev->pm_domain->detach = genpd_dev_pm_detach; 2013 dev->pm_domain->sync = genpd_dev_pm_sync; 2014 ret = pm_genpd_poweron(pd); 2015 2016 out: 2017 return ret ? -EPROBE_DEFER : 0; 2018 } 2019 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); 2020 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ 2021 2022 2023 /*** debugfs support ***/ 2024 2025 #ifdef CONFIG_PM_ADVANCED_DEBUG 2026 #include <linux/pm.h> 2027 #include <linux/device.h> 2028 #include <linux/debugfs.h> 2029 #include <linux/seq_file.h> 2030 #include <linux/init.h> 2031 #include <linux/kobject.h> 2032 static struct dentry *pm_genpd_debugfs_dir; 2033 2034 /* 2035 * TODO: This function is a slightly modified version of rtpm_status_show 2036 * from sysfs.c, so generalize it. 
2037 */ 2038 static void rtpm_status_str(struct seq_file *s, struct device *dev) 2039 { 2040 static const char * const status_lookup[] = { 2041 [RPM_ACTIVE] = "active", 2042 [RPM_RESUMING] = "resuming", 2043 [RPM_SUSPENDED] = "suspended", 2044 [RPM_SUSPENDING] = "suspending" 2045 }; 2046 const char *p = ""; 2047 2048 if (dev->power.runtime_error) 2049 p = "error"; 2050 else if (dev->power.disable_depth) 2051 p = "unsupported"; 2052 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup)) 2053 p = status_lookup[dev->power.runtime_status]; 2054 else 2055 WARN_ON(1); 2056 2057 seq_puts(s, p); 2058 } 2059 2060 static int pm_genpd_summary_one(struct seq_file *s, 2061 struct generic_pm_domain *genpd) 2062 { 2063 static const char * const status_lookup[] = { 2064 [GPD_STATE_ACTIVE] = "on", 2065 [GPD_STATE_POWER_OFF] = "off" 2066 }; 2067 struct pm_domain_data *pm_data; 2068 const char *kobj_path; 2069 struct gpd_link *link; 2070 int ret; 2071 2072 ret = mutex_lock_interruptible(&genpd->lock); 2073 if (ret) 2074 return -ERESTARTSYS; 2075 2076 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) 2077 goto exit; 2078 seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]); 2079 2080 /* 2081 * Modifications on the list require holding locks on both 2082 * master and slave, so we are safe. 2083 * Also genpd->name is immutable. 2084 */ 2085 list_for_each_entry(link, &genpd->master_links, master_node) { 2086 seq_printf(s, "%s", link->slave->name); 2087 if (!list_is_last(&link->master_node, &genpd->master_links)) 2088 seq_puts(s, ", "); 2089 } 2090 2091 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { 2092 kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL); 2093 if (kobj_path == NULL) 2094 continue; 2095 2096 seq_printf(s, "\n %-50s ", kobj_path); 2097 rtpm_status_str(s, pm_data->dev); 2098 kfree(kobj_path); 2099 } 2100 2101 seq_puts(s, "\n"); 2102 exit: 2103 mutex_unlock(&genpd->lock); 2104 2105 return 0; 2106 } 2107 2108 static int pm_genpd_summary_show(struct seq_file *s, void *data) 2109 { 2110 struct generic_pm_domain *genpd; 2111 int ret = 0; 2112 2113 seq_puts(s, "domain status slaves\n"); 2114 seq_puts(s, " /device runtime status\n"); 2115 seq_puts(s, "----------------------------------------------------------------------\n"); 2116 2117 ret = mutex_lock_interruptible(&gpd_list_lock); 2118 if (ret) 2119 return -ERESTARTSYS; 2120 2121 list_for_each_entry(genpd, &gpd_list, gpd_list_node) { 2122 ret = pm_genpd_summary_one(s, genpd); 2123 if (ret) 2124 break; 2125 } 2126 mutex_unlock(&gpd_list_lock); 2127 2128 return ret; 2129 } 2130 2131 static int pm_genpd_summary_open(struct inode *inode, struct file *file) 2132 { 2133 return single_open(file, pm_genpd_summary_show, NULL); 2134 } 2135 2136 static const struct file_operations pm_genpd_summary_fops = { 2137 .open = pm_genpd_summary_open, 2138 .read = seq_read, 2139 .llseek = seq_lseek, 2140 .release = single_release, 2141 }; 2142 2143 static int __init pm_genpd_debug_init(void) 2144 { 2145 struct dentry *d; 2146 2147 pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); 2148 2149 if (!pm_genpd_debugfs_dir) 2150 return -ENOMEM; 2151 2152 d = debugfs_create_file("pm_genpd_summary", S_IRUGO, 2153 pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops); 2154 if (!d) 2155 return -ENOMEM; 2156 2157 return 0; 2158 } 2159 late_initcall(pm_genpd_debug_init); 2160 2161 static void __exit pm_genpd_debug_exit(void) 2162 { 2163 debugfs_remove_recursive(pm_genpd_debugfs_dir); 2164 } 2165 
__exitcall(pm_genpd_debug_exit);
#endif /* CONFIG_PM_ADVANCED_DEBUG */