/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine)						\
		__ret = __routine(dev);				\
	__ret;							\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)
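
/*
 * Illustrative sketch (not part of this file): a domain that may be powered
 * on/off from atomic context sets GENPD_FLAG_IRQ_SAFE before registration,
 * which makes genpd_lock_init() below select genpd_spin_ops instead of the
 * mutex-based ops, so genpd_lock() dispatches to spin_lock_irqsave(). The
 * "foo_pd" name is hypothetical.
 */
#if 0
static struct generic_pm_domain foo_pd = {
	.name = "foo_pd",
	.flags = GENPD_FLAG_IRQ_SAFE,	/* genpd_lock() will use the spinlock */
};
#endif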

#define genpd_status_on(genpd)		(genpd->status == GPD_STATE_ACTIVE)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)

static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a no sleep domain, to
	 * indicate a suboptimal configuration for PM. For an always on domain
	 * this isn't the case, thus don't warn.
	 */
	if (ret && !genpd_is_always_on(genpd))
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	ktime_t delta, now;

	now = ktime_get();
	delta = ktime_sub(now, genpd->accounting_time);

	/*
	 * If genpd->status is active, it means we are just
	 * out of off and so update the idle time and vice
	 * versa.
	 */
	if (genpd->status == GPD_STATE_ACTIVE) {
		int state_idx = genpd->state_idx;

		genpd->states[state_idx].idle_time =
			ktime_add(genpd->states[state_idx].idle_time, delta);
	} else {
		genpd->on_time = ktime_add(genpd->on_time, delta);
	}

	genpd->accounting_time = now;
}
#else
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device.
 *	This can be set as 0 when the device doesn't have any performance
 *	state constraints left (and so the device would no longer take part
 *	in determining the target performance state of the genpd).
 *
 * It is assumed that the caller guarantees the genpd won't be detached while
 * this routine is running.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data, *pd_data;
	struct pm_domain_data *pdd;
	unsigned int prev;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -ENODEV;

	if (unlikely(!genpd->set_performance_state))
		return -EINVAL;

	if (unlikely(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data)) {
		WARN_ON(1);
		return -EINVAL;
	}

	genpd_lock(genpd);

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	prev = gpd_data->performance_state;
	gpd_data->performance_state = state;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		goto unlock;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		goto update_state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	if (state == genpd->performance_state)
		goto unlock;

	/*
	 * We aren't propagating performance state changes of a subdomain to
	 * its masters as we don't have hardware that needs it. Moreover, the
	 * performance states of a subdomain and its masters may not have a
	 * one-to-one mapping and would require additional information. We can
	 * get back to this once we have hardware that needs it. For that
	 * reason, we don't have to consider performance state of the
	 * subdomains of genpd here.
	 */

update_state:
	if (genpd_status_on(genpd)) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret) {
			gpd_data->performance_state = prev;
			goto unlock;
		}
	}

	genpd->performance_state = state;

unlock:
	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
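
/*
 * Usage sketch (illustrative, not part of this file): a consumer driver can
 * vote for a performance level of its domain and drop the vote again by
 * passing state 0. State numbering is provider-specific; "2" below is a
 * hypothetical level.
 */
#if 0
static int foo_set_high_perf(struct device *dev, bool high)
{
	return dev_pm_genpd_set_performance_state(dev, high ? 2 : 0);
}
#endif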

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));

	if (unlikely(genpd->set_performance_state)) {
		ret = genpd->set_performance_state(genpd, genpd->performance_state);
		if (ret) {
			pr_warn("%s: Failed to set performance state %d (%d)\n",
				genpd->name, genpd->performance_state, ret);
		}
	}

	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		return ret;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		return ret;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}
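
/*
 * Illustrative sketch (not part of this file): _genpd_power_on() and
 * _genpd_power_off() bracket the provider callbacks with ktime_get() and
 * record the worst-case latency per state, which a governor may use in its
 * power_down_ok() decision. A hypothetical provider callback measured this
 * way could look like the following; foo_hw_enable_island() is an invented
 * hardware helper.
 */
#if 0
static int foo_pd_power_on(struct generic_pm_domain *pd)
{
	/* The elapsed time of this call updates power_on_latency_ns. */
	return foo_hw_enable_island();
}
#endif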

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback,
 *	the RPM status of the related device is in an intermediate state, not
 *	yet turned into RPM_SUSPENDED. This means genpd_power_off() must allow
 *	one device to not be RPM_SUSPENDED, while it tries to power off the PM
 *	domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_power_on() for the master yet after
		 * incrementing it.  In that case genpd_power_on() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_power_on() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = _genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;
	genpd_update_accounting(genpd);

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_sd_counter_inc(master);

		genpd_lock_nested(master, depth + 1);
		ret = genpd_power_on(master, depth + 1);
		genpd_unlock(master);

		if (ret) {
			genpd_sd_counter_dec(master);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return ret;
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}
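
/*
 * Illustrative note (not part of this file): the two helpers above mirror the
 * runtime PM core's callback selection. A subsystem-level callback (device
 * type, then class, then bus) takes precedence, and the driver's callback is
 * consulted only when no subsystem provides one. E.g., with a hypothetical
 * bus defining runtime PM ops as below, a driver's own ->runtime_suspend()
 * would not be called by __genpd_runtime_suspend():
 */
#if 0
static const struct dev_pm_ops foo_bus_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_bus_runtime_suspend,
			   foo_bus_runtime_resume, NULL)
};
#endif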

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	time_start = 0;
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}
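
/*
 * Usage sketch (illustrative, not part of this file): a consumer driver
 * never calls genpd_runtime_suspend() directly; it drives it through the
 * runtime PM framework. Dropping the last usage count may power off the
 * whole domain. The "foo" function name is hypothetical.
 */
#if 0
static int foo_do_transfer(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev); /* domain on via genpd_runtime_resume() */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... access device registers ... */

	pm_runtime_put(dev); /* may invoke genpd_runtime_suspend() + domain off */
	return 0;
}
#endif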

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non-IRQ-safe domain which holds an IRQ-safe
	 * device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
		timed = false;
		goto out;
	}

	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	time_start = 0;
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

 err_stop:
	genpd_stop_dev(genpd, dev);
 err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) ||
		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);
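
/*
 * Illustrative note (not part of this file): the "pd_ignore_unused" setup
 * handler above means that booting with
 *
 *	pd_ignore_unused
 *
 * on the kernel command line makes genpd_power_off_unused() skip queueing
 * the power-off work, leaving all registered domains powered. This can be
 * useful when debugging a platform whose domains are suspected of being
 * powered off too aggressively.
 */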

#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)

static bool genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

#endif

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_off(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_on(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}

	_genpd_power_on(genpd, false);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system). In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev,
			  const struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_is_active_wakeup(genpd);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
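
/*
 * Illustrative sketch (not part of this file): resume_needed() keys off the
 * standard wakeup knobs. A hypothetical wakeup-capable driver would set them
 * up as below; combined with GENPD_FLAG_ACTIVE_WAKEUP on the domain, genpd
 * then keeps the domain powered while the device may wake up the system.
 */
#if 0
static int foo_setup_wakeup(struct device *dev)
{
	device_set_wakeup_capable(dev, true);
	return device_wakeup_enable(dev);
}
#endif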

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O pm domain.
 * @dev: Device to suspend.
 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (poweroff)
		ret = pm_generic_poweroff_noirq(dev);
	else
		ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			if (poweroff)
				pm_generic_restore_noirq(dev);
			else
				pm_generic_resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, false);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return pm_generic_resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_resume_noirq(dev);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev))
		ret = genpd_stop_dev(genpd, dev);

	return ret;
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_thaw_noirq(dev);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, true);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0)
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to genpd_sync_power_on(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;

	genpd_sync_power_on(genpd, true, 0);
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_restore_noirq(dev);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to power the domain off (true) or on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, false, 0);
	} else {
		genpd_sync_power_on(genpd, false, 0);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	genpd_lock(genpd);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	genpd_unlock(genpd);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, NULL);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);
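
/*
 * Usage sketch (illustrative, not part of this file): platform code can bind
 * a device to a previously initialized domain; gpd_list_lock is taken
 * internally by pm_genpd_add_device(). "foo_pd" is a hypothetical domain.
 */
#if 0
static int foo_bind_device(struct platform_device *pdev)
{
	return pm_genpd_add_device(&foo_pd, &pdev->dev);
}
#endif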

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = genpd_lookup_dev(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
				genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
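
/*
 * Usage sketch (illustrative, not part of this file): linking two
 * hypothetical domains so that powering on the child first powers on the
 * parent, via the master/slave links maintained above.
 */
#if 0
static int foo_link_domains(void)
{
	/* foo_parent_pd and foo_child_pd are assumed already initialized. */
	return pm_genpd_add_subdomain(&foo_parent_pd, &foo_child_pd);
}
#endif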

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
			subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free = state;

	return 0;
}

static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}
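
/*
 * Illustrative sketch (not part of this file): if a provider declares its
 * own idle states before calling pm_genpd_init(), the single default "off"
 * state from genpd_set_default_power_state() is not used. A hypothetical
 * two-state domain, with deeper state at a higher index, could be declared
 * like this (latency values are invented):
 */
#if 0
static struct genpd_power_state foo_pd_states[] = {
	{ .power_on_latency_ns = 50000, .power_off_latency_ns = 50000 },
	{ .power_on_latency_ns = 400000, .power_off_latency_ns = 400000 },
};

/* Before pm_genpd_init():
 *	foo_pd.states = foo_pd_states;
 *	foo_pd.state_count = ARRAY_SIZE(foo_pd_states);
 */
#endif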

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial state of the domain (true if initially powered off).
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* Always-on domains must be powered on at initialization. */
	if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
		return -EINVAL;

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret)
			return ret;
	} else if (!gov) {
		pr_warn("%s: no governor for states\n", genpd->name);
	}

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
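
/*
 * Usage sketch (illustrative, not part of this file): minimal registration
 * of a domain by a hypothetical platform driver. All "foo" names are
 * invented and error handling is elided.
 */
#if 0
static int foo_pd_power_on(struct generic_pm_domain *pd)
{
	return 0;	/* enable the "foo" power island here */
}

static int foo_pd_power_off(struct generic_pm_domain *pd)
{
	return 0;	/* gate the "foo" power island here */
}

static struct generic_pm_domain foo_pd = {
	.name = "foo_pd",
	.power_on = foo_pd_power_on,
	.power_off = foo_pd_power_off,
};

static int foo_pd_probe(struct platform_device *pdev)
{
	/* Register the domain, initially powered on (is_off == false). */
	return pm_genpd_init(&foo_pd, NULL, false);
}
#endif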

static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->master_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	cancel_work_sync(&genpd->power_off_work);
	kfree(genpd->free);
	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed, if the associated provider has
 * been removed, it is not a parent to any other PM domain and has no
 * devices associated with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *	into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}

/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %pOF\n", np);

	return 0;
}

/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret = -EINVAL;

	if (!np || !genpd)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!genpd_present(genpd))
		goto unlock;

	genpd->dev.of_node = np;

	/* Parse genpd OPP table */
	if (genpd->set_performance_state) {
		ret = dev_pm_opp_of_add_table(&genpd->dev);
		if (ret) {
			dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
				ret);
			goto unlock;
		}
	}

	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
	if (ret) {
		if (genpd->set_performance_state)
			dev_pm_opp_of_remove_table(&genpd->dev);

		goto unlock;
	}

	genpd->provider = &np->fwnode;
	genpd->has_provider = true;

 unlock:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
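
/*
 * Usage sketch (illustrative, not part of this file): a hypothetical
 * provider pairs of_genpd_add_provider_simple() with a DT node such as:
 *
 *	foo_pd: power-controller@12340000 {
 *		compatible = "vendor,foo-pd";
 *		#power-domain-cells = <0>;
 *	};
 */
#if 0
static int foo_pd_register_provider(struct platform_device *pdev)
{
	int ret = pm_genpd_init(&foo_pd, NULL, false);

	if (ret)
		return ret;

	return of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
}
#endif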

/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		if (!genpd)
			continue;
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
					i, ret);
				goto error;
			}
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	mutex_unlock(&gpd_list_lock);

	return 0;

 error:
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		if (genpd->set_performance_state)
			dev_pm_opp_of_remove_table(&genpd->dev);
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
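
/*
 * Usage sketch (illustrative, not part of this file): a hypothetical
 * multi-domain provider whose consumers reference domains by index, e.g.
 * power-domains = <&foo_pc 1> with #power-domain-cells = <1> in DT.
 */
#if 0
static struct generic_pm_domain *foo_domains[] = {
	&foo_pd_a,
	&foo_pd_b,
};

static struct genpd_onecell_data foo_onecell_data = {
	.domains = foo_domains,
	.num_domains = ARRAY_SIZE(foo_domains),
	/* .xlate left NULL: genpd_xlate_onecell() is used by default */
};

/* ... of_genpd_add_provider_onecell(np, &foo_onecell_data); */
#endif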
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for PM domain look-up
 * @dev: Device to be added.
 *
 * Looks up an I/O PM domain based upon the phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, NULL);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);

/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks up a parent PM domain and a subdomain based upon the phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);

/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Device node pointer associated with the PM domain provider
 *
 * Finds the last PM domain that was added by a particular provider and
 * removes it from the list of PM domains. The provider is identified by the
 * device node that is passed. The PM domain will only be removed if the
 * provider associated with the domain has been removed.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
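 *
 * An illustrative teardown sequence for a provider: drop the provider first
 * (so 'has_provider' is cleared), then remove its domains one by one:
 *
 *	of_genpd_del_provider(np);
 *	while (!IS_ERR(of_genpd_remove_last(np)))
 *		;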
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);

static void genpd_release_dev(struct device *dev)
{
	kfree(dev);
}

static struct bus_type genpd_bus_type = {
	.name		= "genpd",
};

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate the generic PM domain that the device was previously
 * attached to. If one is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	/* Retry with exponential back-off, capped by GENPD_RETRY_MAX_MS. */
	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);

	/* Unregister the device if it was created by genpd. */
	if (dev->bus == &genpd_bus_type)
		device_unregister(dev);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
				 unsigned int index, bool power_on)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int ret;

	ret = of_parse_phandle_with_args(np, "power-domains",
				"#power-domain-cells", index, &pd_args);
	if (ret < 0)
		return ret;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return driver_deferred_probe_check_state(dev);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, NULL);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to add to PM domain %s: %d\n",
				pd->name, ret);
		return ret;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	if (power_on) {
		genpd_lock(pd);
		ret = genpd_power_on(pd, 0);
		genpd_unlock(pd);
	}

	if (ret)
		genpd_remove_device(pd, dev);

	return ret ? -EPROBE_DEFER : 1;
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse the device's OF node to find a PM domain specifier. If one is found,
 * attach the device to the retrieved pm_domain ops.
 *
 * Returns 1 on a successfully attached PM domain, 0 when the device doesn't
 * need a PM domain or when multiple power-domains exist for it, else a
 * negative error code. Note that if a power-domain exists for the device, but
 * it cannot be found or turned on, then -EPROBE_DEFER is returned to ensure
 * that the device is not probed and to retry later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	if (!dev->of_node)
		return 0;

	/*
	 * Devices with multiple PM domains must be attached separately, as we
	 * can only attach one PM domain per device.
	 */
	if (of_count_phandle_with_args(dev->of_node, "power-domains",
				       "#power-domain-cells") != 1)
		return 0;

	return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);

/**
 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
 * @dev: The device used to look up the PM domain.
 * @index: The index of the PM domain.
 *
 * Parse the device's OF node to find a PM domain specifier at the provided
 * @index. If one is found, create a virtual device and attach it to the
 * retrieved pm_domain ops. To deal with detaching of the virtual device, the
 * ->detach() callback in the struct dev_pm_domain is assigned to
 * genpd_dev_pm_detach().
 *
 * Returns the created virtual device if the PM domain was successfully
 * attached, NULL when the device doesn't need a PM domain, else an ERR_PTR()
 * in case of failures. If a power-domain exists for the device, but cannot be
 * found or turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that
 * the device is not probed and to retry later.
 */
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
					 unsigned int index)
{
	struct device *genpd_dev;
	int num_domains;
	int ret;

	if (!dev->of_node)
		return NULL;

	/* Deal only with devices using multiple PM domains. */
	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
						 "#power-domain-cells");
	if (num_domains < 2 || index >= num_domains)
		return NULL;

	/* Allocate and register device on the genpd bus. */
	genpd_dev = kzalloc(sizeof(*genpd_dev), GFP_KERNEL);
	if (!genpd_dev)
		return ERR_PTR(-ENOMEM);

	dev_set_name(genpd_dev, "genpd:%u:%s", index, dev_name(dev));
	genpd_dev->bus = &genpd_bus_type;
	genpd_dev->release = genpd_release_dev;

	ret = device_register(genpd_dev);
	if (ret) {
		/* Drop the reference; ->release() frees the device. */
		put_device(genpd_dev);
		return ERR_PTR(ret);
	}

	/* Try to attach the device to the PM domain at the specified index. */
	ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
	if (ret < 1) {
		device_unregister(genpd_dev);
		return ret ? ERR_PTR(ret) : NULL;
	}

	pm_runtime_enable(genpd_dev);
	genpd_queue_power_off_work(dev_to_genpd(genpd_dev));

	return genpd_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);

/**
 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
 * @dev: The device used to look up the PM domain.
 * @name: The name of the PM domain.
 *
 * Parse the device's OF node to find a PM domain specifier using the
 * power-domain-names DT property. For further description see
 * genpd_dev_pm_attach_by_id().
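 *
 * Illustrative mapping (hypothetical node content): with
 *
 *	power-domains = <&pd_a>, <&pd_b>;
 *	power-domain-names = "bus", "core";
 *
 * genpd_dev_pm_attach_by_name(dev, "core") matches "core" at index 1 and
 * behaves like genpd_dev_pm_attach_by_id(dev, 1).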
 */
struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name)
{
	int index;

	if (!dev->of_node)
		return NULL;

	index = of_property_match_string(dev->of_node, "power-domain-names",
					 name);
	if (index < 0)
		return NULL;

	return genpd_dev_pm_attach_by_id(dev, index);
}

static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};

static int genpd_parse_state(struct genpd_power_state *genpd_state,
			     struct device_node *state_node)
{
	int err;
	u32 residency;
	u32 entry_latency, exit_latency;

	err = of_property_read_u32(state_node, "entry-latency-us",
				   &entry_latency);
	if (err) {
		pr_debug(" * %pOF missing entry-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "exit-latency-us",
				   &exit_latency);
	if (err) {
		pr_debug(" * %pOF missing exit-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "min-residency-us", &residency);
	if (!err)
		genpd_state->residency_ns = 1000 * residency;

	genpd_state->power_on_latency_ns = 1000 * exit_latency;
	genpd_state->power_off_latency_ns = 1000 * entry_latency;
	genpd_state->fwnode = &state_node->fwnode;

	return 0;
}

static int genpd_iterate_idle_states(struct device_node *dn,
				     struct genpd_power_state *states)
{
	int ret;
	struct of_phandle_iterator it;
	struct device_node *np;
	int i = 0;

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret <= 0)
		return ret;

	/* Loop over the phandles until all the requested entries are found */
	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
		np = it.node;
		if (!of_match_node(idle_state_match, np))
			continue;
		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {
				pr_err("Parsing idle state node %pOF failed with err %d\n",
				       np, ret);
				of_node_put(np);
				return ret;
			}
		}
		i++;
	}

	return i;
}

/**
 * of_genpd_parse_idle_states - Return an array of idle states for the genpd.
 * @dn: The genpd device node
 * @states: The pointer to which the state array will be saved.
 * @n: The count of elements in the array returned from this function.
 *
 * Returns the domain idle states parsed from the OF node. The memory for the
 * states is allocated by this function and it is the caller's responsibility
 * to free it after use. On success 0 is returned, whether or not any
 * compatible domain idle states were found (if none are found, *@states is
 * set to NULL and *@n to 0); in case of errors, a negative error code is
 * returned.
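 *
 * A minimal usage sketch (error handling elided; "genpd" is a hypothetical
 * domain still under initialization):
 *
 *	struct genpd_power_state *states;
 *	int n;
 *
 *	if (!of_genpd_parse_idle_states(dn, &states, &n) && n) {
 *		genpd->states = states;
 *		genpd->state_count = n;
 *	}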
 */
int of_genpd_parse_idle_states(struct device_node *dn,
			       struct genpd_power_state **states, int *n)
{
	struct genpd_power_state *st;
	int ret;

	ret = genpd_iterate_idle_states(dn, NULL);
	if (ret < 0)
		return ret;

	if (!ret) {
		*states = NULL;
		*n = 0;
		return 0;
	}

	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = genpd_iterate_idle_states(dn, st);
	if (ret <= 0) {
		kfree(st);
		return ret < 0 ? ret : -EINVAL;
	}

	*states = st;
	*n = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);

/**
 * of_genpd_opp_to_performance_state - Gets performance state of device's
 * power domain corresponding to a DT node's "required-opps" property.
 * @dev: Device for which the performance-state needs to be found.
 * @np: DT node where the "required-opps" property is present. This can be
 *	the device node itself (if it doesn't have an OPP table) or a node
 *	within the OPP table of a device (if device has an OPP table).
 *
 * Looks up the performance state corresponding to the "required-opps"
 * property of a DT node, using the platform-specific
 * genpd->opp_to_performance_state() callback to translate the power domain
 * OPP to a performance state.
 *
 * Returns the performance state on success and 0 on failure.
 */
unsigned int of_genpd_opp_to_performance_state(struct device *dev,
					       struct device_node *np)
{
	struct generic_pm_domain *genpd;
	struct dev_pm_opp *opp;
	int state = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return 0;

	if (unlikely(!genpd->set_performance_state))
		return 0;

	genpd_lock(genpd);

	opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find required OPP: %ld\n",
			PTR_ERR(opp));
		goto unlock;
	}

	state = genpd->opp_to_performance_state(genpd, opp);
	dev_pm_opp_put(opp);

unlock:
	genpd_unlock(genpd);

	return state;
}
EXPORT_SYMBOL_GPL(of_genpd_opp_to_performance_state);

static int __init genpd_bus_init(void)
{
	return bus_register(&genpd_bus_type);
}
core_initcall(genpd_bus_init);

#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/*** debugfs support ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
2594 */ 2595 static void rtpm_status_str(struct seq_file *s, struct device *dev) 2596 { 2597 static const char * const status_lookup[] = { 2598 [RPM_ACTIVE] = "active", 2599 [RPM_RESUMING] = "resuming", 2600 [RPM_SUSPENDED] = "suspended", 2601 [RPM_SUSPENDING] = "suspending" 2602 }; 2603 const char *p = ""; 2604 2605 if (dev->power.runtime_error) 2606 p = "error"; 2607 else if (dev->power.disable_depth) 2608 p = "unsupported"; 2609 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup)) 2610 p = status_lookup[dev->power.runtime_status]; 2611 else 2612 WARN_ON(1); 2613 2614 seq_puts(s, p); 2615 } 2616 2617 static int genpd_summary_one(struct seq_file *s, 2618 struct generic_pm_domain *genpd) 2619 { 2620 static const char * const status_lookup[] = { 2621 [GPD_STATE_ACTIVE] = "on", 2622 [GPD_STATE_POWER_OFF] = "off" 2623 }; 2624 struct pm_domain_data *pm_data; 2625 const char *kobj_path; 2626 struct gpd_link *link; 2627 char state[16]; 2628 int ret; 2629 2630 ret = genpd_lock_interruptible(genpd); 2631 if (ret) 2632 return -ERESTARTSYS; 2633 2634 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) 2635 goto exit; 2636 if (!genpd_status_on(genpd)) 2637 snprintf(state, sizeof(state), "%s-%u", 2638 status_lookup[genpd->status], genpd->state_idx); 2639 else 2640 snprintf(state, sizeof(state), "%s", 2641 status_lookup[genpd->status]); 2642 seq_printf(s, "%-30s %-15s ", genpd->name, state); 2643 2644 /* 2645 * Modifications on the list require holding locks on both 2646 * master and slave, so we are safe. 2647 * Also genpd->name is immutable. 2648 */ 2649 list_for_each_entry(link, &genpd->master_links, master_node) { 2650 seq_printf(s, "%s", link->slave->name); 2651 if (!list_is_last(&link->master_node, &genpd->master_links)) 2652 seq_puts(s, ", "); 2653 } 2654 2655 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { 2656 kobj_path = kobject_get_path(&pm_data->dev->kobj, 2657 genpd_is_irq_safe(genpd) ? 
2658 GFP_ATOMIC : GFP_KERNEL); 2659 if (kobj_path == NULL) 2660 continue; 2661 2662 seq_printf(s, "\n %-50s ", kobj_path); 2663 rtpm_status_str(s, pm_data->dev); 2664 kfree(kobj_path); 2665 } 2666 2667 seq_puts(s, "\n"); 2668 exit: 2669 genpd_unlock(genpd); 2670 2671 return 0; 2672 } 2673 2674 static int genpd_summary_show(struct seq_file *s, void *data) 2675 { 2676 struct generic_pm_domain *genpd; 2677 int ret = 0; 2678 2679 seq_puts(s, "domain status slaves\n"); 2680 seq_puts(s, " /device runtime status\n"); 2681 seq_puts(s, "----------------------------------------------------------------------\n"); 2682 2683 ret = mutex_lock_interruptible(&gpd_list_lock); 2684 if (ret) 2685 return -ERESTARTSYS; 2686 2687 list_for_each_entry(genpd, &gpd_list, gpd_list_node) { 2688 ret = genpd_summary_one(s, genpd); 2689 if (ret) 2690 break; 2691 } 2692 mutex_unlock(&gpd_list_lock); 2693 2694 return ret; 2695 } 2696 2697 static int genpd_status_show(struct seq_file *s, void *data) 2698 { 2699 static const char * const status_lookup[] = { 2700 [GPD_STATE_ACTIVE] = "on", 2701 [GPD_STATE_POWER_OFF] = "off" 2702 }; 2703 2704 struct generic_pm_domain *genpd = s->private; 2705 int ret = 0; 2706 2707 ret = genpd_lock_interruptible(genpd); 2708 if (ret) 2709 return -ERESTARTSYS; 2710 2711 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) 2712 goto exit; 2713 2714 if (genpd->status == GPD_STATE_POWER_OFF) 2715 seq_printf(s, "%s-%u\n", status_lookup[genpd->status], 2716 genpd->state_idx); 2717 else 2718 seq_printf(s, "%s\n", status_lookup[genpd->status]); 2719 exit: 2720 genpd_unlock(genpd); 2721 return ret; 2722 } 2723 2724 static int genpd_sub_domains_show(struct seq_file *s, void *data) 2725 { 2726 struct generic_pm_domain *genpd = s->private; 2727 struct gpd_link *link; 2728 int ret = 0; 2729 2730 ret = genpd_lock_interruptible(genpd); 2731 if (ret) 2732 return -ERESTARTSYS; 2733 2734 list_for_each_entry(link, &genpd->master_links, master_node) 2735 seq_printf(s, "%s\n", link->slave->name); 2736 2737 genpd_unlock(genpd); 2738 return ret; 2739 } 2740 2741 static int genpd_idle_states_show(struct seq_file *s, void *data) 2742 { 2743 struct generic_pm_domain *genpd = s->private; 2744 unsigned int i; 2745 int ret = 0; 2746 2747 ret = genpd_lock_interruptible(genpd); 2748 if (ret) 2749 return -ERESTARTSYS; 2750 2751 seq_puts(s, "State Time Spent(ms)\n"); 2752 2753 for (i = 0; i < genpd->state_count; i++) { 2754 ktime_t delta = 0; 2755 s64 msecs; 2756 2757 if ((genpd->status == GPD_STATE_POWER_OFF) && 2758 (genpd->state_idx == i)) 2759 delta = ktime_sub(ktime_get(), genpd->accounting_time); 2760 2761 msecs = ktime_to_ms( 2762 ktime_add(genpd->states[i].idle_time, delta)); 2763 seq_printf(s, "S%-13i %lld\n", i, msecs); 2764 } 2765 2766 genpd_unlock(genpd); 2767 return ret; 2768 } 2769 2770 static int genpd_active_time_show(struct seq_file *s, void *data) 2771 { 2772 struct generic_pm_domain *genpd = s->private; 2773 ktime_t delta = 0; 2774 int ret = 0; 2775 2776 ret = genpd_lock_interruptible(genpd); 2777 if (ret) 2778 return -ERESTARTSYS; 2779 2780 if (genpd->status == GPD_STATE_ACTIVE) 2781 delta = ktime_sub(ktime_get(), genpd->accounting_time); 2782 2783 seq_printf(s, "%lld ms\n", ktime_to_ms( 2784 ktime_add(genpd->on_time, delta))); 2785 2786 genpd_unlock(genpd); 2787 return ret; 2788 } 2789 2790 static int genpd_total_idle_time_show(struct seq_file *s, void *data) 2791 { 2792 struct generic_pm_domain *genpd = s->private; 2793 ktime_t delta = 0, total = 0; 2794 unsigned int i; 2795 int ret = 0; 2796 
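	/*
	 * Sum the accumulated idle time of all states; if the domain is
	 * currently powered off, the time spent so far in the current state
	 * (since accounting_time) has not been folded in yet, so add that
	 * delta on top.
	 */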
2797 ret = genpd_lock_interruptible(genpd); 2798 if (ret) 2799 return -ERESTARTSYS; 2800 2801 for (i = 0; i < genpd->state_count; i++) { 2802 2803 if ((genpd->status == GPD_STATE_POWER_OFF) && 2804 (genpd->state_idx == i)) 2805 delta = ktime_sub(ktime_get(), genpd->accounting_time); 2806 2807 total = ktime_add(total, genpd->states[i].idle_time); 2808 } 2809 total = ktime_add(total, delta); 2810 2811 seq_printf(s, "%lld ms\n", ktime_to_ms(total)); 2812 2813 genpd_unlock(genpd); 2814 return ret; 2815 } 2816 2817 2818 static int genpd_devices_show(struct seq_file *s, void *data) 2819 { 2820 struct generic_pm_domain *genpd = s->private; 2821 struct pm_domain_data *pm_data; 2822 const char *kobj_path; 2823 int ret = 0; 2824 2825 ret = genpd_lock_interruptible(genpd); 2826 if (ret) 2827 return -ERESTARTSYS; 2828 2829 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { 2830 kobj_path = kobject_get_path(&pm_data->dev->kobj, 2831 genpd_is_irq_safe(genpd) ? 2832 GFP_ATOMIC : GFP_KERNEL); 2833 if (kobj_path == NULL) 2834 continue; 2835 2836 seq_printf(s, "%s\n", kobj_path); 2837 kfree(kobj_path); 2838 } 2839 2840 genpd_unlock(genpd); 2841 return ret; 2842 } 2843 2844 static int genpd_perf_state_show(struct seq_file *s, void *data) 2845 { 2846 struct generic_pm_domain *genpd = s->private; 2847 2848 if (genpd_lock_interruptible(genpd)) 2849 return -ERESTARTSYS; 2850 2851 seq_printf(s, "%u\n", genpd->performance_state); 2852 2853 genpd_unlock(genpd); 2854 return 0; 2855 } 2856 2857 #define define_genpd_open_function(name) \ 2858 static int genpd_##name##_open(struct inode *inode, struct file *file) \ 2859 { \ 2860 return single_open(file, genpd_##name##_show, inode->i_private); \ 2861 } 2862 2863 define_genpd_open_function(summary); 2864 define_genpd_open_function(status); 2865 define_genpd_open_function(sub_domains); 2866 define_genpd_open_function(idle_states); 2867 define_genpd_open_function(active_time); 2868 define_genpd_open_function(total_idle_time); 2869 define_genpd_open_function(devices); 2870 define_genpd_open_function(perf_state); 2871 2872 #define define_genpd_debugfs_fops(name) \ 2873 static const struct file_operations genpd_##name##_fops = { \ 2874 .open = genpd_##name##_open, \ 2875 .read = seq_read, \ 2876 .llseek = seq_lseek, \ 2877 .release = single_release, \ 2878 } 2879 2880 define_genpd_debugfs_fops(summary); 2881 define_genpd_debugfs_fops(status); 2882 define_genpd_debugfs_fops(sub_domains); 2883 define_genpd_debugfs_fops(idle_states); 2884 define_genpd_debugfs_fops(active_time); 2885 define_genpd_debugfs_fops(total_idle_time); 2886 define_genpd_debugfs_fops(devices); 2887 define_genpd_debugfs_fops(perf_state); 2888 2889 static int __init genpd_debug_init(void) 2890 { 2891 struct dentry *d; 2892 struct generic_pm_domain *genpd; 2893 2894 genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); 2895 2896 if (!genpd_debugfs_dir) 2897 return -ENOMEM; 2898 2899 d = debugfs_create_file("pm_genpd_summary", S_IRUGO, 2900 genpd_debugfs_dir, NULL, &genpd_summary_fops); 2901 if (!d) 2902 return -ENOMEM; 2903 2904 list_for_each_entry(genpd, &gpd_list, gpd_list_node) { 2905 d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); 2906 if (!d) 2907 return -ENOMEM; 2908 2909 debugfs_create_file("current_state", 0444, 2910 d, genpd, &genpd_status_fops); 2911 debugfs_create_file("sub_domains", 0444, 2912 d, genpd, &genpd_sub_domains_fops); 2913 debugfs_create_file("idle_states", 0444, 2914 d, genpd, &genpd_idle_states_fops); 2915 debugfs_create_file("active_time", 0444, 2916 d, genpd, 
&genpd_active_time_fops); 2917 debugfs_create_file("total_idle_time", 0444, 2918 d, genpd, &genpd_total_idle_time_fops); 2919 debugfs_create_file("devices", 0444, 2920 d, genpd, &genpd_devices_fops); 2921 if (genpd->set_performance_state) 2922 debugfs_create_file("perf_state", 0444, 2923 d, genpd, &genpd_perf_state_fops); 2924 } 2925 2926 return 0; 2927 } 2928 late_initcall(genpd_debug_init); 2929 2930 static void __exit genpd_debug_exit(void) 2931 { 2932 debugfs_remove_recursive(genpd_debugfs_dir); 2933 } 2934 __exitcall(genpd_debug_exit); 2935 #endif /* CONFIG_DEBUG_FS */ 2936
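/*
 * Illustrative inspection of the debugfs interface created above, assuming
 * CONFIG_DEBUG_FS is enabled and debugfs is mounted at /sys/kernel/debug:
 *
 *	# cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
 *	# cat /sys/kernel/debug/pm_genpd/<domain>/current_state
 *	# cat /sys/kernel/debug/pm_genpd/<domain>/total_idle_time
 */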