1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * drivers/base/power/domain.c - Common code related to device power domains. 4 * 5 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. 6 */ 7 #define pr_fmt(fmt) "PM: " fmt 8 9 #include <linux/delay.h> 10 #include <linux/kernel.h> 11 #include <linux/io.h> 12 #include <linux/platform_device.h> 13 #include <linux/pm_opp.h> 14 #include <linux/pm_runtime.h> 15 #include <linux/pm_domain.h> 16 #include <linux/pm_qos.h> 17 #include <linux/pm_clock.h> 18 #include <linux/slab.h> 19 #include <linux/err.h> 20 #include <linux/sched.h> 21 #include <linux/suspend.h> 22 #include <linux/export.h> 23 #include <linux/cpu.h> 24 #include <linux/debugfs.h> 25 26 #include "power.h" 27 28 #define GENPD_RETRY_MAX_MS 250 /* Approximate */ 29 30 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ 31 ({ \ 32 type (*__routine)(struct device *__d); \ 33 type __ret = (type)0; \ 34 \ 35 __routine = genpd->dev_ops.callback; \ 36 if (__routine) { \ 37 __ret = __routine(dev); \ 38 } \ 39 __ret; \ 40 }) 41 42 static LIST_HEAD(gpd_list); 43 static DEFINE_MUTEX(gpd_list_lock); 44 45 struct genpd_lock_ops { 46 void (*lock)(struct generic_pm_domain *genpd); 47 void (*lock_nested)(struct generic_pm_domain *genpd, int depth); 48 int (*lock_interruptible)(struct generic_pm_domain *genpd); 49 void (*unlock)(struct generic_pm_domain *genpd); 50 }; 51 52 static void genpd_lock_mtx(struct generic_pm_domain *genpd) 53 { 54 mutex_lock(&genpd->mlock); 55 } 56 57 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd, 58 int depth) 59 { 60 mutex_lock_nested(&genpd->mlock, depth); 61 } 62 63 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd) 64 { 65 return mutex_lock_interruptible(&genpd->mlock); 66 } 67 68 static void genpd_unlock_mtx(struct generic_pm_domain *genpd) 69 { 70 return mutex_unlock(&genpd->mlock); 71 } 72 73 static const struct genpd_lock_ops genpd_mtx_ops = { 74 .lock = genpd_lock_mtx, 75 .lock_nested = genpd_lock_nested_mtx, 76 .lock_interruptible = genpd_lock_interruptible_mtx, 77 .unlock = genpd_unlock_mtx, 78 }; 79 80 static void genpd_lock_spin(struct generic_pm_domain *genpd) 81 __acquires(&genpd->slock) 82 { 83 unsigned long flags; 84 85 spin_lock_irqsave(&genpd->slock, flags); 86 genpd->lock_flags = flags; 87 } 88 89 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd, 90 int depth) 91 __acquires(&genpd->slock) 92 { 93 unsigned long flags; 94 95 spin_lock_irqsave_nested(&genpd->slock, flags, depth); 96 genpd->lock_flags = flags; 97 } 98 99 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd) 100 __acquires(&genpd->slock) 101 { 102 unsigned long flags; 103 104 spin_lock_irqsave(&genpd->slock, flags); 105 genpd->lock_flags = flags; 106 return 0; 107 } 108 109 static void genpd_unlock_spin(struct generic_pm_domain *genpd) 110 __releases(&genpd->slock) 111 { 112 spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags); 113 } 114 115 static const struct genpd_lock_ops genpd_spin_ops = { 116 .lock = genpd_lock_spin, 117 .lock_nested = genpd_lock_nested_spin, 118 .lock_interruptible = genpd_lock_interruptible_spin, 119 .unlock = genpd_unlock_spin, 120 }; 121 122 #define genpd_lock(p) p->lock_ops->lock(p) 123 #define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d) 124 #define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p) 125 #define genpd_unlock(p) p->lock_ops->unlock(p) 126 127 #define genpd_status_on(genpd) (genpd->status == GENPD_STATE_ON) 128 #define 
genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)

static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a no sleep domain, as
	 * to indicate a suboptimal configuration for PM. For an always on
	 * domain this isn't the case, thus don't warn.
	 */
	if (ret && !genpd_is_always_on(genpd))
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	struct dentry *d;

	d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
	debugfs_remove(d);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	ktime_t delta, now;

	now = ktime_get();
	delta = ktime_sub(now, genpd->accounting_time);

	/*
	 * If genpd->status is active, it means we have just come out of the
	 * off state, so update the idle time, and vice versa.
	 */
	if (genpd->status == GENPD_STATE_ON) {
		int state_idx = genpd->state_idx;

		genpd->states[state_idx].idle_time =
			ktime_add(genpd->states[state_idx].idle_time, delta);
	} else {
		genpd->on_time = ktime_add(genpd->on_time, delta);
	}

	genpd->accounting_time = now;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is the same as the max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than the max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take the votes from powered-off sub-domains into
	 * account, as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}

static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *parent;
	struct gpd_link *link;
	int parent_state, ret;

	if (state == genpd->performance_state)
		return 0;

	/* Propagate to parents of genpd */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		parent = link->parent;

		/* Find parent's performance state */
		ret = genpd_xlate_performance_state(genpd, parent, state);
		if (unlikely(ret < 0))
			goto err;

		parent_state = ret;

		genpd_lock_nested(parent, depth + 1);

		link->prev_performance_state = link->performance_state;
		link->performance_state = parent_state;
		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(parent);

		if (ret)
			goto err;
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret)
			goto err;
	}

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, let's roll back */
	list_for_each_entry_continue_reverse(link, &genpd->child_links,
					     child_node) {
		parent = link->parent;

		genpd_lock_nested(parent, depth + 1);

		parent_state = link->prev_performance_state;
		link->performance_state = parent_state;

		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       parent->name, parent_state);
		}

		genpd_unlock(parent);
	}

	return ret;
}

static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}

static int genpd_drop_performance_state(struct device *dev)
{
	unsigned int prev_state = dev_gpd_data(dev)->performance_state;

	if (!genpd_set_performance_state(dev, 0))
		return prev_state;

	return 0;
}

static void genpd_restore_performance_state(struct device *dev,
					    unsigned int state)
{
	if (state)
		genpd_set_performance_state(dev, state);
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when the
 *	   device doesn't have any performance state constraints left (and so
 *	   the device no longer participates in determining the target
 *	   performance state of the genpd).
 *
 * It is assumed that the user guarantees that the genpd won't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);

/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform the PM framework of the next wakeup. It's assumed
 * that the user guarantees that the genpd won't be detached while this routine
 * is getting called. Additionally, it's also assumed that @dev isn't runtime
 * suspended (RPM_SUSPENDED).
 *
 * Although devices are expected to update the next_wakeup after the end of
 * their use case as well, it is possible the devices themselves may not know
 * about that, so stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain_data *gpd_data;
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	gpd_data->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
612 */ 613 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, 614 unsigned int depth) 615 { 616 struct pm_domain_data *pdd; 617 struct gpd_link *link; 618 unsigned int not_suspended = 0; 619 int ret; 620 621 /* 622 * Do not try to power off the domain in the following situations: 623 * (1) The domain is already in the "power off" state. 624 * (2) System suspend is in progress. 625 */ 626 if (!genpd_status_on(genpd) || genpd->prepared_count > 0) 627 return 0; 628 629 /* 630 * Abort power off for the PM domain in the following situations: 631 * (1) The domain is configured as always on. 632 * (2) When the domain has a subdomain being powered on. 633 */ 634 if (genpd_is_always_on(genpd) || 635 genpd_is_rpm_always_on(genpd) || 636 atomic_read(&genpd->sd_count) > 0) 637 return -EBUSY; 638 639 /* 640 * The children must be in their deepest (powered-off) states to allow 641 * the parent to be powered off. Note that, there's no need for 642 * additional locking, as powering on a child, requires the parent's 643 * lock to be acquired first. 644 */ 645 list_for_each_entry(link, &genpd->parent_links, parent_node) { 646 struct generic_pm_domain *child = link->child; 647 if (child->state_idx < child->state_count - 1) 648 return -EBUSY; 649 } 650 651 list_for_each_entry(pdd, &genpd->dev_list, list_node) { 652 enum pm_qos_flags_status stat; 653 654 stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF); 655 if (stat > PM_QOS_FLAGS_NONE) 656 return -EBUSY; 657 658 /* 659 * Do not allow PM domain to be powered off, when an IRQ safe 660 * device is part of a non-IRQ safe domain. 661 */ 662 if (!pm_runtime_suspended(pdd->dev) || 663 irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd)) 664 not_suspended++; 665 } 666 667 if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on)) 668 return -EBUSY; 669 670 if (genpd->gov && genpd->gov->power_down_ok) { 671 if (!genpd->gov->power_down_ok(&genpd->domain)) 672 return -EAGAIN; 673 } 674 675 /* Default to shallowest state. */ 676 if (!genpd->gov) 677 genpd->state_idx = 0; 678 679 /* Don't power off, if a child domain is waiting to power on. */ 680 if (atomic_read(&genpd->sd_count) > 0) 681 return -EBUSY; 682 683 ret = _genpd_power_off(genpd, true); 684 if (ret) { 685 genpd->states[genpd->state_idx].rejected++; 686 return ret; 687 } 688 689 genpd->status = GENPD_STATE_OFF; 690 genpd_update_accounting(genpd); 691 genpd->states[genpd->state_idx].usage++; 692 693 list_for_each_entry(link, &genpd->child_links, child_node) { 694 genpd_sd_counter_dec(link->parent); 695 genpd_lock_nested(link->parent, depth + 1); 696 genpd_power_off(link->parent, false, depth + 1); 697 genpd_unlock(link->parent); 698 } 699 700 return 0; 701 } 702 703 /** 704 * genpd_power_on - Restore power to a given PM domain and its parents. 705 * @genpd: PM domain to power up. 706 * @depth: nesting count for lockdep. 707 * 708 * Restore power to @genpd and all of its parents so that it is possible to 709 * resume a device belonging to it. 710 */ 711 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) 712 { 713 struct gpd_link *link; 714 int ret = 0; 715 716 if (genpd_status_on(genpd)) 717 return 0; 718 719 /* 720 * The list is guaranteed not to change while the loop below is being 721 * executed, unless one of the parents' .power_on() callbacks fiddles 722 * with it. 
723 */ 724 list_for_each_entry(link, &genpd->child_links, child_node) { 725 struct generic_pm_domain *parent = link->parent; 726 727 genpd_sd_counter_inc(parent); 728 729 genpd_lock_nested(parent, depth + 1); 730 ret = genpd_power_on(parent, depth + 1); 731 genpd_unlock(parent); 732 733 if (ret) { 734 genpd_sd_counter_dec(parent); 735 goto err; 736 } 737 } 738 739 ret = _genpd_power_on(genpd, true); 740 if (ret) 741 goto err; 742 743 genpd->status = GENPD_STATE_ON; 744 genpd_update_accounting(genpd); 745 746 return 0; 747 748 err: 749 list_for_each_entry_continue_reverse(link, 750 &genpd->child_links, 751 child_node) { 752 genpd_sd_counter_dec(link->parent); 753 genpd_lock_nested(link->parent, depth + 1); 754 genpd_power_off(link->parent, false, depth + 1); 755 genpd_unlock(link->parent); 756 } 757 758 return ret; 759 } 760 761 static int genpd_dev_pm_start(struct device *dev) 762 { 763 struct generic_pm_domain *genpd = dev_to_genpd(dev); 764 765 return genpd_start_dev(genpd, dev); 766 } 767 768 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, 769 unsigned long val, void *ptr) 770 { 771 struct generic_pm_domain_data *gpd_data; 772 struct device *dev; 773 774 gpd_data = container_of(nb, struct generic_pm_domain_data, nb); 775 dev = gpd_data->base.dev; 776 777 for (;;) { 778 struct generic_pm_domain *genpd; 779 struct pm_domain_data *pdd; 780 781 spin_lock_irq(&dev->power.lock); 782 783 pdd = dev->power.subsys_data ? 784 dev->power.subsys_data->domain_data : NULL; 785 if (pdd) { 786 to_gpd_data(pdd)->td.constraint_changed = true; 787 genpd = dev_to_genpd(dev); 788 } else { 789 genpd = ERR_PTR(-ENODATA); 790 } 791 792 spin_unlock_irq(&dev->power.lock); 793 794 if (!IS_ERR(genpd)) { 795 genpd_lock(genpd); 796 genpd->max_off_time_changed = true; 797 genpd_unlock(genpd); 798 } 799 800 dev = dev->parent; 801 if (!dev || dev->power.ignore_children) 802 break; 803 } 804 805 return NOTIFY_DONE; 806 } 807 808 /** 809 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0. 810 * @work: Work structure used for scheduling the execution of this function. 811 */ 812 static void genpd_power_off_work_fn(struct work_struct *work) 813 { 814 struct generic_pm_domain *genpd; 815 816 genpd = container_of(work, struct generic_pm_domain, power_off_work); 817 818 genpd_lock(genpd); 819 genpd_power_off(genpd, false, 0); 820 genpd_unlock(genpd); 821 } 822 823 /** 824 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks 825 * @dev: Device to handle. 826 */ 827 static int __genpd_runtime_suspend(struct device *dev) 828 { 829 int (*cb)(struct device *__dev); 830 831 if (dev->type && dev->type->pm) 832 cb = dev->type->pm->runtime_suspend; 833 else if (dev->class && dev->class->pm) 834 cb = dev->class->pm->runtime_suspend; 835 else if (dev->bus && dev->bus->pm) 836 cb = dev->bus->pm->runtime_suspend; 837 else 838 cb = NULL; 839 840 if (!cb && dev->driver && dev->driver->pm) 841 cb = dev->driver->pm->runtime_suspend; 842 843 return cb ? cb(dev) : 0; 844 } 845 846 /** 847 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks 848 * @dev: Device to handle. 
849 */ 850 static int __genpd_runtime_resume(struct device *dev) 851 { 852 int (*cb)(struct device *__dev); 853 854 if (dev->type && dev->type->pm) 855 cb = dev->type->pm->runtime_resume; 856 else if (dev->class && dev->class->pm) 857 cb = dev->class->pm->runtime_resume; 858 else if (dev->bus && dev->bus->pm) 859 cb = dev->bus->pm->runtime_resume; 860 else 861 cb = NULL; 862 863 if (!cb && dev->driver && dev->driver->pm) 864 cb = dev->driver->pm->runtime_resume; 865 866 return cb ? cb(dev) : 0; 867 } 868 869 /** 870 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain. 871 * @dev: Device to suspend. 872 * 873 * Carry out a runtime suspend of a device under the assumption that its 874 * pm_domain field points to the domain member of an object of type 875 * struct generic_pm_domain representing a PM domain consisting of I/O devices. 876 */ 877 static int genpd_runtime_suspend(struct device *dev) 878 { 879 struct generic_pm_domain *genpd; 880 bool (*suspend_ok)(struct device *__dev); 881 struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev); 882 struct gpd_timing_data *td = &gpd_data->td; 883 bool runtime_pm = pm_runtime_enabled(dev); 884 ktime_t time_start; 885 s64 elapsed_ns; 886 int ret; 887 888 dev_dbg(dev, "%s()\n", __func__); 889 890 genpd = dev_to_genpd(dev); 891 if (IS_ERR(genpd)) 892 return -EINVAL; 893 894 /* 895 * A runtime PM centric subsystem/driver may re-use the runtime PM 896 * callbacks for other purposes than runtime PM. In those scenarios 897 * runtime PM is disabled. Under these circumstances, we shall skip 898 * validating/measuring the PM QoS latency. 899 */ 900 suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL; 901 if (runtime_pm && suspend_ok && !suspend_ok(dev)) 902 return -EBUSY; 903 904 /* Measure suspend latency. */ 905 time_start = 0; 906 if (runtime_pm) 907 time_start = ktime_get(); 908 909 ret = __genpd_runtime_suspend(dev); 910 if (ret) 911 return ret; 912 913 ret = genpd_stop_dev(genpd, dev); 914 if (ret) { 915 __genpd_runtime_resume(dev); 916 return ret; 917 } 918 919 /* Update suspend latency value if the measured time exceeds it. */ 920 if (runtime_pm) { 921 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 922 if (elapsed_ns > td->suspend_latency_ns) { 923 td->suspend_latency_ns = elapsed_ns; 924 dev_dbg(dev, "suspend latency exceeded, %lld ns\n", 925 elapsed_ns); 926 genpd->max_off_time_changed = true; 927 td->constraint_changed = true; 928 } 929 } 930 931 /* 932 * If power.irq_safe is set, this routine may be run with 933 * IRQs disabled, so suspend only if the PM domain also is irq_safe. 934 */ 935 if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) 936 return 0; 937 938 genpd_lock(genpd); 939 gpd_data->rpm_pstate = genpd_drop_performance_state(dev); 940 genpd_power_off(genpd, true, 0); 941 genpd_unlock(genpd); 942 943 return 0; 944 } 945 946 /** 947 * genpd_runtime_resume - Resume a device belonging to I/O PM domain. 948 * @dev: Device to resume. 949 * 950 * Carry out a runtime resume of a device under the assumption that its 951 * pm_domain field points to the domain member of an object of type 952 * struct generic_pm_domain representing a PM domain consisting of I/O devices. 
953 */ 954 static int genpd_runtime_resume(struct device *dev) 955 { 956 struct generic_pm_domain *genpd; 957 struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev); 958 struct gpd_timing_data *td = &gpd_data->td; 959 bool runtime_pm = pm_runtime_enabled(dev); 960 ktime_t time_start; 961 s64 elapsed_ns; 962 int ret; 963 bool timed = true; 964 965 dev_dbg(dev, "%s()\n", __func__); 966 967 genpd = dev_to_genpd(dev); 968 if (IS_ERR(genpd)) 969 return -EINVAL; 970 971 /* 972 * As we don't power off a non IRQ safe domain, which holds 973 * an IRQ safe device, we don't need to restore power to it. 974 */ 975 if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) { 976 timed = false; 977 goto out; 978 } 979 980 genpd_lock(genpd); 981 ret = genpd_power_on(genpd, 0); 982 if (!ret) 983 genpd_restore_performance_state(dev, gpd_data->rpm_pstate); 984 genpd_unlock(genpd); 985 986 if (ret) 987 return ret; 988 989 out: 990 /* Measure resume latency. */ 991 time_start = 0; 992 if (timed && runtime_pm) 993 time_start = ktime_get(); 994 995 ret = genpd_start_dev(genpd, dev); 996 if (ret) 997 goto err_poweroff; 998 999 ret = __genpd_runtime_resume(dev); 1000 if (ret) 1001 goto err_stop; 1002 1003 /* Update resume latency value if the measured time exceeds it. */ 1004 if (timed && runtime_pm) { 1005 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 1006 if (elapsed_ns > td->resume_latency_ns) { 1007 td->resume_latency_ns = elapsed_ns; 1008 dev_dbg(dev, "resume latency exceeded, %lld ns\n", 1009 elapsed_ns); 1010 genpd->max_off_time_changed = true; 1011 td->constraint_changed = true; 1012 } 1013 } 1014 1015 return 0; 1016 1017 err_stop: 1018 genpd_stop_dev(genpd, dev); 1019 err_poweroff: 1020 if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) { 1021 genpd_lock(genpd); 1022 gpd_data->rpm_pstate = genpd_drop_performance_state(dev); 1023 genpd_power_off(genpd, true, 0); 1024 genpd_unlock(genpd); 1025 } 1026 1027 return ret; 1028 } 1029 1030 static bool pd_ignore_unused; 1031 static int __init pd_ignore_unused_setup(char *__unused) 1032 { 1033 pd_ignore_unused = true; 1034 return 1; 1035 } 1036 __setup("pd_ignore_unused", pd_ignore_unused_setup); 1037 1038 /** 1039 * genpd_power_off_unused - Power off all PM domains with no devices in use. 1040 */ 1041 static int __init genpd_power_off_unused(void) 1042 { 1043 struct generic_pm_domain *genpd; 1044 1045 if (pd_ignore_unused) { 1046 pr_warn("genpd: Not disabling unused power domains\n"); 1047 return 0; 1048 } 1049 1050 mutex_lock(&gpd_list_lock); 1051 1052 list_for_each_entry(genpd, &gpd_list, gpd_list_node) 1053 genpd_queue_power_off_work(genpd); 1054 1055 mutex_unlock(&gpd_list_lock); 1056 1057 return 0; 1058 } 1059 late_initcall(genpd_power_off_unused); 1060 1061 #ifdef CONFIG_PM_SLEEP 1062 1063 /** 1064 * genpd_sync_power_off - Synchronously power off a PM domain and its parents. 1065 * @genpd: PM domain to power off, if possible. 1066 * @use_lock: use the lock. 1067 * @depth: nesting count for lockdep. 1068 * 1069 * Check if the given PM domain can be powered off (during system suspend or 1070 * hibernation) and do that if so. Also, in that case propagate to its parents. 1071 * 1072 * This function is only called in "noirq" and "syscore" stages of system power 1073 * transitions. The "noirq" callbacks may be executed asynchronously, thus in 1074 * these cases the lock must be held. 
1075 */ 1076 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock, 1077 unsigned int depth) 1078 { 1079 struct gpd_link *link; 1080 1081 if (!genpd_status_on(genpd) || genpd_is_always_on(genpd)) 1082 return; 1083 1084 if (genpd->suspended_count != genpd->device_count 1085 || atomic_read(&genpd->sd_count) > 0) 1086 return; 1087 1088 /* Check that the children are in their deepest (powered-off) state. */ 1089 list_for_each_entry(link, &genpd->parent_links, parent_node) { 1090 struct generic_pm_domain *child = link->child; 1091 if (child->state_idx < child->state_count - 1) 1092 return; 1093 } 1094 1095 /* Choose the deepest state when suspending */ 1096 genpd->state_idx = genpd->state_count - 1; 1097 if (_genpd_power_off(genpd, false)) 1098 return; 1099 1100 genpd->status = GENPD_STATE_OFF; 1101 1102 list_for_each_entry(link, &genpd->child_links, child_node) { 1103 genpd_sd_counter_dec(link->parent); 1104 1105 if (use_lock) 1106 genpd_lock_nested(link->parent, depth + 1); 1107 1108 genpd_sync_power_off(link->parent, use_lock, depth + 1); 1109 1110 if (use_lock) 1111 genpd_unlock(link->parent); 1112 } 1113 } 1114 1115 /** 1116 * genpd_sync_power_on - Synchronously power on a PM domain and its parents. 1117 * @genpd: PM domain to power on. 1118 * @use_lock: use the lock. 1119 * @depth: nesting count for lockdep. 1120 * 1121 * This function is only called in "noirq" and "syscore" stages of system power 1122 * transitions. The "noirq" callbacks may be executed asynchronously, thus in 1123 * these cases the lock must be held. 1124 */ 1125 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock, 1126 unsigned int depth) 1127 { 1128 struct gpd_link *link; 1129 1130 if (genpd_status_on(genpd)) 1131 return; 1132 1133 list_for_each_entry(link, &genpd->child_links, child_node) { 1134 genpd_sd_counter_inc(link->parent); 1135 1136 if (use_lock) 1137 genpd_lock_nested(link->parent, depth + 1); 1138 1139 genpd_sync_power_on(link->parent, use_lock, depth + 1); 1140 1141 if (use_lock) 1142 genpd_unlock(link->parent); 1143 } 1144 1145 _genpd_power_on(genpd, false); 1146 genpd->status = GENPD_STATE_ON; 1147 } 1148 1149 /** 1150 * genpd_prepare - Start power transition of a device in a PM domain. 1151 * @dev: Device to start the transition of. 1152 * 1153 * Start a power transition of a device (during a system-wide power transition) 1154 * under the assumption that its pm_domain field points to the domain member of 1155 * an object of type struct generic_pm_domain representing a PM domain 1156 * consisting of I/O devices. 1157 */ 1158 static int genpd_prepare(struct device *dev) 1159 { 1160 struct generic_pm_domain *genpd; 1161 int ret; 1162 1163 dev_dbg(dev, "%s()\n", __func__); 1164 1165 genpd = dev_to_genpd(dev); 1166 if (IS_ERR(genpd)) 1167 return -EINVAL; 1168 1169 genpd_lock(genpd); 1170 1171 if (genpd->prepared_count++ == 0) 1172 genpd->suspended_count = 0; 1173 1174 genpd_unlock(genpd); 1175 1176 ret = pm_generic_prepare(dev); 1177 if (ret < 0) { 1178 genpd_lock(genpd); 1179 1180 genpd->prepared_count--; 1181 1182 genpd_unlock(genpd); 1183 } 1184 1185 /* Never return 1, as genpd don't cope with the direct_complete path. */ 1186 return ret >= 0 ? 0 : ret; 1187 } 1188 1189 /** 1190 * genpd_finish_suspend - Completion of suspend or hibernation of device in an 1191 * I/O pm domain. 1192 * @dev: Device to suspend. 1193 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback. 
1194 * 1195 * Stop the device and remove power from the domain if all devices in it have 1196 * been stopped. 1197 */ 1198 static int genpd_finish_suspend(struct device *dev, bool poweroff) 1199 { 1200 struct generic_pm_domain *genpd; 1201 int ret = 0; 1202 1203 genpd = dev_to_genpd(dev); 1204 if (IS_ERR(genpd)) 1205 return -EINVAL; 1206 1207 if (poweroff) 1208 ret = pm_generic_poweroff_noirq(dev); 1209 else 1210 ret = pm_generic_suspend_noirq(dev); 1211 if (ret) 1212 return ret; 1213 1214 if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd)) 1215 return 0; 1216 1217 if (genpd->dev_ops.stop && genpd->dev_ops.start && 1218 !pm_runtime_status_suspended(dev)) { 1219 ret = genpd_stop_dev(genpd, dev); 1220 if (ret) { 1221 if (poweroff) 1222 pm_generic_restore_noirq(dev); 1223 else 1224 pm_generic_resume_noirq(dev); 1225 return ret; 1226 } 1227 } 1228 1229 genpd_lock(genpd); 1230 genpd->suspended_count++; 1231 genpd_sync_power_off(genpd, true, 0); 1232 genpd_unlock(genpd); 1233 1234 return 0; 1235 } 1236 1237 /** 1238 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain. 1239 * @dev: Device to suspend. 1240 * 1241 * Stop the device and remove power from the domain if all devices in it have 1242 * been stopped. 1243 */ 1244 static int genpd_suspend_noirq(struct device *dev) 1245 { 1246 dev_dbg(dev, "%s()\n", __func__); 1247 1248 return genpd_finish_suspend(dev, false); 1249 } 1250 1251 /** 1252 * genpd_resume_noirq - Start of resume of device in an I/O PM domain. 1253 * @dev: Device to resume. 1254 * 1255 * Restore power to the device's PM domain, if necessary, and start the device. 1256 */ 1257 static int genpd_resume_noirq(struct device *dev) 1258 { 1259 struct generic_pm_domain *genpd; 1260 int ret; 1261 1262 dev_dbg(dev, "%s()\n", __func__); 1263 1264 genpd = dev_to_genpd(dev); 1265 if (IS_ERR(genpd)) 1266 return -EINVAL; 1267 1268 if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd)) 1269 return pm_generic_resume_noirq(dev); 1270 1271 genpd_lock(genpd); 1272 genpd_sync_power_on(genpd, true, 0); 1273 genpd->suspended_count--; 1274 genpd_unlock(genpd); 1275 1276 if (genpd->dev_ops.stop && genpd->dev_ops.start && 1277 !pm_runtime_status_suspended(dev)) { 1278 ret = genpd_start_dev(genpd, dev); 1279 if (ret) 1280 return ret; 1281 } 1282 1283 return pm_generic_resume_noirq(dev); 1284 } 1285 1286 /** 1287 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain. 1288 * @dev: Device to freeze. 1289 * 1290 * Carry out a late freeze of a device under the assumption that its 1291 * pm_domain field points to the domain member of an object of type 1292 * struct generic_pm_domain representing a power domain consisting of I/O 1293 * devices. 1294 */ 1295 static int genpd_freeze_noirq(struct device *dev) 1296 { 1297 const struct generic_pm_domain *genpd; 1298 int ret = 0; 1299 1300 dev_dbg(dev, "%s()\n", __func__); 1301 1302 genpd = dev_to_genpd(dev); 1303 if (IS_ERR(genpd)) 1304 return -EINVAL; 1305 1306 ret = pm_generic_freeze_noirq(dev); 1307 if (ret) 1308 return ret; 1309 1310 if (genpd->dev_ops.stop && genpd->dev_ops.start && 1311 !pm_runtime_status_suspended(dev)) 1312 ret = genpd_stop_dev(genpd, dev); 1313 1314 return ret; 1315 } 1316 1317 /** 1318 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain. 1319 * @dev: Device to thaw. 1320 * 1321 * Start the device, unless power has been removed from the domain already 1322 * before the system transition. 
1323 */ 1324 static int genpd_thaw_noirq(struct device *dev) 1325 { 1326 const struct generic_pm_domain *genpd; 1327 int ret = 0; 1328 1329 dev_dbg(dev, "%s()\n", __func__); 1330 1331 genpd = dev_to_genpd(dev); 1332 if (IS_ERR(genpd)) 1333 return -EINVAL; 1334 1335 if (genpd->dev_ops.stop && genpd->dev_ops.start && 1336 !pm_runtime_status_suspended(dev)) { 1337 ret = genpd_start_dev(genpd, dev); 1338 if (ret) 1339 return ret; 1340 } 1341 1342 return pm_generic_thaw_noirq(dev); 1343 } 1344 1345 /** 1346 * genpd_poweroff_noirq - Completion of hibernation of device in an 1347 * I/O PM domain. 1348 * @dev: Device to poweroff. 1349 * 1350 * Stop the device and remove power from the domain if all devices in it have 1351 * been stopped. 1352 */ 1353 static int genpd_poweroff_noirq(struct device *dev) 1354 { 1355 dev_dbg(dev, "%s()\n", __func__); 1356 1357 return genpd_finish_suspend(dev, true); 1358 } 1359 1360 /** 1361 * genpd_restore_noirq - Start of restore of device in an I/O PM domain. 1362 * @dev: Device to resume. 1363 * 1364 * Make sure the domain will be in the same power state as before the 1365 * hibernation the system is resuming from and start the device if necessary. 1366 */ 1367 static int genpd_restore_noirq(struct device *dev) 1368 { 1369 struct generic_pm_domain *genpd; 1370 int ret = 0; 1371 1372 dev_dbg(dev, "%s()\n", __func__); 1373 1374 genpd = dev_to_genpd(dev); 1375 if (IS_ERR(genpd)) 1376 return -EINVAL; 1377 1378 /* 1379 * At this point suspended_count == 0 means we are being run for the 1380 * first time for the given domain in the present cycle. 1381 */ 1382 genpd_lock(genpd); 1383 if (genpd->suspended_count++ == 0) { 1384 /* 1385 * The boot kernel might put the domain into arbitrary state, 1386 * so make it appear as powered off to genpd_sync_power_on(), 1387 * so that it tries to power it on in case it was really off. 1388 */ 1389 genpd->status = GENPD_STATE_OFF; 1390 } 1391 1392 genpd_sync_power_on(genpd, true, 0); 1393 genpd_unlock(genpd); 1394 1395 if (genpd->dev_ops.stop && genpd->dev_ops.start && 1396 !pm_runtime_status_suspended(dev)) { 1397 ret = genpd_start_dev(genpd, dev); 1398 if (ret) 1399 return ret; 1400 } 1401 1402 return pm_generic_restore_noirq(dev); 1403 } 1404 1405 /** 1406 * genpd_complete - Complete power transition of a device in a power domain. 1407 * @dev: Device to complete the transition of. 1408 * 1409 * Complete a power transition of a device (during a system-wide power 1410 * transition) under the assumption that its pm_domain field points to the 1411 * domain member of an object of type struct generic_pm_domain representing 1412 * a power domain consisting of I/O devices. 
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}

/**
 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
 * @dev: The device that is attached to the genpd, which can be suspended.
 *
 * This routine should typically be called for a device that needs to be
 * suspended during the syscore suspend phase. It may also be called during
 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_suspend(struct device *dev)
{
	genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);

/**
 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
 * @dev: The device that is attached to the genpd, which needs to be resumed.
 *
 * This routine should typically be called for a device that needs to be resumed
 * during the syscore resume phase. It may also be called during suspend-to-idle
 * to resume a corresponding CPU device that is attached to a genpd.
1483 */ 1484 void dev_pm_genpd_resume(struct device *dev) 1485 { 1486 genpd_switch_state(dev, false); 1487 } 1488 EXPORT_SYMBOL_GPL(dev_pm_genpd_resume); 1489 1490 #else /* !CONFIG_PM_SLEEP */ 1491 1492 #define genpd_prepare NULL 1493 #define genpd_suspend_noirq NULL 1494 #define genpd_resume_noirq NULL 1495 #define genpd_freeze_noirq NULL 1496 #define genpd_thaw_noirq NULL 1497 #define genpd_poweroff_noirq NULL 1498 #define genpd_restore_noirq NULL 1499 #define genpd_complete NULL 1500 1501 #endif /* CONFIG_PM_SLEEP */ 1502 1503 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev) 1504 { 1505 struct generic_pm_domain_data *gpd_data; 1506 int ret; 1507 1508 ret = dev_pm_get_subsys_data(dev); 1509 if (ret) 1510 return ERR_PTR(ret); 1511 1512 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); 1513 if (!gpd_data) { 1514 ret = -ENOMEM; 1515 goto err_put; 1516 } 1517 1518 gpd_data->base.dev = dev; 1519 gpd_data->td.constraint_changed = true; 1520 gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS; 1521 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; 1522 gpd_data->next_wakeup = KTIME_MAX; 1523 1524 spin_lock_irq(&dev->power.lock); 1525 1526 if (dev->power.subsys_data->domain_data) { 1527 ret = -EINVAL; 1528 goto err_free; 1529 } 1530 1531 dev->power.subsys_data->domain_data = &gpd_data->base; 1532 1533 spin_unlock_irq(&dev->power.lock); 1534 1535 return gpd_data; 1536 1537 err_free: 1538 spin_unlock_irq(&dev->power.lock); 1539 kfree(gpd_data); 1540 err_put: 1541 dev_pm_put_subsys_data(dev); 1542 return ERR_PTR(ret); 1543 } 1544 1545 static void genpd_free_dev_data(struct device *dev, 1546 struct generic_pm_domain_data *gpd_data) 1547 { 1548 spin_lock_irq(&dev->power.lock); 1549 1550 dev->power.subsys_data->domain_data = NULL; 1551 1552 spin_unlock_irq(&dev->power.lock); 1553 1554 kfree(gpd_data); 1555 dev_pm_put_subsys_data(dev); 1556 } 1557 1558 static void genpd_update_cpumask(struct generic_pm_domain *genpd, 1559 int cpu, bool set, unsigned int depth) 1560 { 1561 struct gpd_link *link; 1562 1563 if (!genpd_is_cpu_domain(genpd)) 1564 return; 1565 1566 list_for_each_entry(link, &genpd->child_links, child_node) { 1567 struct generic_pm_domain *parent = link->parent; 1568 1569 genpd_lock_nested(parent, depth + 1); 1570 genpd_update_cpumask(parent, cpu, set, depth + 1); 1571 genpd_unlock(parent); 1572 } 1573 1574 if (set) 1575 cpumask_set_cpu(cpu, genpd->cpus); 1576 else 1577 cpumask_clear_cpu(cpu, genpd->cpus); 1578 } 1579 1580 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu) 1581 { 1582 if (cpu >= 0) 1583 genpd_update_cpumask(genpd, cpu, true, 0); 1584 } 1585 1586 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu) 1587 { 1588 if (cpu >= 0) 1589 genpd_update_cpumask(genpd, cpu, false, 0); 1590 } 1591 1592 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev) 1593 { 1594 int cpu; 1595 1596 if (!genpd_is_cpu_domain(genpd)) 1597 return -1; 1598 1599 for_each_possible_cpu(cpu) { 1600 if (get_cpu_device(cpu) == dev) 1601 return cpu; 1602 } 1603 1604 return -1; 1605 } 1606 1607 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, 1608 struct device *base_dev) 1609 { 1610 struct generic_pm_domain_data *gpd_data; 1611 int ret; 1612 1613 dev_dbg(dev, "%s()\n", __func__); 1614 1615 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) 1616 return -EINVAL; 1617 1618 gpd_data = genpd_alloc_dev_data(dev); 1619 if (IS_ERR(gpd_data)) 1620 return 
		PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

/**
 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
 *
 * @dev: Device that should be associated with the notifier
 * @nb: The notifier block to register
 *
 * Users may call this function to add a genpd power on/off notifier for an
 * attached @dev. Only one notifier per device is allowed. The notifier is
 * sent when genpd is powering on/off the PM domain.
 *
 * It is assumed that the user guarantees that the genpd won't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->power_nb)
		return -EEXIST;

	genpd_lock(genpd);
	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to add notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = nb;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);

/**
 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
 *
 * @dev: Device that is associated with the notifier
 *
 * Users may call this function to remove a genpd power on/off notifier for an
 * attached @dev.
 *
 * It is assumed that the user guarantees that the genpd won't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_remove_notifier(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (!gpd_data->power_nb)
		return -ENODEV;

	genpd_lock(genpd);
	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
					    gpd_data->power_nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
1835 */ 1836 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) { 1837 WARN(1, "Parent %s of subdomain %s must be IRQ safe\n", 1838 genpd->name, subdomain->name); 1839 return -EINVAL; 1840 } 1841 1842 link = kzalloc(sizeof(*link), GFP_KERNEL); 1843 if (!link) 1844 return -ENOMEM; 1845 1846 genpd_lock(subdomain); 1847 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); 1848 1849 if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) { 1850 ret = -EINVAL; 1851 goto out; 1852 } 1853 1854 list_for_each_entry(itr, &genpd->parent_links, parent_node) { 1855 if (itr->child == subdomain && itr->parent == genpd) { 1856 ret = -EINVAL; 1857 goto out; 1858 } 1859 } 1860 1861 link->parent = genpd; 1862 list_add_tail(&link->parent_node, &genpd->parent_links); 1863 link->child = subdomain; 1864 list_add_tail(&link->child_node, &subdomain->child_links); 1865 if (genpd_status_on(subdomain)) 1866 genpd_sd_counter_inc(genpd); 1867 1868 out: 1869 genpd_unlock(genpd); 1870 genpd_unlock(subdomain); 1871 if (ret) 1872 kfree(link); 1873 return ret; 1874 } 1875 1876 /** 1877 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 1878 * @genpd: Leader PM domain to add the subdomain to. 1879 * @subdomain: Subdomain to be added. 1880 */ 1881 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, 1882 struct generic_pm_domain *subdomain) 1883 { 1884 int ret; 1885 1886 mutex_lock(&gpd_list_lock); 1887 ret = genpd_add_subdomain(genpd, subdomain); 1888 mutex_unlock(&gpd_list_lock); 1889 1890 return ret; 1891 } 1892 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain); 1893 1894 /** 1895 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 1896 * @genpd: Leader PM domain to remove the subdomain from. 1897 * @subdomain: Subdomain to be removed. 1898 */ 1899 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, 1900 struct generic_pm_domain *subdomain) 1901 { 1902 struct gpd_link *l, *link; 1903 int ret = -EINVAL; 1904 1905 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) 1906 return -EINVAL; 1907 1908 genpd_lock(subdomain); 1909 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); 1910 1911 if (!list_empty(&subdomain->parent_links) || subdomain->device_count) { 1912 pr_warn("%s: unable to remove subdomain %s\n", 1913 genpd->name, subdomain->name); 1914 ret = -EBUSY; 1915 goto out; 1916 } 1917 1918 list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) { 1919 if (link->child != subdomain) 1920 continue; 1921 1922 list_del(&link->parent_node); 1923 list_del(&link->child_node); 1924 kfree(link); 1925 if (genpd_status_on(subdomain)) 1926 genpd_sd_counter_dec(genpd); 1927 1928 ret = 0; 1929 break; 1930 } 1931 1932 out: 1933 genpd_unlock(genpd); 1934 genpd_unlock(subdomain); 1935 1936 return ret; 1937 } 1938 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain); 1939 1940 static void genpd_free_default_power_state(struct genpd_power_state *states, 1941 unsigned int state_count) 1942 { 1943 kfree(states); 1944 } 1945 1946 static int genpd_set_default_power_state(struct generic_pm_domain *genpd) 1947 { 1948 struct genpd_power_state *state; 1949 1950 state = kzalloc(sizeof(*state), GFP_KERNEL); 1951 if (!state) 1952 return -ENOMEM; 1953 1954 genpd->states = state; 1955 genpd->state_count = 1; 1956 genpd->free_states = genpd_free_default_power_state; 1957 1958 return 0; 1959 } 1960 1961 static void genpd_lock_init(struct generic_pm_domain *genpd) 1962 { 1963 if (genpd->flags & GENPD_FLAG_IRQ_SAFE) { 1964 spin_lock_init(&genpd->slock); 1965 genpd->lock_ops = &genpd_spin_ops; 1966 } 
else { 1967 mutex_init(&genpd->mlock); 1968 genpd->lock_ops = &genpd_mtx_ops; 1969 } 1970 } 1971 1972 /** 1973 * pm_genpd_init - Initialize a generic I/O PM domain object. 1974 * @genpd: PM domain object to initialize. 1975 * @gov: PM domain governor to associate with the domain (may be NULL). 1976 * @is_off: Initial value of the domain's power_is_off field. 1977 * 1978 * Returns 0 on successful initialization, else a negative error code. 1979 */ 1980 int pm_genpd_init(struct generic_pm_domain *genpd, 1981 struct dev_power_governor *gov, bool is_off) 1982 { 1983 int ret; 1984 1985 if (IS_ERR_OR_NULL(genpd)) 1986 return -EINVAL; 1987 1988 INIT_LIST_HEAD(&genpd->parent_links); 1989 INIT_LIST_HEAD(&genpd->child_links); 1990 INIT_LIST_HEAD(&genpd->dev_list); 1991 RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers); 1992 genpd_lock_init(genpd); 1993 genpd->gov = gov; 1994 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); 1995 atomic_set(&genpd->sd_count, 0); 1996 genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON; 1997 genpd->device_count = 0; 1998 genpd->max_off_time_ns = -1; 1999 genpd->max_off_time_changed = true; 2000 genpd->provider = NULL; 2001 genpd->has_provider = false; 2002 genpd->accounting_time = ktime_get(); 2003 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; 2004 genpd->domain.ops.runtime_resume = genpd_runtime_resume; 2005 genpd->domain.ops.prepare = genpd_prepare; 2006 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq; 2007 genpd->domain.ops.resume_noirq = genpd_resume_noirq; 2008 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq; 2009 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq; 2010 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq; 2011 genpd->domain.ops.restore_noirq = genpd_restore_noirq; 2012 genpd->domain.ops.complete = genpd_complete; 2013 genpd->domain.start = genpd_dev_pm_start; 2014 2015 if (genpd->flags & GENPD_FLAG_PM_CLK) { 2016 genpd->dev_ops.stop = pm_clk_suspend; 2017 genpd->dev_ops.start = pm_clk_resume; 2018 } 2019 2020 /* Always-on domains must be powered on at initialization. 
*/ 2021 if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) && 2022 !genpd_status_on(genpd)) 2023 return -EINVAL; 2024 2025 if (genpd_is_cpu_domain(genpd) && 2026 !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL)) 2027 return -ENOMEM; 2028 2029 /* Use only one "off" state if there were no states declared */ 2030 if (genpd->state_count == 0) { 2031 ret = genpd_set_default_power_state(genpd); 2032 if (ret) { 2033 if (genpd_is_cpu_domain(genpd)) 2034 free_cpumask_var(genpd->cpus); 2035 return ret; 2036 } 2037 } else if (!gov && genpd->state_count > 1) { 2038 pr_warn("%s: no governor for states\n", genpd->name); 2039 } 2040 2041 device_initialize(&genpd->dev); 2042 dev_set_name(&genpd->dev, "%s", genpd->name); 2043 2044 mutex_lock(&gpd_list_lock); 2045 list_add(&genpd->gpd_list_node, &gpd_list); 2046 mutex_unlock(&gpd_list_lock); 2047 genpd_debug_add(genpd); 2048 2049 return 0; 2050 } 2051 EXPORT_SYMBOL_GPL(pm_genpd_init); 2052 2053 static int genpd_remove(struct generic_pm_domain *genpd) 2054 { 2055 struct gpd_link *l, *link; 2056 2057 if (IS_ERR_OR_NULL(genpd)) 2058 return -EINVAL; 2059 2060 genpd_lock(genpd); 2061 2062 if (genpd->has_provider) { 2063 genpd_unlock(genpd); 2064 pr_err("Provider present, unable to remove %s\n", genpd->name); 2065 return -EBUSY; 2066 } 2067 2068 if (!list_empty(&genpd->parent_links) || genpd->device_count) { 2069 genpd_unlock(genpd); 2070 pr_err("%s: unable to remove %s\n", __func__, genpd->name); 2071 return -EBUSY; 2072 } 2073 2074 list_for_each_entry_safe(link, l, &genpd->child_links, child_node) { 2075 list_del(&link->parent_node); 2076 list_del(&link->child_node); 2077 kfree(link); 2078 } 2079 2080 list_del(&genpd->gpd_list_node); 2081 genpd_unlock(genpd); 2082 genpd_debug_remove(genpd); 2083 cancel_work_sync(&genpd->power_off_work); 2084 if (genpd_is_cpu_domain(genpd)) 2085 free_cpumask_var(genpd->cpus); 2086 if (genpd->free_states) 2087 genpd->free_states(genpd->states, genpd->state_count); 2088 2089 pr_debug("%s: removed %s\n", __func__, genpd->name); 2090 2091 return 0; 2092 } 2093 2094 /** 2095 * pm_genpd_remove - Remove a generic I/O PM domain 2096 * @genpd: Pointer to PM domain that is to be removed. 2097 * 2098 * To remove the PM domain, this function: 2099 * - Removes the PM domain as a subdomain to any parent domains, 2100 * if it was added. 2101 * - Removes the PM domain from the list of registered PM domains. 2102 * 2103 * The PM domain will only be removed, if the associated provider has 2104 * been removed, it is not a parent to any other PM domain and has no 2105 * devices associated with it. 2106 */ 2107 int pm_genpd_remove(struct generic_pm_domain *genpd) 2108 { 2109 int ret; 2110 2111 mutex_lock(&gpd_list_lock); 2112 ret = genpd_remove(genpd); 2113 mutex_unlock(&gpd_list_lock); 2114 2115 return ret; 2116 } 2117 EXPORT_SYMBOL_GPL(pm_genpd_remove); 2118 2119 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF 2120 2121 /* 2122 * Device Tree based PM domain providers. 2123 * 2124 * The code below implements generic device tree based PM domain providers that 2125 * bind device tree nodes with generic PM domains registered in the system. 2126 * 2127 * Any driver that registers generic PM domains and needs to support binding of 2128 * devices to these domains is supposed to register a PM domain provider, which 2129 * maps a PM domain specifier retrieved from the device tree to a PM domain. 2130 * 2131 * Two simple mapping functions have been provided for convenience: 2132 * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping. 
2133 * - genpd_xlate_onecell() for mapping of multiple PM domains per node by 2134 * index. 2135 */ 2136 2137 /** 2138 * struct of_genpd_provider - PM domain provider registration structure 2139 * @link: Entry in global list of PM domain providers 2140 * @node: Pointer to device tree node of PM domain provider 2141 * @xlate: Provider-specific xlate callback mapping a set of specifier cells 2142 * into a PM domain. 2143 * @data: context pointer to be passed into @xlate callback 2144 */ 2145 struct of_genpd_provider { 2146 struct list_head link; 2147 struct device_node *node; 2148 genpd_xlate_t xlate; 2149 void *data; 2150 }; 2151 2152 /* List of registered PM domain providers. */ 2153 static LIST_HEAD(of_genpd_providers); 2154 /* Mutex to protect the list above. */ 2155 static DEFINE_MUTEX(of_genpd_mutex); 2156 2157 /** 2158 * genpd_xlate_simple() - Xlate function for direct node-domain mapping 2159 * @genpdspec: OF phandle args to map into a PM domain 2160 * @data: xlate function private data - pointer to struct generic_pm_domain 2161 * 2162 * This is a generic xlate function that can be used to model PM domains that 2163 * have their own device tree nodes. The private data of xlate function needs 2164 * to be a valid pointer to struct generic_pm_domain. 2165 */ 2166 static struct generic_pm_domain *genpd_xlate_simple( 2167 struct of_phandle_args *genpdspec, 2168 void *data) 2169 { 2170 return data; 2171 } 2172 2173 /** 2174 * genpd_xlate_onecell() - Xlate function using a single index. 2175 * @genpdspec: OF phandle args to map into a PM domain 2176 * @data: xlate function private data - pointer to struct genpd_onecell_data 2177 * 2178 * This is a generic xlate function that can be used to model simple PM domain 2179 * controllers that have one device tree node and provide multiple PM domains. 2180 * A single cell is used as an index into an array of PM domains specified in 2181 * the genpd_onecell_data struct when registering the provider. 2182 */ 2183 static struct generic_pm_domain *genpd_xlate_onecell( 2184 struct of_phandle_args *genpdspec, 2185 void *data) 2186 { 2187 struct genpd_onecell_data *genpd_data = data; 2188 unsigned int idx = genpdspec->args[0]; 2189 2190 if (genpdspec->args_count != 1) 2191 return ERR_PTR(-EINVAL); 2192 2193 if (idx >= genpd_data->num_domains) { 2194 pr_err("%s: invalid domain index %u\n", __func__, idx); 2195 return ERR_PTR(-EINVAL); 2196 } 2197 2198 if (!genpd_data->domains[idx]) 2199 return ERR_PTR(-ENOENT); 2200 2201 return genpd_data->domains[idx]; 2202 } 2203 2204 /** 2205 * genpd_add_provider() - Register a PM domain provider for a node 2206 * @np: Device node pointer associated with the PM domain provider. 2207 * @xlate: Callback for decoding PM domain from phandle arguments. 2208 * @data: Context pointer for @xlate callback. 
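 *
 * Illustrative sketch (not part of the original file) of a provider-specific
 * @xlate callback that could be registered through this helper; it mirrors
 * genpd_xlate_onecell() above and the "my_xlate" name is hypothetical:
 *
 *	static struct generic_pm_domain *my_xlate(struct of_phandle_args *spec,
 *						  void *data)
 *	{
 *		struct genpd_onecell_data *d = data;
 *
 *		if (spec->args_count != 1 || spec->args[0] >= d->num_domains)
 *			return ERR_PTR(-EINVAL);
 *
 *		return d->domains[spec->args[0]];
 *	}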
2209 */ 2210 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, 2211 void *data) 2212 { 2213 struct of_genpd_provider *cp; 2214 2215 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 2216 if (!cp) 2217 return -ENOMEM; 2218 2219 cp->node = of_node_get(np); 2220 cp->data = data; 2221 cp->xlate = xlate; 2222 fwnode_dev_initialized(&np->fwnode, true); 2223 2224 mutex_lock(&of_genpd_mutex); 2225 list_add(&cp->link, &of_genpd_providers); 2226 mutex_unlock(&of_genpd_mutex); 2227 pr_debug("Added domain provider from %pOF\n", np); 2228 2229 return 0; 2230 } 2231 2232 static bool genpd_present(const struct generic_pm_domain *genpd) 2233 { 2234 bool ret = false; 2235 const struct generic_pm_domain *gpd; 2236 2237 mutex_lock(&gpd_list_lock); 2238 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 2239 if (gpd == genpd) { 2240 ret = true; 2241 break; 2242 } 2243 } 2244 mutex_unlock(&gpd_list_lock); 2245 2246 return ret; 2247 } 2248 2249 /** 2250 * of_genpd_add_provider_simple() - Register a simple PM domain provider 2251 * @np: Device node pointer associated with the PM domain provider. 2252 * @genpd: Pointer to PM domain associated with the PM domain provider. 2253 */ 2254 int of_genpd_add_provider_simple(struct device_node *np, 2255 struct generic_pm_domain *genpd) 2256 { 2257 int ret; 2258 2259 if (!np || !genpd) 2260 return -EINVAL; 2261 2262 if (!genpd_present(genpd)) 2263 return -EINVAL; 2264 2265 genpd->dev.of_node = np; 2266 2267 /* Parse genpd OPP table */ 2268 if (genpd->set_performance_state) { 2269 ret = dev_pm_opp_of_add_table(&genpd->dev); 2270 if (ret) 2271 return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n"); 2272 2273 /* 2274 * Save table for faster processing while setting performance 2275 * state. 2276 */ 2277 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); 2278 WARN_ON(IS_ERR(genpd->opp_table)); 2279 } 2280 2281 ret = genpd_add_provider(np, genpd_xlate_simple, genpd); 2282 if (ret) { 2283 if (genpd->set_performance_state) { 2284 dev_pm_opp_put_opp_table(genpd->opp_table); 2285 dev_pm_opp_of_remove_table(&genpd->dev); 2286 } 2287 2288 return ret; 2289 } 2290 2291 genpd->provider = &np->fwnode; 2292 genpd->has_provider = true; 2293 2294 return 0; 2295 } 2296 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple); 2297 2298 /** 2299 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider 2300 * @np: Device node pointer associated with the PM domain provider. 2301 * @data: Pointer to the data associated with the PM domain provider. 2302 */ 2303 int of_genpd_add_provider_onecell(struct device_node *np, 2304 struct genpd_onecell_data *data) 2305 { 2306 struct generic_pm_domain *genpd; 2307 unsigned int i; 2308 int ret = -EINVAL; 2309 2310 if (!np || !data) 2311 return -EINVAL; 2312 2313 if (!data->xlate) 2314 data->xlate = genpd_xlate_onecell; 2315 2316 for (i = 0; i < data->num_domains; i++) { 2317 genpd = data->domains[i]; 2318 2319 if (!genpd) 2320 continue; 2321 if (!genpd_present(genpd)) 2322 goto error; 2323 2324 genpd->dev.of_node = np; 2325 2326 /* Parse genpd OPP table */ 2327 if (genpd->set_performance_state) { 2328 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i); 2329 if (ret) { 2330 dev_err_probe(&genpd->dev, ret, 2331 "Failed to add OPP table for index %d\n", i); 2332 goto error; 2333 } 2334 2335 /* 2336 * Save table for faster processing while setting 2337 * performance state. 
2338 */ 2339 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); 2340 WARN_ON(IS_ERR(genpd->opp_table)); 2341 } 2342 2343 genpd->provider = &np->fwnode; 2344 genpd->has_provider = true; 2345 } 2346 2347 ret = genpd_add_provider(np, data->xlate, data); 2348 if (ret < 0) 2349 goto error; 2350 2351 return 0; 2352 2353 error: 2354 while (i--) { 2355 genpd = data->domains[i]; 2356 2357 if (!genpd) 2358 continue; 2359 2360 genpd->provider = NULL; 2361 genpd->has_provider = false; 2362 2363 if (genpd->set_performance_state) { 2364 dev_pm_opp_put_opp_table(genpd->opp_table); 2365 dev_pm_opp_of_remove_table(&genpd->dev); 2366 } 2367 } 2368 2369 return ret; 2370 } 2371 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell); 2372 2373 /** 2374 * of_genpd_del_provider() - Remove a previously registered PM domain provider 2375 * @np: Device node pointer associated with the PM domain provider 2376 */ 2377 void of_genpd_del_provider(struct device_node *np) 2378 { 2379 struct of_genpd_provider *cp, *tmp; 2380 struct generic_pm_domain *gpd; 2381 2382 mutex_lock(&gpd_list_lock); 2383 mutex_lock(&of_genpd_mutex); 2384 list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) { 2385 if (cp->node == np) { 2386 /* 2387 * For each PM domain associated with the 2388 * provider, set the 'has_provider' to false 2389 * so that the PM domain can be safely removed. 2390 */ 2391 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 2392 if (gpd->provider == &np->fwnode) { 2393 gpd->has_provider = false; 2394 2395 if (!gpd->set_performance_state) 2396 continue; 2397 2398 dev_pm_opp_put_opp_table(gpd->opp_table); 2399 dev_pm_opp_of_remove_table(&gpd->dev); 2400 } 2401 } 2402 2403 fwnode_dev_initialized(&cp->node->fwnode, false); 2404 list_del(&cp->link); 2405 of_node_put(cp->node); 2406 kfree(cp); 2407 break; 2408 } 2409 } 2410 mutex_unlock(&of_genpd_mutex); 2411 mutex_unlock(&gpd_list_lock); 2412 } 2413 EXPORT_SYMBOL_GPL(of_genpd_del_provider); 2414 2415 /** 2416 * genpd_get_from_provider() - Look-up PM domain 2417 * @genpdspec: OF phandle args to use for look-up 2418 * 2419 * Looks for a PM domain provider under the node specified by @genpdspec and if 2420 * found, uses xlate function of the provider to map phandle args to a PM 2421 * domain. 2422 * 2423 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR() 2424 * on failure. 2425 */ 2426 static struct generic_pm_domain *genpd_get_from_provider( 2427 struct of_phandle_args *genpdspec) 2428 { 2429 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); 2430 struct of_genpd_provider *provider; 2431 2432 if (!genpdspec) 2433 return ERR_PTR(-EINVAL); 2434 2435 mutex_lock(&of_genpd_mutex); 2436 2437 /* Check if we have such a provider in our array */ 2438 list_for_each_entry(provider, &of_genpd_providers, link) { 2439 if (provider->node == genpdspec->np) 2440 genpd = provider->xlate(genpdspec, provider->data); 2441 if (!IS_ERR(genpd)) 2442 break; 2443 } 2444 2445 mutex_unlock(&of_genpd_mutex); 2446 2447 return genpd; 2448 } 2449 2450 /** 2451 * of_genpd_add_device() - Add a device to an I/O PM domain 2452 * @genpdspec: OF phandle args to use for look-up PM domain 2453 * @dev: Device to be added. 2454 * 2455 * Looks-up an I/O PM domain based upon phandle args provided and adds 2456 * the device to the PM domain. Returns a negative error code on failure. 
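 *
 * Illustrative sketch (not part of the original file) of how a caller might
 * resolve a "power-domains" specifier and hand it to this function; error
 * handling is trimmed and specifier index 0 is an assumption:
 *
 *	struct of_phandle_args pd_args;
 *	int ret;
 *
 *	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
 *					 "#power-domain-cells", 0, &pd_args);
 *	if (!ret) {
 *		ret = of_genpd_add_device(&pd_args, dev);
 *		of_node_put(pd_args.np);
 *	}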
2457 */ 2458 int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev) 2459 { 2460 struct generic_pm_domain *genpd; 2461 int ret; 2462 2463 mutex_lock(&gpd_list_lock); 2464 2465 genpd = genpd_get_from_provider(genpdspec); 2466 if (IS_ERR(genpd)) { 2467 ret = PTR_ERR(genpd); 2468 goto out; 2469 } 2470 2471 ret = genpd_add_device(genpd, dev, dev); 2472 2473 out: 2474 mutex_unlock(&gpd_list_lock); 2475 2476 return ret; 2477 } 2478 EXPORT_SYMBOL_GPL(of_genpd_add_device); 2479 2480 /** 2481 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 2482 * @parent_spec: OF phandle args to use for parent PM domain look-up 2483 * @subdomain_spec: OF phandle args to use for subdomain look-up 2484 * 2485 * Looks-up a parent PM domain and subdomain based upon phandle args 2486 * provided and adds the subdomain to the parent PM domain. Returns a 2487 * negative error code on failure. 2488 */ 2489 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec, 2490 struct of_phandle_args *subdomain_spec) 2491 { 2492 struct generic_pm_domain *parent, *subdomain; 2493 int ret; 2494 2495 mutex_lock(&gpd_list_lock); 2496 2497 parent = genpd_get_from_provider(parent_spec); 2498 if (IS_ERR(parent)) { 2499 ret = PTR_ERR(parent); 2500 goto out; 2501 } 2502 2503 subdomain = genpd_get_from_provider(subdomain_spec); 2504 if (IS_ERR(subdomain)) { 2505 ret = PTR_ERR(subdomain); 2506 goto out; 2507 } 2508 2509 ret = genpd_add_subdomain(parent, subdomain); 2510 2511 out: 2512 mutex_unlock(&gpd_list_lock); 2513 2514 return ret == -ENOENT ? -EPROBE_DEFER : ret; 2515 } 2516 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain); 2517 2518 /** 2519 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 2520 * @parent_spec: OF phandle args to use for parent PM domain look-up 2521 * @subdomain_spec: OF phandle args to use for subdomain look-up 2522 * 2523 * Looks-up a parent PM domain and subdomain based upon phandle args 2524 * provided and removes the subdomain from the parent PM domain. Returns a 2525 * negative error code on failure. 2526 */ 2527 int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec, 2528 struct of_phandle_args *subdomain_spec) 2529 { 2530 struct generic_pm_domain *parent, *subdomain; 2531 int ret; 2532 2533 mutex_lock(&gpd_list_lock); 2534 2535 parent = genpd_get_from_provider(parent_spec); 2536 if (IS_ERR(parent)) { 2537 ret = PTR_ERR(parent); 2538 goto out; 2539 } 2540 2541 subdomain = genpd_get_from_provider(subdomain_spec); 2542 if (IS_ERR(subdomain)) { 2543 ret = PTR_ERR(subdomain); 2544 goto out; 2545 } 2546 2547 ret = pm_genpd_remove_subdomain(parent, subdomain); 2548 2549 out: 2550 mutex_unlock(&gpd_list_lock); 2551 2552 return ret; 2553 } 2554 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain); 2555 2556 /** 2557 * of_genpd_remove_last - Remove the last PM domain registered for a provider 2558 * @np: Pointer to device node associated with provider 2559 * 2560 * Find the last PM domain that was added by a particular provider and 2561 * remove this PM domain from the list of PM domains. The provider is 2562 * identified by the 'provider' device structure that is passed. The PM 2563 * domain will only be removed, if the provider associated with domain 2564 * has been removed. 2565 * 2566 * Returns a valid pointer to struct generic_pm_domain on success or 2567 * ERR_PTR() on failure. 
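 *
 * Illustrative sketch (not part of the original file) of a provider driver's
 * teardown path; "np" is assumed to be the provider's device node and the
 * provider is deleted first so that the removal below can succeed:
 *
 *	struct generic_pm_domain *gpd;
 *
 *	of_genpd_del_provider(np);
 *
 *	do {
 *		gpd = of_genpd_remove_last(np);
 *	} while (!IS_ERR(gpd));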
2568 */ 2569 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) 2570 { 2571 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT); 2572 int ret; 2573 2574 if (IS_ERR_OR_NULL(np)) 2575 return ERR_PTR(-EINVAL); 2576 2577 mutex_lock(&gpd_list_lock); 2578 list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) { 2579 if (gpd->provider == &np->fwnode) { 2580 ret = genpd_remove(gpd); 2581 genpd = ret ? ERR_PTR(ret) : gpd; 2582 break; 2583 } 2584 } 2585 mutex_unlock(&gpd_list_lock); 2586 2587 return genpd; 2588 } 2589 EXPORT_SYMBOL_GPL(of_genpd_remove_last); 2590 2591 static void genpd_release_dev(struct device *dev) 2592 { 2593 of_node_put(dev->of_node); 2594 kfree(dev); 2595 } 2596 2597 static struct bus_type genpd_bus_type = { 2598 .name = "genpd", 2599 }; 2600 2601 /** 2602 * genpd_dev_pm_detach - Detach a device from its PM domain. 2603 * @dev: Device to detach. 2604 * @power_off: Currently not used 2605 * 2606 * Try to locate a corresponding generic PM domain, which the device was 2607 * attached to previously. If such is found, the device is detached from it. 2608 */ 2609 static void genpd_dev_pm_detach(struct device *dev, bool power_off) 2610 { 2611 struct generic_pm_domain *pd; 2612 unsigned int i; 2613 int ret = 0; 2614 2615 pd = dev_to_genpd(dev); 2616 if (IS_ERR(pd)) 2617 return; 2618 2619 dev_dbg(dev, "removing from PM domain %s\n", pd->name); 2620 2621 /* Drop the default performance state */ 2622 if (dev_gpd_data(dev)->default_pstate) { 2623 dev_pm_genpd_set_performance_state(dev, 0); 2624 dev_gpd_data(dev)->default_pstate = 0; 2625 } 2626 2627 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) { 2628 ret = genpd_remove_device(pd, dev); 2629 if (ret != -EAGAIN) 2630 break; 2631 2632 mdelay(i); 2633 cond_resched(); 2634 } 2635 2636 if (ret < 0) { 2637 dev_err(dev, "failed to remove from PM domain %s: %d", 2638 pd->name, ret); 2639 return; 2640 } 2641 2642 /* Check if PM domain can be powered off after removing this device. */ 2643 genpd_queue_power_off_work(pd); 2644 2645 /* Unregister the device if it was created by genpd. 
*/ 2646 if (dev->bus == &genpd_bus_type) 2647 device_unregister(dev); 2648 } 2649 2650 static void genpd_dev_pm_sync(struct device *dev) 2651 { 2652 struct generic_pm_domain *pd; 2653 2654 pd = dev_to_genpd(dev); 2655 if (IS_ERR(pd)) 2656 return; 2657 2658 genpd_queue_power_off_work(pd); 2659 } 2660 2661 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev, 2662 unsigned int index, bool power_on) 2663 { 2664 struct of_phandle_args pd_args; 2665 struct generic_pm_domain *pd; 2666 int pstate; 2667 int ret; 2668 2669 ret = of_parse_phandle_with_args(dev->of_node, "power-domains", 2670 "#power-domain-cells", index, &pd_args); 2671 if (ret < 0) 2672 return ret; 2673 2674 mutex_lock(&gpd_list_lock); 2675 pd = genpd_get_from_provider(&pd_args); 2676 of_node_put(pd_args.np); 2677 if (IS_ERR(pd)) { 2678 mutex_unlock(&gpd_list_lock); 2679 dev_dbg(dev, "%s() failed to find PM domain: %ld\n", 2680 __func__, PTR_ERR(pd)); 2681 return driver_deferred_probe_check_state(base_dev); 2682 } 2683 2684 dev_dbg(dev, "adding to PM domain %s\n", pd->name); 2685 2686 ret = genpd_add_device(pd, dev, base_dev); 2687 mutex_unlock(&gpd_list_lock); 2688 2689 if (ret < 0) 2690 return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name); 2691 2692 dev->pm_domain->detach = genpd_dev_pm_detach; 2693 dev->pm_domain->sync = genpd_dev_pm_sync; 2694 2695 if (power_on) { 2696 genpd_lock(pd); 2697 ret = genpd_power_on(pd, 0); 2698 genpd_unlock(pd); 2699 } 2700 2701 if (ret) { 2702 genpd_remove_device(pd, dev); 2703 return -EPROBE_DEFER; 2704 } 2705 2706 /* Set the default performance state */ 2707 pstate = of_get_required_opp_performance_state(dev->of_node, index); 2708 if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) { 2709 ret = pstate; 2710 goto err; 2711 } else if (pstate > 0) { 2712 ret = dev_pm_genpd_set_performance_state(dev, pstate); 2713 if (ret) 2714 goto err; 2715 dev_gpd_data(dev)->default_pstate = pstate; 2716 } 2717 return 1; 2718 2719 err: 2720 dev_err(dev, "failed to set required performance state for power-domain %s: %d\n", 2721 pd->name, ret); 2722 genpd_remove_device(pd, dev); 2723 return ret; 2724 } 2725 2726 /** 2727 * genpd_dev_pm_attach - Attach a device to its PM domain using DT. 2728 * @dev: Device to attach. 2729 * 2730 * Parse the device's OF node to find a PM domain specifier. If one is found, 2731 * attach the device to the retrieved pm_domain ops. 2732 * 2733 * Returns 1 when a PM domain has successfully been attached, 0 when the device 2734 * doesn't need a PM domain or when multiple power-domains exist for it, else a 2735 * negative error code. Note that if a power-domain exists for the device, but it 2736 * cannot be found or turned on, then -EPROBE_DEFER is returned to ensure that 2737 * the device is not probed and the attach is retried later. 2738 */ 2739 int genpd_dev_pm_attach(struct device *dev) 2740 { 2741 if (!dev->of_node) 2742 return 0; 2743 2744 /* 2745 * Devices with multiple PM domains must be attached separately, as we 2746 * can only attach one PM domain per device. 2747 */ 2748 if (of_count_phandle_with_args(dev->of_node, "power-domains", 2749 "#power-domain-cells") != 1) 2750 return 0; 2751 2752 return __genpd_dev_pm_attach(dev, dev, 0, true); 2753 } 2754 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); 2755 2756 /** 2757 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains. 2758 * @dev: The device used to look up the PM domain. 2759 * @index: The index of the PM domain.
2760 * 2761 * Parse the device's OF node to find a PM domain specifier at the provided 2762 * @index. If one is found, create a virtual device and attach it to the retrieved 2763 * pm_domain ops. To deal with detaching of the virtual device, the ->detach() 2764 * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach(). 2765 * 2766 * Returns the created virtual device if a PM domain is successfully attached, 2767 * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of 2768 * failures. If a power-domain exists for the device, but cannot be found or 2769 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device 2770 * is not probed and the attach is retried later. 2771 */ 2772 struct device *genpd_dev_pm_attach_by_id(struct device *dev, 2773 unsigned int index) 2774 { 2775 struct device *virt_dev; 2776 int num_domains; 2777 int ret; 2778 2779 if (!dev->of_node) 2780 return NULL; 2781 2782 /* Verify that the index is within a valid range. */ 2783 num_domains = of_count_phandle_with_args(dev->of_node, "power-domains", 2784 "#power-domain-cells"); 2785 if (index >= num_domains) 2786 return NULL; 2787 2788 /* Allocate and register device on the genpd bus. */ 2789 virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL); 2790 if (!virt_dev) 2791 return ERR_PTR(-ENOMEM); 2792 2793 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev)); 2794 virt_dev->bus = &genpd_bus_type; 2795 virt_dev->release = genpd_release_dev; 2796 virt_dev->of_node = of_node_get(dev->of_node); 2797 2798 ret = device_register(virt_dev); 2799 if (ret) { 2800 put_device(virt_dev); 2801 return ERR_PTR(ret); 2802 } 2803 2804 /* Try to attach the device to the PM domain at the specified index. */ 2805 ret = __genpd_dev_pm_attach(virt_dev, dev, index, false); 2806 if (ret < 1) { 2807 device_unregister(virt_dev); 2808 return ret ? ERR_PTR(ret) : NULL; 2809 } 2810 2811 pm_runtime_enable(virt_dev); 2812 genpd_queue_power_off_work(dev_to_genpd(virt_dev)); 2813 2814 return virt_dev; 2815 } 2816 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); 2817 2818 /** 2819 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains. 2820 * @dev: The device used to look up the PM domain. 2821 * @name: The name of the PM domain. 2822 * 2823 * Parse the device's OF node to find a PM domain specifier using the 2824 * power-domain-names DT property. For further description see 2825 * genpd_dev_pm_attach_by_id().
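 *
 * Illustrative sketch (not part of the original file); "perf" is a hypothetical
 * entry that would have to match the consumer's "power-domain-names" property,
 * and a consumer that strictly requires the domain might treat NULL as -ENODEV:
 *
 *	struct device *pd_dev;
 *
 *	pd_dev = genpd_dev_pm_attach_by_name(dev, "perf");
 *	if (IS_ERR(pd_dev))
 *		return PTR_ERR(pd_dev);
 *	else if (!pd_dev)
 *		return -ENODEV;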
2826 */ 2827 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name) 2828 { 2829 int index; 2830 2831 if (!dev->of_node) 2832 return NULL; 2833 2834 index = of_property_match_string(dev->of_node, "power-domain-names", 2835 name); 2836 if (index < 0) 2837 return NULL; 2838 2839 return genpd_dev_pm_attach_by_id(dev, index); 2840 } 2841 2842 static const struct of_device_id idle_state_match[] = { 2843 { .compatible = "domain-idle-state", }, 2844 { } 2845 }; 2846 2847 static int genpd_parse_state(struct genpd_power_state *genpd_state, 2848 struct device_node *state_node) 2849 { 2850 int err; 2851 u32 residency; 2852 u32 entry_latency, exit_latency; 2853 2854 err = of_property_read_u32(state_node, "entry-latency-us", 2855 &entry_latency); 2856 if (err) { 2857 pr_debug(" * %pOF missing entry-latency-us property\n", 2858 state_node); 2859 return -EINVAL; 2860 } 2861 2862 err = of_property_read_u32(state_node, "exit-latency-us", 2863 &exit_latency); 2864 if (err) { 2865 pr_debug(" * %pOF missing exit-latency-us property\n", 2866 state_node); 2867 return -EINVAL; 2868 } 2869 2870 err = of_property_read_u32(state_node, "min-residency-us", &residency); 2871 if (!err) 2872 genpd_state->residency_ns = 1000 * residency; 2873 2874 genpd_state->power_on_latency_ns = 1000 * exit_latency; 2875 genpd_state->power_off_latency_ns = 1000 * entry_latency; 2876 genpd_state->fwnode = &state_node->fwnode; 2877 2878 return 0; 2879 } 2880 2881 static int genpd_iterate_idle_states(struct device_node *dn, 2882 struct genpd_power_state *states) 2883 { 2884 int ret; 2885 struct of_phandle_iterator it; 2886 struct device_node *np; 2887 int i = 0; 2888 2889 ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL); 2890 if (ret <= 0) 2891 return ret == -ENOENT ? 0 : ret; 2892 2893 /* Loop over the phandles until all the requested entries are found */ 2894 of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) { 2895 np = it.node; 2896 if (!of_match_node(idle_state_match, np)) 2897 continue; 2898 if (states) { 2899 ret = genpd_parse_state(&states[i], np); 2900 if (ret) { 2901 pr_err("Parsing idle state node %pOF failed with err %d\n", 2902 np, ret); 2903 of_node_put(np); 2904 return ret; 2905 } 2906 } 2907 i++; 2908 } 2909 2910 return i; 2911 } 2912 2913 /** 2914 * of_genpd_parse_idle_states - Return array of idle states for the genpd. 2915 * 2916 * @dn: The genpd device node 2917 * @states: The pointer to which the state array will be saved. 2918 * @n: The count of elements in the array returned from this function. 2919 * 2920 * The device states parsed from the OF node are returned in @states; the 2921 * memory for them is allocated by this function and it is the caller's 2922 * responsibility to free it after use. Returns 0 on success, whether or not any 2923 * compatible domain idle states are found, and a negative error code on errors. 2924 */ 2925 int of_genpd_parse_idle_states(struct device_node *dn, 2926 struct genpd_power_state **states, int *n) 2927 { 2928 struct genpd_power_state *st; 2929 int ret; 2930 2931 ret = genpd_iterate_idle_states(dn, NULL); 2932 if (ret < 0) 2933 return ret; 2934 2935 if (!ret) { 2936 *states = NULL; 2937 *n = 0; 2938 return 0; 2939 } 2940 2941 st = kcalloc(ret, sizeof(*st), GFP_KERNEL); 2942 if (!st) 2943 return -ENOMEM; 2944 2945 ret = genpd_iterate_idle_states(dn, st); 2946 if (ret <= 0) { 2947 kfree(st); 2948 return ret < 0 ?
ret : -EINVAL; 2949 } 2950 2951 *states = st; 2952 *n = ret; 2953 2954 return 0; 2955 } 2956 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states); 2957 2958 /** 2959 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node. 2960 * 2961 * @genpd_dev: Genpd's device for which the performance-state needs to be found. 2962 * @opp: struct dev_pm_opp of the OPP for which we need to find performance 2963 * state. 2964 * 2965 * Returns performance state encoded in the OPP of the genpd. This calls 2966 * platform specific genpd->opp_to_performance_state() callback to translate 2967 * power domain OPP to performance state. 2968 * 2969 * Returns performance state on success and 0 on failure. 2970 */ 2971 unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev, 2972 struct dev_pm_opp *opp) 2973 { 2974 struct generic_pm_domain *genpd = NULL; 2975 int state; 2976 2977 genpd = container_of(genpd_dev, struct generic_pm_domain, dev); 2978 2979 if (unlikely(!genpd->opp_to_performance_state)) 2980 return 0; 2981 2982 genpd_lock(genpd); 2983 state = genpd->opp_to_performance_state(genpd, opp); 2984 genpd_unlock(genpd); 2985 2986 return state; 2987 } 2988 EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state); 2989 2990 static int __init genpd_bus_init(void) 2991 { 2992 return bus_register(&genpd_bus_type); 2993 } 2994 core_initcall(genpd_bus_init); 2995 2996 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ 2997 2998 2999 /*** debugfs support ***/ 3000 3001 #ifdef CONFIG_DEBUG_FS 3002 /* 3003 * TODO: This function is a slightly modified version of rtpm_status_show 3004 * from sysfs.c, so generalize it. 3005 */ 3006 static void rtpm_status_str(struct seq_file *s, struct device *dev) 3007 { 3008 static const char * const status_lookup[] = { 3009 [RPM_ACTIVE] = "active", 3010 [RPM_RESUMING] = "resuming", 3011 [RPM_SUSPENDED] = "suspended", 3012 [RPM_SUSPENDING] = "suspending" 3013 }; 3014 const char *p = ""; 3015 3016 if (dev->power.runtime_error) 3017 p = "error"; 3018 else if (dev->power.disable_depth) 3019 p = "unsupported"; 3020 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup)) 3021 p = status_lookup[dev->power.runtime_status]; 3022 else 3023 WARN_ON(1); 3024 3025 seq_printf(s, "%-25s ", p); 3026 } 3027 3028 static void perf_status_str(struct seq_file *s, struct device *dev) 3029 { 3030 struct generic_pm_domain_data *gpd_data; 3031 3032 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 3033 seq_put_decimal_ull(s, "", gpd_data->performance_state); 3034 } 3035 3036 static int genpd_summary_one(struct seq_file *s, 3037 struct generic_pm_domain *genpd) 3038 { 3039 static const char * const status_lookup[] = { 3040 [GENPD_STATE_ON] = "on", 3041 [GENPD_STATE_OFF] = "off" 3042 }; 3043 struct pm_domain_data *pm_data; 3044 const char *kobj_path; 3045 struct gpd_link *link; 3046 char state[16]; 3047 int ret; 3048 3049 ret = genpd_lock_interruptible(genpd); 3050 if (ret) 3051 return -ERESTARTSYS; 3052 3053 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) 3054 goto exit; 3055 if (!genpd_status_on(genpd)) 3056 snprintf(state, sizeof(state), "%s-%u", 3057 status_lookup[genpd->status], genpd->state_idx); 3058 else 3059 snprintf(state, sizeof(state), "%s", 3060 status_lookup[genpd->status]); 3061 seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state); 3062 3063 /* 3064 * Modifications on the list require holding locks on both 3065 * parent and child, so we are safe. 3066 * Also genpd->name is immutable. 
3067 */ 3068 list_for_each_entry(link, &genpd->parent_links, parent_node) { 3069 if (list_is_first(&link->parent_node, &genpd->parent_links)) 3070 seq_printf(s, "\n%48s", " "); 3071 seq_printf(s, "%s", link->child->name); 3072 if (!list_is_last(&link->parent_node, &genpd->parent_links)) 3073 seq_puts(s, ", "); 3074 } 3075 3076 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { 3077 kobj_path = kobject_get_path(&pm_data->dev->kobj, 3078 genpd_is_irq_safe(genpd) ? 3079 GFP_ATOMIC : GFP_KERNEL); 3080 if (kobj_path == NULL) 3081 continue; 3082 3083 seq_printf(s, "\n %-50s ", kobj_path); 3084 rtpm_status_str(s, pm_data->dev); 3085 perf_status_str(s, pm_data->dev); 3086 kfree(kobj_path); 3087 } 3088 3089 seq_puts(s, "\n"); 3090 exit: 3091 genpd_unlock(genpd); 3092 3093 return 0; 3094 } 3095 3096 static int summary_show(struct seq_file *s, void *data) 3097 { 3098 struct generic_pm_domain *genpd; 3099 int ret = 0; 3100 3101 seq_puts(s, "domain status children performance\n"); 3102 seq_puts(s, " /device runtime status\n"); 3103 seq_puts(s, "----------------------------------------------------------------------------------------------\n"); 3104 3105 ret = mutex_lock_interruptible(&gpd_list_lock); 3106 if (ret) 3107 return -ERESTARTSYS; 3108 3109 list_for_each_entry(genpd, &gpd_list, gpd_list_node) { 3110 ret = genpd_summary_one(s, genpd); 3111 if (ret) 3112 break; 3113 } 3114 mutex_unlock(&gpd_list_lock); 3115 3116 return ret; 3117 } 3118 3119 static int status_show(struct seq_file *s, void *data) 3120 { 3121 static const char * const status_lookup[] = { 3122 [GENPD_STATE_ON] = "on", 3123 [GENPD_STATE_OFF] = "off" 3124 }; 3125 3126 struct generic_pm_domain *genpd = s->private; 3127 int ret = 0; 3128 3129 ret = genpd_lock_interruptible(genpd); 3130 if (ret) 3131 return -ERESTARTSYS; 3132 3133 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) 3134 goto exit; 3135 3136 if (genpd->status == GENPD_STATE_OFF) 3137 seq_printf(s, "%s-%u\n", status_lookup[genpd->status], 3138 genpd->state_idx); 3139 else 3140 seq_printf(s, "%s\n", status_lookup[genpd->status]); 3141 exit: 3142 genpd_unlock(genpd); 3143 return ret; 3144 } 3145 3146 static int sub_domains_show(struct seq_file *s, void *data) 3147 { 3148 struct generic_pm_domain *genpd = s->private; 3149 struct gpd_link *link; 3150 int ret = 0; 3151 3152 ret = genpd_lock_interruptible(genpd); 3153 if (ret) 3154 return -ERESTARTSYS; 3155 3156 list_for_each_entry(link, &genpd->parent_links, parent_node) 3157 seq_printf(s, "%s\n", link->child->name); 3158 3159 genpd_unlock(genpd); 3160 return ret; 3161 } 3162 3163 static int idle_states_show(struct seq_file *s, void *data) 3164 { 3165 struct generic_pm_domain *genpd = s->private; 3166 unsigned int i; 3167 int ret = 0; 3168 3169 ret = genpd_lock_interruptible(genpd); 3170 if (ret) 3171 return -ERESTARTSYS; 3172 3173 seq_puts(s, "State Time Spent(ms) Usage Rejected\n"); 3174 3175 for (i = 0; i < genpd->state_count; i++) { 3176 ktime_t delta = 0; 3177 s64 msecs; 3178 3179 if ((genpd->status == GENPD_STATE_OFF) && 3180 (genpd->state_idx == i)) 3181 delta = ktime_sub(ktime_get(), genpd->accounting_time); 3182 3183 msecs = ktime_to_ms( 3184 ktime_add(genpd->states[i].idle_time, delta)); 3185 seq_printf(s, "S%-13i %-14lld %-14llu %llu\n", i, msecs, 3186 genpd->states[i].usage, genpd->states[i].rejected); 3187 } 3188 3189 genpd_unlock(genpd); 3190 return ret; 3191 } 3192 3193 static int active_time_show(struct seq_file *s, void *data) 3194 { 3195 struct generic_pm_domain *genpd = s->private; 3196 
ktime_t delta = 0; 3197 int ret = 0; 3198 3199 ret = genpd_lock_interruptible(genpd); 3200 if (ret) 3201 return -ERESTARTSYS; 3202 3203 if (genpd->status == GENPD_STATE_ON) 3204 delta = ktime_sub(ktime_get(), genpd->accounting_time); 3205 3206 seq_printf(s, "%lld ms\n", ktime_to_ms( 3207 ktime_add(genpd->on_time, delta))); 3208 3209 genpd_unlock(genpd); 3210 return ret; 3211 } 3212 3213 static int total_idle_time_show(struct seq_file *s, void *data) 3214 { 3215 struct generic_pm_domain *genpd = s->private; 3216 ktime_t delta = 0, total = 0; 3217 unsigned int i; 3218 int ret = 0; 3219 3220 ret = genpd_lock_interruptible(genpd); 3221 if (ret) 3222 return -ERESTARTSYS; 3223 3224 for (i = 0; i < genpd->state_count; i++) { 3225 3226 if ((genpd->status == GENPD_STATE_OFF) && 3227 (genpd->state_idx == i)) 3228 delta = ktime_sub(ktime_get(), genpd->accounting_time); 3229 3230 total = ktime_add(total, genpd->states[i].idle_time); 3231 } 3232 total = ktime_add(total, delta); 3233 3234 seq_printf(s, "%lld ms\n", ktime_to_ms(total)); 3235 3236 genpd_unlock(genpd); 3237 return ret; 3238 } 3239 3240 3241 static int devices_show(struct seq_file *s, void *data) 3242 { 3243 struct generic_pm_domain *genpd = s->private; 3244 struct pm_domain_data *pm_data; 3245 const char *kobj_path; 3246 int ret = 0; 3247 3248 ret = genpd_lock_interruptible(genpd); 3249 if (ret) 3250 return -ERESTARTSYS; 3251 3252 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { 3253 kobj_path = kobject_get_path(&pm_data->dev->kobj, 3254 genpd_is_irq_safe(genpd) ? 3255 GFP_ATOMIC : GFP_KERNEL); 3256 if (kobj_path == NULL) 3257 continue; 3258 3259 seq_printf(s, "%s\n", kobj_path); 3260 kfree(kobj_path); 3261 } 3262 3263 genpd_unlock(genpd); 3264 return ret; 3265 } 3266 3267 static int perf_state_show(struct seq_file *s, void *data) 3268 { 3269 struct generic_pm_domain *genpd = s->private; 3270 3271 if (genpd_lock_interruptible(genpd)) 3272 return -ERESTARTSYS; 3273 3274 seq_printf(s, "%u\n", genpd->performance_state); 3275 3276 genpd_unlock(genpd); 3277 return 0; 3278 } 3279 3280 DEFINE_SHOW_ATTRIBUTE(summary); 3281 DEFINE_SHOW_ATTRIBUTE(status); 3282 DEFINE_SHOW_ATTRIBUTE(sub_domains); 3283 DEFINE_SHOW_ATTRIBUTE(idle_states); 3284 DEFINE_SHOW_ATTRIBUTE(active_time); 3285 DEFINE_SHOW_ATTRIBUTE(total_idle_time); 3286 DEFINE_SHOW_ATTRIBUTE(devices); 3287 DEFINE_SHOW_ATTRIBUTE(perf_state); 3288 3289 static void genpd_debug_add(struct generic_pm_domain *genpd) 3290 { 3291 struct dentry *d; 3292 3293 if (!genpd_debugfs_dir) 3294 return; 3295 3296 d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); 3297 3298 debugfs_create_file("current_state", 0444, 3299 d, genpd, &status_fops); 3300 debugfs_create_file("sub_domains", 0444, 3301 d, genpd, &sub_domains_fops); 3302 debugfs_create_file("idle_states", 0444, 3303 d, genpd, &idle_states_fops); 3304 debugfs_create_file("active_time", 0444, 3305 d, genpd, &active_time_fops); 3306 debugfs_create_file("total_idle_time", 0444, 3307 d, genpd, &total_idle_time_fops); 3308 debugfs_create_file("devices", 0444, 3309 d, genpd, &devices_fops); 3310 if (genpd->set_performance_state) 3311 debugfs_create_file("perf_state", 0444, 3312 d, genpd, &perf_state_fops); 3313 } 3314 3315 static int __init genpd_debug_init(void) 3316 { 3317 struct generic_pm_domain *genpd; 3318 3319 genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); 3320 3321 debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir, 3322 NULL, &summary_fops); 3323 3324 list_for_each_entry(genpd, &gpd_list, gpd_list_node) 3325 
genpd_debug_add(genpd); 3326 3327 return 0; 3328 } 3329 late_initcall(genpd_debug_init); 3330 3331 static void __exit genpd_debug_exit(void) 3332 { 3333 debugfs_remove_recursive(genpd_debugfs_dir); 3334 } 3335 __exitcall(genpd_debug_exit); 3336 #endif /* CONFIG_DEBUG_FS */ 3337
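/*
 * Illustrative sketch (not part of the original file): the registration
 * sequence a hypothetical platform-specific PM domain driver could follow,
 * built on the APIs implemented above. All "my_*" names are assumptions and
 * the power_on/power_off bodies are stubs.
 *
 *	static int my_power_on(struct generic_pm_domain *domain)
 *	{
 *		return 0;
 *	}
 *
 *	static int my_power_off(struct generic_pm_domain *domain)
 *	{
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name		= "my_pd",
 *		.power_on	= my_power_on,
 *		.power_off	= my_power_off,
 *	};
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = pm_genpd_init(&my_pd, NULL, true);
 *		if (ret)
 *			return ret;
 *
 *		ret = of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
 *		if (ret)
 *			pm_genpd_remove(&my_pd);
 *
 *		return ret;
 *	}
 */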