1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * drivers/base/power/domain.c - Common code related to device power domains. 4 * 5 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. 6 */ 7 #define pr_fmt(fmt) "PM: " fmt 8 9 #include <linux/delay.h> 10 #include <linux/kernel.h> 11 #include <linux/io.h> 12 #include <linux/platform_device.h> 13 #include <linux/pm_opp.h> 14 #include <linux/pm_runtime.h> 15 #include <linux/pm_domain.h> 16 #include <linux/pm_qos.h> 17 #include <linux/pm_clock.h> 18 #include <linux/slab.h> 19 #include <linux/err.h> 20 #include <linux/sched.h> 21 #include <linux/suspend.h> 22 #include <linux/export.h> 23 #include <linux/cpu.h> 24 #include <linux/debugfs.h> 25 26 #include "power.h" 27 28 #define GENPD_RETRY_MAX_MS 250 /* Approximate */ 29 30 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ 31 ({ \ 32 type (*__routine)(struct device *__d); \ 33 type __ret = (type)0; \ 34 \ 35 __routine = genpd->dev_ops.callback; \ 36 if (__routine) { \ 37 __ret = __routine(dev); \ 38 } \ 39 __ret; \ 40 }) 41 42 static LIST_HEAD(gpd_list); 43 static DEFINE_MUTEX(gpd_list_lock); 44 45 struct genpd_lock_ops { 46 void (*lock)(struct generic_pm_domain *genpd); 47 void (*lock_nested)(struct generic_pm_domain *genpd, int depth); 48 int (*lock_interruptible)(struct generic_pm_domain *genpd); 49 void (*unlock)(struct generic_pm_domain *genpd); 50 }; 51 52 static void genpd_lock_mtx(struct generic_pm_domain *genpd) 53 { 54 mutex_lock(&genpd->mlock); 55 } 56 57 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd, 58 int depth) 59 { 60 mutex_lock_nested(&genpd->mlock, depth); 61 } 62 63 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd) 64 { 65 return mutex_lock_interruptible(&genpd->mlock); 66 } 67 68 static void genpd_unlock_mtx(struct generic_pm_domain *genpd) 69 { 70 return mutex_unlock(&genpd->mlock); 71 } 72 73 static const struct genpd_lock_ops genpd_mtx_ops = { 74 .lock = genpd_lock_mtx, 75 .lock_nested = genpd_lock_nested_mtx, 76 .lock_interruptible = genpd_lock_interruptible_mtx, 77 .unlock = genpd_unlock_mtx, 78 }; 79 80 static void genpd_lock_spin(struct generic_pm_domain *genpd) 81 __acquires(&genpd->slock) 82 { 83 unsigned long flags; 84 85 spin_lock_irqsave(&genpd->slock, flags); 86 genpd->lock_flags = flags; 87 } 88 89 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd, 90 int depth) 91 __acquires(&genpd->slock) 92 { 93 unsigned long flags; 94 95 spin_lock_irqsave_nested(&genpd->slock, flags, depth); 96 genpd->lock_flags = flags; 97 } 98 99 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd) 100 __acquires(&genpd->slock) 101 { 102 unsigned long flags; 103 104 spin_lock_irqsave(&genpd->slock, flags); 105 genpd->lock_flags = flags; 106 return 0; 107 } 108 109 static void genpd_unlock_spin(struct generic_pm_domain *genpd) 110 __releases(&genpd->slock) 111 { 112 spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags); 113 } 114 115 static const struct genpd_lock_ops genpd_spin_ops = { 116 .lock = genpd_lock_spin, 117 .lock_nested = genpd_lock_nested_spin, 118 .lock_interruptible = genpd_lock_interruptible_spin, 119 .unlock = genpd_unlock_spin, 120 }; 121 122 #define genpd_lock(p) p->lock_ops->lock(p) 123 #define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d) 124 #define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p) 125 #define genpd_unlock(p) p->lock_ops->unlock(p) 126 127 #define genpd_status_on(genpd) (genpd->status == GENPD_STATE_ON) 128 #define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE) 129 #define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON) 130 #define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP) 131 #define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN) 132 #define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON) 133 134 static inline bool irq_safe_dev_in_sleep_domain(struct device *dev, 135 const struct generic_pm_domain *genpd) 136 { 137 bool ret; 138 139 ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd); 140 141 /* 142 * Warn once if an IRQ safe device is attached to a domain, which 143 * callbacks are allowed to sleep. This indicates a suboptimal 144 * configuration for PM, but it doesn't matter for an always on domain. 145 */ 146 if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) 147 return ret; 148 149 if (ret) 150 dev_warn_once(dev, "PM domain %s will not be powered off\n", 151 genpd->name); 152 153 return ret; 154 } 155 156 static int genpd_runtime_suspend(struct device *dev); 157 158 /* 159 * Get the generic PM domain for a particular struct device. 160 * This validates the struct device pointer, the PM domain pointer, 161 * and checks that the PM domain pointer is a real generic PM domain. 162 * Any failure results in NULL being returned. 163 */ 164 static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev) 165 { 166 if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain)) 167 return NULL; 168 169 /* A genpd's always have its ->runtime_suspend() callback assigned. */ 170 if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend) 171 return pd_to_genpd(dev->pm_domain); 172 173 return NULL; 174 } 175 176 /* 177 * This should only be used where we are certain that the pm_domain 178 * attached to the device is a genpd domain. 179 */ 180 static struct generic_pm_domain *dev_to_genpd(struct device *dev) 181 { 182 if (IS_ERR_OR_NULL(dev->pm_domain)) 183 return ERR_PTR(-EINVAL); 184 185 return pd_to_genpd(dev->pm_domain); 186 } 187 188 static int genpd_stop_dev(const struct generic_pm_domain *genpd, 189 struct device *dev) 190 { 191 return GENPD_DEV_CALLBACK(genpd, int, stop, dev); 192 } 193 194 static int genpd_start_dev(const struct generic_pm_domain *genpd, 195 struct device *dev) 196 { 197 return GENPD_DEV_CALLBACK(genpd, int, start, dev); 198 } 199 200 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) 201 { 202 bool ret = false; 203 204 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0)) 205 ret = !!atomic_dec_and_test(&genpd->sd_count); 206 207 return ret; 208 } 209 210 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) 211 { 212 atomic_inc(&genpd->sd_count); 213 smp_mb__after_atomic(); 214 } 215 216 #ifdef CONFIG_DEBUG_FS 217 static struct dentry *genpd_debugfs_dir; 218 219 static void genpd_debug_add(struct generic_pm_domain *genpd); 220 221 static void genpd_debug_remove(struct generic_pm_domain *genpd) 222 { 223 struct dentry *d; 224 225 if (!genpd_debugfs_dir) 226 return; 227 228 d = debugfs_lookup(genpd->name, genpd_debugfs_dir); 229 debugfs_remove(d); 230 } 231 232 static void genpd_update_accounting(struct generic_pm_domain *genpd) 233 { 234 u64 delta, now; 235 236 now = ktime_get_mono_fast_ns(); 237 if (now <= genpd->accounting_time) 238 return; 239 240 delta = now - genpd->accounting_time; 241 242 /* 243 * If genpd->status is active, it means we are just 244 * out of off and so update the idle time and vice 245 * versa. 246 */ 247 if (genpd->status == GENPD_STATE_ON) 248 genpd->states[genpd->state_idx].idle_time += delta; 249 else 250 genpd->on_time += delta; 251 252 genpd->accounting_time = now; 253 } 254 #else 255 static inline void genpd_debug_add(struct generic_pm_domain *genpd) {} 256 static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {} 257 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} 258 #endif 259 260 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd, 261 unsigned int state) 262 { 263 struct generic_pm_domain_data *pd_data; 264 struct pm_domain_data *pdd; 265 struct gpd_link *link; 266 267 /* New requested state is same as Max requested state */ 268 if (state == genpd->performance_state) 269 return state; 270 271 /* New requested state is higher than Max requested state */ 272 if (state > genpd->performance_state) 273 return state; 274 275 /* Traverse all devices within the domain */ 276 list_for_each_entry(pdd, &genpd->dev_list, list_node) { 277 pd_data = to_gpd_data(pdd); 278 279 if (pd_data->performance_state > state) 280 state = pd_data->performance_state; 281 } 282 283 /* 284 * Traverse all sub-domains within the domain. This can be 285 * done without any additional locking as the link->performance_state 286 * field is protected by the parent genpd->lock, which is already taken. 287 * 288 * Also note that link->performance_state (subdomain's performance state 289 * requirement to parent domain) is different from 290 * link->child->performance_state (current performance state requirement 291 * of the devices/sub-domains of the subdomain) and so can have a 292 * different value. 293 * 294 * Note that we also take vote from powered-off sub-domains into account 295 * as the same is done for devices right now. 296 */ 297 list_for_each_entry(link, &genpd->parent_links, parent_node) { 298 if (link->performance_state > state) 299 state = link->performance_state; 300 } 301 302 return state; 303 } 304 305 static int genpd_xlate_performance_state(struct generic_pm_domain *genpd, 306 struct generic_pm_domain *parent, 307 unsigned int pstate) 308 { 309 if (!parent->set_performance_state) 310 return pstate; 311 312 return dev_pm_opp_xlate_performance_state(genpd->opp_table, 313 parent->opp_table, 314 pstate); 315 } 316 317 static int _genpd_set_performance_state(struct generic_pm_domain *genpd, 318 unsigned int state, int depth) 319 { 320 struct generic_pm_domain *parent; 321 struct gpd_link *link; 322 int parent_state, ret; 323 324 if (state == genpd->performance_state) 325 return 0; 326 327 /* Propagate to parents of genpd */ 328 list_for_each_entry(link, &genpd->child_links, child_node) { 329 parent = link->parent; 330 331 /* Find parent's performance state */ 332 ret = genpd_xlate_performance_state(genpd, parent, state); 333 if (unlikely(ret < 0)) 334 goto err; 335 336 parent_state = ret; 337 338 genpd_lock_nested(parent, depth + 1); 339 340 link->prev_performance_state = link->performance_state; 341 link->performance_state = parent_state; 342 parent_state = _genpd_reeval_performance_state(parent, 343 parent_state); 344 ret = _genpd_set_performance_state(parent, parent_state, depth + 1); 345 if (ret) 346 link->performance_state = link->prev_performance_state; 347 348 genpd_unlock(parent); 349 350 if (ret) 351 goto err; 352 } 353 354 if (genpd->set_performance_state) { 355 ret = genpd->set_performance_state(genpd, state); 356 if (ret) 357 goto err; 358 } 359 360 genpd->performance_state = state; 361 return 0; 362 363 err: 364 /* Encountered an error, lets rollback */ 365 list_for_each_entry_continue_reverse(link, &genpd->child_links, 366 child_node) { 367 parent = link->parent; 368 369 genpd_lock_nested(parent, depth + 1); 370 371 parent_state = link->prev_performance_state; 372 link->performance_state = parent_state; 373 374 parent_state = _genpd_reeval_performance_state(parent, 375 parent_state); 376 if (_genpd_set_performance_state(parent, parent_state, depth + 1)) { 377 pr_err("%s: Failed to roll back to %d performance state\n", 378 parent->name, parent_state); 379 } 380 381 genpd_unlock(parent); 382 } 383 384 return ret; 385 } 386 387 static int genpd_set_performance_state(struct device *dev, unsigned int state) 388 { 389 struct generic_pm_domain *genpd = dev_to_genpd(dev); 390 struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev); 391 unsigned int prev_state; 392 int ret; 393 394 prev_state = gpd_data->performance_state; 395 if (prev_state == state) 396 return 0; 397 398 gpd_data->performance_state = state; 399 state = _genpd_reeval_performance_state(genpd, state); 400 401 ret = _genpd_set_performance_state(genpd, state, 0); 402 if (ret) 403 gpd_data->performance_state = prev_state; 404 405 return ret; 406 } 407 408 static int genpd_drop_performance_state(struct device *dev) 409 { 410 unsigned int prev_state = dev_gpd_data(dev)->performance_state; 411 412 if (!genpd_set_performance_state(dev, 0)) 413 return prev_state; 414 415 return 0; 416 } 417 418 static void genpd_restore_performance_state(struct device *dev, 419 unsigned int state) 420 { 421 if (state) 422 genpd_set_performance_state(dev, state); 423 } 424 425 /** 426 * dev_pm_genpd_set_performance_state- Set performance state of device's power 427 * domain. 428 * 429 * @dev: Device for which the performance-state needs to be set. 430 * @state: Target performance state of the device. This can be set as 0 when the 431 * device doesn't have any performance state constraints left (And so 432 * the device wouldn't participate anymore to find the target 433 * performance state of the genpd). 434 * 435 * It is assumed that the users guarantee that the genpd wouldn't be detached 436 * while this routine is getting called. 437 * 438 * Returns 0 on success and negative error values on failures. 439 */ 440 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state) 441 { 442 struct generic_pm_domain *genpd; 443 int ret = 0; 444 445 genpd = dev_to_genpd_safe(dev); 446 if (!genpd) 447 return -ENODEV; 448 449 if (WARN_ON(!dev->power.subsys_data || 450 !dev->power.subsys_data->domain_data)) 451 return -EINVAL; 452 453 genpd_lock(genpd); 454 if (pm_runtime_suspended(dev)) { 455 dev_gpd_data(dev)->rpm_pstate = state; 456 } else { 457 ret = genpd_set_performance_state(dev, state); 458 if (!ret) 459 dev_gpd_data(dev)->rpm_pstate = 0; 460 } 461 genpd_unlock(genpd); 462 463 return ret; 464 } 465 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state); 466 467 /** 468 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup. 469 * 470 * @dev: Device to handle 471 * @next: impending interrupt/wakeup for the device 472 * 473 * 474 * Allow devices to inform of the next wakeup. It's assumed that the users 475 * guarantee that the genpd wouldn't be detached while this routine is getting 476 * called. Additionally, it's also assumed that @dev isn't runtime suspended 477 * (RPM_SUSPENDED)." 478 * Although devices are expected to update the next_wakeup after the end of 479 * their usecase as well, it is possible the devices themselves may not know 480 * about that, so stale @next will be ignored when powering off the domain. 481 */ 482 void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next) 483 { 484 struct generic_pm_domain *genpd; 485 struct gpd_timing_data *td; 486 487 genpd = dev_to_genpd_safe(dev); 488 if (!genpd) 489 return; 490 491 td = to_gpd_data(dev->power.subsys_data->domain_data)->td; 492 if (td) 493 td->next_wakeup = next; 494 } 495 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup); 496 497 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) 498 { 499 unsigned int state_idx = genpd->state_idx; 500 ktime_t time_start; 501 s64 elapsed_ns; 502 int ret; 503 504 /* Notify consumers that we are about to power on. */ 505 ret = raw_notifier_call_chain_robust(&genpd->power_notifiers, 506 GENPD_NOTIFY_PRE_ON, 507 GENPD_NOTIFY_OFF, NULL); 508 ret = notifier_to_errno(ret); 509 if (ret) 510 return ret; 511 512 if (!genpd->power_on) 513 goto out; 514 515 timed = timed && genpd->gd && !genpd->states[state_idx].fwnode; 516 if (!timed) { 517 ret = genpd->power_on(genpd); 518 if (ret) 519 goto err; 520 521 goto out; 522 } 523 524 time_start = ktime_get(); 525 ret = genpd->power_on(genpd); 526 if (ret) 527 goto err; 528 529 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 530 if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns) 531 goto out; 532 533 genpd->states[state_idx].power_on_latency_ns = elapsed_ns; 534 genpd->gd->max_off_time_changed = true; 535 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", 536 genpd->name, "on", elapsed_ns); 537 538 out: 539 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL); 540 return 0; 541 err: 542 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF, 543 NULL); 544 return ret; 545 } 546 547 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed) 548 { 549 unsigned int state_idx = genpd->state_idx; 550 ktime_t time_start; 551 s64 elapsed_ns; 552 int ret; 553 554 /* Notify consumers that we are about to power off. */ 555 ret = raw_notifier_call_chain_robust(&genpd->power_notifiers, 556 GENPD_NOTIFY_PRE_OFF, 557 GENPD_NOTIFY_ON, NULL); 558 ret = notifier_to_errno(ret); 559 if (ret) 560 return ret; 561 562 if (!genpd->power_off) 563 goto out; 564 565 timed = timed && genpd->gd && !genpd->states[state_idx].fwnode; 566 if (!timed) { 567 ret = genpd->power_off(genpd); 568 if (ret) 569 goto busy; 570 571 goto out; 572 } 573 574 time_start = ktime_get(); 575 ret = genpd->power_off(genpd); 576 if (ret) 577 goto busy; 578 579 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 580 if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns) 581 goto out; 582 583 genpd->states[state_idx].power_off_latency_ns = elapsed_ns; 584 genpd->gd->max_off_time_changed = true; 585 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", 586 genpd->name, "off", elapsed_ns); 587 588 out: 589 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF, 590 NULL); 591 return 0; 592 busy: 593 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL); 594 return ret; 595 } 596 597 /** 598 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off(). 599 * @genpd: PM domain to power off. 600 * 601 * Queue up the execution of genpd_power_off() unless it's already been done 602 * before. 603 */ 604 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) 605 { 606 queue_work(pm_wq, &genpd->power_off_work); 607 } 608 609 /** 610 * genpd_power_off - Remove power from a given PM domain. 611 * @genpd: PM domain to power down. 612 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the 613 * RPM status of the releated device is in an intermediate state, not yet turned 614 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not 615 * be RPM_SUSPENDED, while it tries to power off the PM domain. 616 * @depth: nesting count for lockdep. 617 * 618 * If all of the @genpd's devices have been suspended and all of its subdomains 619 * have been powered down, remove power from @genpd. 620 */ 621 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, 622 unsigned int depth) 623 { 624 struct pm_domain_data *pdd; 625 struct gpd_link *link; 626 unsigned int not_suspended = 0; 627 int ret; 628 629 /* 630 * Do not try to power off the domain in the following situations: 631 * (1) The domain is already in the "power off" state. 632 * (2) System suspend is in progress. 633 */ 634 if (!genpd_status_on(genpd) || genpd->prepared_count > 0) 635 return 0; 636 637 /* 638 * Abort power off for the PM domain in the following situations: 639 * (1) The domain is configured as always on. 640 * (2) When the domain has a subdomain being powered on. 641 */ 642 if (genpd_is_always_on(genpd) || 643 genpd_is_rpm_always_on(genpd) || 644 atomic_read(&genpd->sd_count) > 0) 645 return -EBUSY; 646 647 /* 648 * The children must be in their deepest (powered-off) states to allow 649 * the parent to be powered off. Note that, there's no need for 650 * additional locking, as powering on a child, requires the parent's 651 * lock to be acquired first. 652 */ 653 list_for_each_entry(link, &genpd->parent_links, parent_node) { 654 struct generic_pm_domain *child = link->child; 655 if (child->state_idx < child->state_count - 1) 656 return -EBUSY; 657 } 658 659 list_for_each_entry(pdd, &genpd->dev_list, list_node) { 660 /* 661 * Do not allow PM domain to be powered off, when an IRQ safe 662 * device is part of a non-IRQ safe domain. 663 */ 664 if (!pm_runtime_suspended(pdd->dev) || 665 irq_safe_dev_in_sleep_domain(pdd->dev, genpd)) 666 not_suspended++; 667 } 668 669 if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on)) 670 return -EBUSY; 671 672 if (genpd->gov && genpd->gov->power_down_ok) { 673 if (!genpd->gov->power_down_ok(&genpd->domain)) 674 return -EAGAIN; 675 } 676 677 /* Default to shallowest state. */ 678 if (!genpd->gov) 679 genpd->state_idx = 0; 680 681 /* Don't power off, if a child domain is waiting to power on. */ 682 if (atomic_read(&genpd->sd_count) > 0) 683 return -EBUSY; 684 685 ret = _genpd_power_off(genpd, true); 686 if (ret) { 687 genpd->states[genpd->state_idx].rejected++; 688 return ret; 689 } 690 691 genpd->status = GENPD_STATE_OFF; 692 genpd_update_accounting(genpd); 693 genpd->states[genpd->state_idx].usage++; 694 695 list_for_each_entry(link, &genpd->child_links, child_node) { 696 genpd_sd_counter_dec(link->parent); 697 genpd_lock_nested(link->parent, depth + 1); 698 genpd_power_off(link->parent, false, depth + 1); 699 genpd_unlock(link->parent); 700 } 701 702 return 0; 703 } 704 705 /** 706 * genpd_power_on - Restore power to a given PM domain and its parents. 707 * @genpd: PM domain to power up. 708 * @depth: nesting count for lockdep. 709 * 710 * Restore power to @genpd and all of its parents so that it is possible to 711 * resume a device belonging to it. 712 */ 713 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) 714 { 715 struct gpd_link *link; 716 int ret = 0; 717 718 if (genpd_status_on(genpd)) 719 return 0; 720 721 /* 722 * The list is guaranteed not to change while the loop below is being 723 * executed, unless one of the parents' .power_on() callbacks fiddles 724 * with it. 725 */ 726 list_for_each_entry(link, &genpd->child_links, child_node) { 727 struct generic_pm_domain *parent = link->parent; 728 729 genpd_sd_counter_inc(parent); 730 731 genpd_lock_nested(parent, depth + 1); 732 ret = genpd_power_on(parent, depth + 1); 733 genpd_unlock(parent); 734 735 if (ret) { 736 genpd_sd_counter_dec(parent); 737 goto err; 738 } 739 } 740 741 ret = _genpd_power_on(genpd, true); 742 if (ret) 743 goto err; 744 745 genpd->status = GENPD_STATE_ON; 746 genpd_update_accounting(genpd); 747 748 return 0; 749 750 err: 751 list_for_each_entry_continue_reverse(link, 752 &genpd->child_links, 753 child_node) { 754 genpd_sd_counter_dec(link->parent); 755 genpd_lock_nested(link->parent, depth + 1); 756 genpd_power_off(link->parent, false, depth + 1); 757 genpd_unlock(link->parent); 758 } 759 760 return ret; 761 } 762 763 static int genpd_dev_pm_start(struct device *dev) 764 { 765 struct generic_pm_domain *genpd = dev_to_genpd(dev); 766 767 return genpd_start_dev(genpd, dev); 768 } 769 770 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, 771 unsigned long val, void *ptr) 772 { 773 struct generic_pm_domain_data *gpd_data; 774 struct device *dev; 775 776 gpd_data = container_of(nb, struct generic_pm_domain_data, nb); 777 dev = gpd_data->base.dev; 778 779 for (;;) { 780 struct generic_pm_domain *genpd = ERR_PTR(-ENODATA); 781 struct pm_domain_data *pdd; 782 struct gpd_timing_data *td; 783 784 spin_lock_irq(&dev->power.lock); 785 786 pdd = dev->power.subsys_data ? 787 dev->power.subsys_data->domain_data : NULL; 788 if (pdd) { 789 td = to_gpd_data(pdd)->td; 790 if (td) { 791 td->constraint_changed = true; 792 genpd = dev_to_genpd(dev); 793 } 794 } 795 796 spin_unlock_irq(&dev->power.lock); 797 798 if (!IS_ERR(genpd)) { 799 genpd_lock(genpd); 800 genpd->gd->max_off_time_changed = true; 801 genpd_unlock(genpd); 802 } 803 804 dev = dev->parent; 805 if (!dev || dev->power.ignore_children) 806 break; 807 } 808 809 return NOTIFY_DONE; 810 } 811 812 /** 813 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0. 814 * @work: Work structure used for scheduling the execution of this function. 815 */ 816 static void genpd_power_off_work_fn(struct work_struct *work) 817 { 818 struct generic_pm_domain *genpd; 819 820 genpd = container_of(work, struct generic_pm_domain, power_off_work); 821 822 genpd_lock(genpd); 823 genpd_power_off(genpd, false, 0); 824 genpd_unlock(genpd); 825 } 826 827 /** 828 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks 829 * @dev: Device to handle. 830 */ 831 static int __genpd_runtime_suspend(struct device *dev) 832 { 833 int (*cb)(struct device *__dev); 834 835 if (dev->type && dev->type->pm) 836 cb = dev->type->pm->runtime_suspend; 837 else if (dev->class && dev->class->pm) 838 cb = dev->class->pm->runtime_suspend; 839 else if (dev->bus && dev->bus->pm) 840 cb = dev->bus->pm->runtime_suspend; 841 else 842 cb = NULL; 843 844 if (!cb && dev->driver && dev->driver->pm) 845 cb = dev->driver->pm->runtime_suspend; 846 847 return cb ? cb(dev) : 0; 848 } 849 850 /** 851 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks 852 * @dev: Device to handle. 853 */ 854 static int __genpd_runtime_resume(struct device *dev) 855 { 856 int (*cb)(struct device *__dev); 857 858 if (dev->type && dev->type->pm) 859 cb = dev->type->pm->runtime_resume; 860 else if (dev->class && dev->class->pm) 861 cb = dev->class->pm->runtime_resume; 862 else if (dev->bus && dev->bus->pm) 863 cb = dev->bus->pm->runtime_resume; 864 else 865 cb = NULL; 866 867 if (!cb && dev->driver && dev->driver->pm) 868 cb = dev->driver->pm->runtime_resume; 869 870 return cb ? cb(dev) : 0; 871 } 872 873 /** 874 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain. 875 * @dev: Device to suspend. 876 * 877 * Carry out a runtime suspend of a device under the assumption that its 878 * pm_domain field points to the domain member of an object of type 879 * struct generic_pm_domain representing a PM domain consisting of I/O devices. 880 */ 881 static int genpd_runtime_suspend(struct device *dev) 882 { 883 struct generic_pm_domain *genpd; 884 bool (*suspend_ok)(struct device *__dev); 885 struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev); 886 struct gpd_timing_data *td = gpd_data->td; 887 bool runtime_pm = pm_runtime_enabled(dev); 888 ktime_t time_start = 0; 889 s64 elapsed_ns; 890 int ret; 891 892 dev_dbg(dev, "%s()\n", __func__); 893 894 genpd = dev_to_genpd(dev); 895 if (IS_ERR(genpd)) 896 return -EINVAL; 897 898 /* 899 * A runtime PM centric subsystem/driver may re-use the runtime PM 900 * callbacks for other purposes than runtime PM. In those scenarios 901 * runtime PM is disabled. Under these circumstances, we shall skip 902 * validating/measuring the PM QoS latency. 903 */ 904 suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL; 905 if (runtime_pm && suspend_ok && !suspend_ok(dev)) 906 return -EBUSY; 907 908 /* Measure suspend latency. */ 909 if (td && runtime_pm) 910 time_start = ktime_get(); 911 912 ret = __genpd_runtime_suspend(dev); 913 if (ret) 914 return ret; 915 916 ret = genpd_stop_dev(genpd, dev); 917 if (ret) { 918 __genpd_runtime_resume(dev); 919 return ret; 920 } 921 922 /* Update suspend latency value if the measured time exceeds it. */ 923 if (td && runtime_pm) { 924 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 925 if (elapsed_ns > td->suspend_latency_ns) { 926 td->suspend_latency_ns = elapsed_ns; 927 dev_dbg(dev, "suspend latency exceeded, %lld ns\n", 928 elapsed_ns); 929 genpd->gd->max_off_time_changed = true; 930 td->constraint_changed = true; 931 } 932 } 933 934 /* 935 * If power.irq_safe is set, this routine may be run with 936 * IRQs disabled, so suspend only if the PM domain also is irq_safe. 937 */ 938 if (irq_safe_dev_in_sleep_domain(dev, genpd)) 939 return 0; 940 941 genpd_lock(genpd); 942 gpd_data->rpm_pstate = genpd_drop_performance_state(dev); 943 genpd_power_off(genpd, true, 0); 944 genpd_unlock(genpd); 945 946 return 0; 947 } 948 949 /** 950 * genpd_runtime_resume - Resume a device belonging to I/O PM domain. 951 * @dev: Device to resume. 952 * 953 * Carry out a runtime resume of a device under the assumption that its 954 * pm_domain field points to the domain member of an object of type 955 * struct generic_pm_domain representing a PM domain consisting of I/O devices. 956 */ 957 static int genpd_runtime_resume(struct device *dev) 958 { 959 struct generic_pm_domain *genpd; 960 struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev); 961 struct gpd_timing_data *td = gpd_data->td; 962 bool timed = td && pm_runtime_enabled(dev); 963 ktime_t time_start = 0; 964 s64 elapsed_ns; 965 int ret; 966 967 dev_dbg(dev, "%s()\n", __func__); 968 969 genpd = dev_to_genpd(dev); 970 if (IS_ERR(genpd)) 971 return -EINVAL; 972 973 /* 974 * As we don't power off a non IRQ safe domain, which holds 975 * an IRQ safe device, we don't need to restore power to it. 976 */ 977 if (irq_safe_dev_in_sleep_domain(dev, genpd)) 978 goto out; 979 980 genpd_lock(genpd); 981 ret = genpd_power_on(genpd, 0); 982 if (!ret) 983 genpd_restore_performance_state(dev, gpd_data->rpm_pstate); 984 genpd_unlock(genpd); 985 986 if (ret) 987 return ret; 988 989 out: 990 /* Measure resume latency. */ 991 if (timed) 992 time_start = ktime_get(); 993 994 ret = genpd_start_dev(genpd, dev); 995 if (ret) 996 goto err_poweroff; 997 998 ret = __genpd_runtime_resume(dev); 999 if (ret) 1000 goto err_stop; 1001 1002 /* Update resume latency value if the measured time exceeds it. */ 1003 if (timed) { 1004 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 1005 if (elapsed_ns > td->resume_latency_ns) { 1006 td->resume_latency_ns = elapsed_ns; 1007 dev_dbg(dev, "resume latency exceeded, %lld ns\n", 1008 elapsed_ns); 1009 genpd->gd->max_off_time_changed = true; 1010 td->constraint_changed = true; 1011 } 1012 } 1013 1014 return 0; 1015 1016 err_stop: 1017 genpd_stop_dev(genpd, dev); 1018 err_poweroff: 1019 if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) { 1020 genpd_lock(genpd); 1021 gpd_data->rpm_pstate = genpd_drop_performance_state(dev); 1022 genpd_power_off(genpd, true, 0); 1023 genpd_unlock(genpd); 1024 } 1025 1026 return ret; 1027 } 1028 1029 static bool pd_ignore_unused; 1030 static int __init pd_ignore_unused_setup(char *__unused) 1031 { 1032 pd_ignore_unused = true; 1033 return 1; 1034 } 1035 __setup("pd_ignore_unused", pd_ignore_unused_setup); 1036 1037 /** 1038 * genpd_power_off_unused - Power off all PM domains with no devices in use. 1039 */ 1040 static int __init genpd_power_off_unused(void) 1041 { 1042 struct generic_pm_domain *genpd; 1043 1044 if (pd_ignore_unused) { 1045 pr_warn("genpd: Not disabling unused power domains\n"); 1046 return 0; 1047 } 1048 1049 mutex_lock(&gpd_list_lock); 1050 1051 list_for_each_entry(genpd, &gpd_list, gpd_list_node) 1052 genpd_queue_power_off_work(genpd); 1053 1054 mutex_unlock(&gpd_list_lock); 1055 1056 return 0; 1057 } 1058 late_initcall(genpd_power_off_unused); 1059 1060 #ifdef CONFIG_PM_SLEEP 1061 1062 /** 1063 * genpd_sync_power_off - Synchronously power off a PM domain and its parents. 1064 * @genpd: PM domain to power off, if possible. 1065 * @use_lock: use the lock. 1066 * @depth: nesting count for lockdep. 1067 * 1068 * Check if the given PM domain can be powered off (during system suspend or 1069 * hibernation) and do that if so. Also, in that case propagate to its parents. 1070 * 1071 * This function is only called in "noirq" and "syscore" stages of system power 1072 * transitions. The "noirq" callbacks may be executed asynchronously, thus in 1073 * these cases the lock must be held. 1074 */ 1075 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock, 1076 unsigned int depth) 1077 { 1078 struct gpd_link *link; 1079 1080 if (!genpd_status_on(genpd) || genpd_is_always_on(genpd)) 1081 return; 1082 1083 if (genpd->suspended_count != genpd->device_count 1084 || atomic_read(&genpd->sd_count) > 0) 1085 return; 1086 1087 /* Check that the children are in their deepest (powered-off) state. */ 1088 list_for_each_entry(link, &genpd->parent_links, parent_node) { 1089 struct generic_pm_domain *child = link->child; 1090 if (child->state_idx < child->state_count - 1) 1091 return; 1092 } 1093 1094 /* Choose the deepest state when suspending */ 1095 genpd->state_idx = genpd->state_count - 1; 1096 if (_genpd_power_off(genpd, false)) 1097 return; 1098 1099 genpd->status = GENPD_STATE_OFF; 1100 1101 list_for_each_entry(link, &genpd->child_links, child_node) { 1102 genpd_sd_counter_dec(link->parent); 1103 1104 if (use_lock) 1105 genpd_lock_nested(link->parent, depth + 1); 1106 1107 genpd_sync_power_off(link->parent, use_lock, depth + 1); 1108 1109 if (use_lock) 1110 genpd_unlock(link->parent); 1111 } 1112 } 1113 1114 /** 1115 * genpd_sync_power_on - Synchronously power on a PM domain and its parents. 1116 * @genpd: PM domain to power on. 1117 * @use_lock: use the lock. 1118 * @depth: nesting count for lockdep. 1119 * 1120 * This function is only called in "noirq" and "syscore" stages of system power 1121 * transitions. The "noirq" callbacks may be executed asynchronously, thus in 1122 * these cases the lock must be held. 1123 */ 1124 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock, 1125 unsigned int depth) 1126 { 1127 struct gpd_link *link; 1128 1129 if (genpd_status_on(genpd)) 1130 return; 1131 1132 list_for_each_entry(link, &genpd->child_links, child_node) { 1133 genpd_sd_counter_inc(link->parent); 1134 1135 if (use_lock) 1136 genpd_lock_nested(link->parent, depth + 1); 1137 1138 genpd_sync_power_on(link->parent, use_lock, depth + 1); 1139 1140 if (use_lock) 1141 genpd_unlock(link->parent); 1142 } 1143 1144 _genpd_power_on(genpd, false); 1145 genpd->status = GENPD_STATE_ON; 1146 } 1147 1148 /** 1149 * genpd_prepare - Start power transition of a device in a PM domain. 1150 * @dev: Device to start the transition of. 1151 * 1152 * Start a power transition of a device (during a system-wide power transition) 1153 * under the assumption that its pm_domain field points to the domain member of 1154 * an object of type struct generic_pm_domain representing a PM domain 1155 * consisting of I/O devices. 1156 */ 1157 static int genpd_prepare(struct device *dev) 1158 { 1159 struct generic_pm_domain *genpd; 1160 int ret; 1161 1162 dev_dbg(dev, "%s()\n", __func__); 1163 1164 genpd = dev_to_genpd(dev); 1165 if (IS_ERR(genpd)) 1166 return -EINVAL; 1167 1168 genpd_lock(genpd); 1169 1170 if (genpd->prepared_count++ == 0) 1171 genpd->suspended_count = 0; 1172 1173 genpd_unlock(genpd); 1174 1175 ret = pm_generic_prepare(dev); 1176 if (ret < 0) { 1177 genpd_lock(genpd); 1178 1179 genpd->prepared_count--; 1180 1181 genpd_unlock(genpd); 1182 } 1183 1184 /* Never return 1, as genpd don't cope with the direct_complete path. */ 1185 return ret >= 0 ? 0 : ret; 1186 } 1187 1188 /** 1189 * genpd_finish_suspend - Completion of suspend or hibernation of device in an 1190 * I/O pm domain. 1191 * @dev: Device to suspend. 1192 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback. 1193 * 1194 * Stop the device and remove power from the domain if all devices in it have 1195 * been stopped. 1196 */ 1197 static int genpd_finish_suspend(struct device *dev, bool poweroff) 1198 { 1199 struct generic_pm_domain *genpd; 1200 int ret = 0; 1201 1202 genpd = dev_to_genpd(dev); 1203 if (IS_ERR(genpd)) 1204 return -EINVAL; 1205 1206 if (poweroff) 1207 ret = pm_generic_poweroff_noirq(dev); 1208 else 1209 ret = pm_generic_suspend_noirq(dev); 1210 if (ret) 1211 return ret; 1212 1213 if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd)) 1214 return 0; 1215 1216 if (genpd->dev_ops.stop && genpd->dev_ops.start && 1217 !pm_runtime_status_suspended(dev)) { 1218 ret = genpd_stop_dev(genpd, dev); 1219 if (ret) { 1220 if (poweroff) 1221 pm_generic_restore_noirq(dev); 1222 else 1223 pm_generic_resume_noirq(dev); 1224 return ret; 1225 } 1226 } 1227 1228 genpd_lock(genpd); 1229 genpd->suspended_count++; 1230 genpd_sync_power_off(genpd, true, 0); 1231 genpd_unlock(genpd); 1232 1233 return 0; 1234 } 1235 1236 /** 1237 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain. 1238 * @dev: Device to suspend. 1239 * 1240 * Stop the device and remove power from the domain if all devices in it have 1241 * been stopped. 1242 */ 1243 static int genpd_suspend_noirq(struct device *dev) 1244 { 1245 dev_dbg(dev, "%s()\n", __func__); 1246 1247 return genpd_finish_suspend(dev, false); 1248 } 1249 1250 /** 1251 * genpd_resume_noirq - Start of resume of device in an I/O PM domain. 1252 * @dev: Device to resume. 1253 * 1254 * Restore power to the device's PM domain, if necessary, and start the device. 1255 */ 1256 static int genpd_resume_noirq(struct device *dev) 1257 { 1258 struct generic_pm_domain *genpd; 1259 int ret; 1260 1261 dev_dbg(dev, "%s()\n", __func__); 1262 1263 genpd = dev_to_genpd(dev); 1264 if (IS_ERR(genpd)) 1265 return -EINVAL; 1266 1267 if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd)) 1268 return pm_generic_resume_noirq(dev); 1269 1270 genpd_lock(genpd); 1271 genpd_sync_power_on(genpd, true, 0); 1272 genpd->suspended_count--; 1273 genpd_unlock(genpd); 1274 1275 if (genpd->dev_ops.stop && genpd->dev_ops.start && 1276 !pm_runtime_status_suspended(dev)) { 1277 ret = genpd_start_dev(genpd, dev); 1278 if (ret) 1279 return ret; 1280 } 1281 1282 return pm_generic_resume_noirq(dev); 1283 } 1284 1285 /** 1286 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain. 1287 * @dev: Device to freeze. 1288 * 1289 * Carry out a late freeze of a device under the assumption that its 1290 * pm_domain field points to the domain member of an object of type 1291 * struct generic_pm_domain representing a power domain consisting of I/O 1292 * devices. 1293 */ 1294 static int genpd_freeze_noirq(struct device *dev) 1295 { 1296 const struct generic_pm_domain *genpd; 1297 int ret = 0; 1298 1299 dev_dbg(dev, "%s()\n", __func__); 1300 1301 genpd = dev_to_genpd(dev); 1302 if (IS_ERR(genpd)) 1303 return -EINVAL; 1304 1305 ret = pm_generic_freeze_noirq(dev); 1306 if (ret) 1307 return ret; 1308 1309 if (genpd->dev_ops.stop && genpd->dev_ops.start && 1310 !pm_runtime_status_suspended(dev)) 1311 ret = genpd_stop_dev(genpd, dev); 1312 1313 return ret; 1314 } 1315 1316 /** 1317 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain. 1318 * @dev: Device to thaw. 1319 * 1320 * Start the device, unless power has been removed from the domain already 1321 * before the system transition. 1322 */ 1323 static int genpd_thaw_noirq(struct device *dev) 1324 { 1325 const struct generic_pm_domain *genpd; 1326 int ret = 0; 1327 1328 dev_dbg(dev, "%s()\n", __func__); 1329 1330 genpd = dev_to_genpd(dev); 1331 if (IS_ERR(genpd)) 1332 return -EINVAL; 1333 1334 if (genpd->dev_ops.stop && genpd->dev_ops.start && 1335 !pm_runtime_status_suspended(dev)) { 1336 ret = genpd_start_dev(genpd, dev); 1337 if (ret) 1338 return ret; 1339 } 1340 1341 return pm_generic_thaw_noirq(dev); 1342 } 1343 1344 /** 1345 * genpd_poweroff_noirq - Completion of hibernation of device in an 1346 * I/O PM domain. 1347 * @dev: Device to poweroff. 1348 * 1349 * Stop the device and remove power from the domain if all devices in it have 1350 * been stopped. 1351 */ 1352 static int genpd_poweroff_noirq(struct device *dev) 1353 { 1354 dev_dbg(dev, "%s()\n", __func__); 1355 1356 return genpd_finish_suspend(dev, true); 1357 } 1358 1359 /** 1360 * genpd_restore_noirq - Start of restore of device in an I/O PM domain. 1361 * @dev: Device to resume. 1362 * 1363 * Make sure the domain will be in the same power state as before the 1364 * hibernation the system is resuming from and start the device if necessary. 1365 */ 1366 static int genpd_restore_noirq(struct device *dev) 1367 { 1368 struct generic_pm_domain *genpd; 1369 int ret = 0; 1370 1371 dev_dbg(dev, "%s()\n", __func__); 1372 1373 genpd = dev_to_genpd(dev); 1374 if (IS_ERR(genpd)) 1375 return -EINVAL; 1376 1377 /* 1378 * At this point suspended_count == 0 means we are being run for the 1379 * first time for the given domain in the present cycle. 1380 */ 1381 genpd_lock(genpd); 1382 if (genpd->suspended_count++ == 0) { 1383 /* 1384 * The boot kernel might put the domain into arbitrary state, 1385 * so make it appear as powered off to genpd_sync_power_on(), 1386 * so that it tries to power it on in case it was really off. 1387 */ 1388 genpd->status = GENPD_STATE_OFF; 1389 } 1390 1391 genpd_sync_power_on(genpd, true, 0); 1392 genpd_unlock(genpd); 1393 1394 if (genpd->dev_ops.stop && genpd->dev_ops.start && 1395 !pm_runtime_status_suspended(dev)) { 1396 ret = genpd_start_dev(genpd, dev); 1397 if (ret) 1398 return ret; 1399 } 1400 1401 return pm_generic_restore_noirq(dev); 1402 } 1403 1404 /** 1405 * genpd_complete - Complete power transition of a device in a power domain. 1406 * @dev: Device to complete the transition of. 1407 * 1408 * Complete a power transition of a device (during a system-wide power 1409 * transition) under the assumption that its pm_domain field points to the 1410 * domain member of an object of type struct generic_pm_domain representing 1411 * a power domain consisting of I/O devices. 1412 */ 1413 static void genpd_complete(struct device *dev) 1414 { 1415 struct generic_pm_domain *genpd; 1416 1417 dev_dbg(dev, "%s()\n", __func__); 1418 1419 genpd = dev_to_genpd(dev); 1420 if (IS_ERR(genpd)) 1421 return; 1422 1423 pm_generic_complete(dev); 1424 1425 genpd_lock(genpd); 1426 1427 genpd->prepared_count--; 1428 if (!genpd->prepared_count) 1429 genpd_queue_power_off_work(genpd); 1430 1431 genpd_unlock(genpd); 1432 } 1433 1434 static void genpd_switch_state(struct device *dev, bool suspend) 1435 { 1436 struct generic_pm_domain *genpd; 1437 bool use_lock; 1438 1439 genpd = dev_to_genpd_safe(dev); 1440 if (!genpd) 1441 return; 1442 1443 use_lock = genpd_is_irq_safe(genpd); 1444 1445 if (use_lock) 1446 genpd_lock(genpd); 1447 1448 if (suspend) { 1449 genpd->suspended_count++; 1450 genpd_sync_power_off(genpd, use_lock, 0); 1451 } else { 1452 genpd_sync_power_on(genpd, use_lock, 0); 1453 genpd->suspended_count--; 1454 } 1455 1456 if (use_lock) 1457 genpd_unlock(genpd); 1458 } 1459 1460 /** 1461 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev 1462 * @dev: The device that is attached to the genpd, that can be suspended. 1463 * 1464 * This routine should typically be called for a device that needs to be 1465 * suspended during the syscore suspend phase. It may also be called during 1466 * suspend-to-idle to suspend a corresponding CPU device that is attached to a 1467 * genpd. 1468 */ 1469 void dev_pm_genpd_suspend(struct device *dev) 1470 { 1471 genpd_switch_state(dev, true); 1472 } 1473 EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend); 1474 1475 /** 1476 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev 1477 * @dev: The device that is attached to the genpd, which needs to be resumed. 1478 * 1479 * This routine should typically be called for a device that needs to be resumed 1480 * during the syscore resume phase. It may also be called during suspend-to-idle 1481 * to resume a corresponding CPU device that is attached to a genpd. 1482 */ 1483 void dev_pm_genpd_resume(struct device *dev) 1484 { 1485 genpd_switch_state(dev, false); 1486 } 1487 EXPORT_SYMBOL_GPL(dev_pm_genpd_resume); 1488 1489 #else /* !CONFIG_PM_SLEEP */ 1490 1491 #define genpd_prepare NULL 1492 #define genpd_suspend_noirq NULL 1493 #define genpd_resume_noirq NULL 1494 #define genpd_freeze_noirq NULL 1495 #define genpd_thaw_noirq NULL 1496 #define genpd_poweroff_noirq NULL 1497 #define genpd_restore_noirq NULL 1498 #define genpd_complete NULL 1499 1500 #endif /* CONFIG_PM_SLEEP */ 1501 1502 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, 1503 bool has_governor) 1504 { 1505 struct generic_pm_domain_data *gpd_data; 1506 struct gpd_timing_data *td; 1507 int ret; 1508 1509 ret = dev_pm_get_subsys_data(dev); 1510 if (ret) 1511 return ERR_PTR(ret); 1512 1513 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); 1514 if (!gpd_data) { 1515 ret = -ENOMEM; 1516 goto err_put; 1517 } 1518 1519 gpd_data->base.dev = dev; 1520 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; 1521 1522 /* Allocate data used by a governor. */ 1523 if (has_governor) { 1524 td = kzalloc(sizeof(*td), GFP_KERNEL); 1525 if (!td) { 1526 ret = -ENOMEM; 1527 goto err_free; 1528 } 1529 1530 td->constraint_changed = true; 1531 td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS; 1532 td->next_wakeup = KTIME_MAX; 1533 gpd_data->td = td; 1534 } 1535 1536 spin_lock_irq(&dev->power.lock); 1537 1538 if (dev->power.subsys_data->domain_data) 1539 ret = -EINVAL; 1540 else 1541 dev->power.subsys_data->domain_data = &gpd_data->base; 1542 1543 spin_unlock_irq(&dev->power.lock); 1544 1545 if (ret) 1546 goto err_free; 1547 1548 return gpd_data; 1549 1550 err_free: 1551 kfree(gpd_data->td); 1552 kfree(gpd_data); 1553 err_put: 1554 dev_pm_put_subsys_data(dev); 1555 return ERR_PTR(ret); 1556 } 1557 1558 static void genpd_free_dev_data(struct device *dev, 1559 struct generic_pm_domain_data *gpd_data) 1560 { 1561 spin_lock_irq(&dev->power.lock); 1562 1563 dev->power.subsys_data->domain_data = NULL; 1564 1565 spin_unlock_irq(&dev->power.lock); 1566 1567 kfree(gpd_data->td); 1568 kfree(gpd_data); 1569 dev_pm_put_subsys_data(dev); 1570 } 1571 1572 static void genpd_update_cpumask(struct generic_pm_domain *genpd, 1573 int cpu, bool set, unsigned int depth) 1574 { 1575 struct gpd_link *link; 1576 1577 if (!genpd_is_cpu_domain(genpd)) 1578 return; 1579 1580 list_for_each_entry(link, &genpd->child_links, child_node) { 1581 struct generic_pm_domain *parent = link->parent; 1582 1583 genpd_lock_nested(parent, depth + 1); 1584 genpd_update_cpumask(parent, cpu, set, depth + 1); 1585 genpd_unlock(parent); 1586 } 1587 1588 if (set) 1589 cpumask_set_cpu(cpu, genpd->cpus); 1590 else 1591 cpumask_clear_cpu(cpu, genpd->cpus); 1592 } 1593 1594 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu) 1595 { 1596 if (cpu >= 0) 1597 genpd_update_cpumask(genpd, cpu, true, 0); 1598 } 1599 1600 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu) 1601 { 1602 if (cpu >= 0) 1603 genpd_update_cpumask(genpd, cpu, false, 0); 1604 } 1605 1606 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev) 1607 { 1608 int cpu; 1609 1610 if (!genpd_is_cpu_domain(genpd)) 1611 return -1; 1612 1613 for_each_possible_cpu(cpu) { 1614 if (get_cpu_device(cpu) == dev) 1615 return cpu; 1616 } 1617 1618 return -1; 1619 } 1620 1621 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, 1622 struct device *base_dev) 1623 { 1624 struct genpd_governor_data *gd = genpd->gd; 1625 struct generic_pm_domain_data *gpd_data; 1626 int ret; 1627 1628 dev_dbg(dev, "%s()\n", __func__); 1629 1630 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) 1631 return -EINVAL; 1632 1633 gpd_data = genpd_alloc_dev_data(dev, gd); 1634 if (IS_ERR(gpd_data)) 1635 return PTR_ERR(gpd_data); 1636 1637 gpd_data->cpu = genpd_get_cpu(genpd, base_dev); 1638 1639 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; 1640 if (ret) 1641 goto out; 1642 1643 genpd_lock(genpd); 1644 1645 genpd_set_cpumask(genpd, gpd_data->cpu); 1646 dev_pm_domain_set(dev, &genpd->domain); 1647 1648 genpd->device_count++; 1649 if (gd) 1650 gd->max_off_time_changed = true; 1651 1652 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1653 1654 genpd_unlock(genpd); 1655 out: 1656 if (ret) 1657 genpd_free_dev_data(dev, gpd_data); 1658 else 1659 dev_pm_qos_add_notifier(dev, &gpd_data->nb, 1660 DEV_PM_QOS_RESUME_LATENCY); 1661 1662 return ret; 1663 } 1664 1665 /** 1666 * pm_genpd_add_device - Add a device to an I/O PM domain. 1667 * @genpd: PM domain to add the device to. 1668 * @dev: Device to be added. 1669 */ 1670 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) 1671 { 1672 int ret; 1673 1674 mutex_lock(&gpd_list_lock); 1675 ret = genpd_add_device(genpd, dev, dev); 1676 mutex_unlock(&gpd_list_lock); 1677 1678 return ret; 1679 } 1680 EXPORT_SYMBOL_GPL(pm_genpd_add_device); 1681 1682 static int genpd_remove_device(struct generic_pm_domain *genpd, 1683 struct device *dev) 1684 { 1685 struct generic_pm_domain_data *gpd_data; 1686 struct pm_domain_data *pdd; 1687 int ret = 0; 1688 1689 dev_dbg(dev, "%s()\n", __func__); 1690 1691 pdd = dev->power.subsys_data->domain_data; 1692 gpd_data = to_gpd_data(pdd); 1693 dev_pm_qos_remove_notifier(dev, &gpd_data->nb, 1694 DEV_PM_QOS_RESUME_LATENCY); 1695 1696 genpd_lock(genpd); 1697 1698 if (genpd->prepared_count > 0) { 1699 ret = -EAGAIN; 1700 goto out; 1701 } 1702 1703 genpd->device_count--; 1704 if (genpd->gd) 1705 genpd->gd->max_off_time_changed = true; 1706 1707 genpd_clear_cpumask(genpd, gpd_data->cpu); 1708 dev_pm_domain_set(dev, NULL); 1709 1710 list_del_init(&pdd->list_node); 1711 1712 genpd_unlock(genpd); 1713 1714 if (genpd->detach_dev) 1715 genpd->detach_dev(genpd, dev); 1716 1717 genpd_free_dev_data(dev, gpd_data); 1718 1719 return 0; 1720 1721 out: 1722 genpd_unlock(genpd); 1723 dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY); 1724 1725 return ret; 1726 } 1727 1728 /** 1729 * pm_genpd_remove_device - Remove a device from an I/O PM domain. 1730 * @dev: Device to be removed. 1731 */ 1732 int pm_genpd_remove_device(struct device *dev) 1733 { 1734 struct generic_pm_domain *genpd = dev_to_genpd_safe(dev); 1735 1736 if (!genpd) 1737 return -EINVAL; 1738 1739 return genpd_remove_device(genpd, dev); 1740 } 1741 EXPORT_SYMBOL_GPL(pm_genpd_remove_device); 1742 1743 /** 1744 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev 1745 * 1746 * @dev: Device that should be associated with the notifier 1747 * @nb: The notifier block to register 1748 * 1749 * Users may call this function to add a genpd power on/off notifier for an 1750 * attached @dev. Only one notifier per device is allowed. The notifier is 1751 * sent when genpd is powering on/off the PM domain. 1752 * 1753 * It is assumed that the user guarantee that the genpd wouldn't be detached 1754 * while this routine is getting called. 1755 * 1756 * Returns 0 on success and negative error values on failures. 1757 */ 1758 int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb) 1759 { 1760 struct generic_pm_domain *genpd; 1761 struct generic_pm_domain_data *gpd_data; 1762 int ret; 1763 1764 genpd = dev_to_genpd_safe(dev); 1765 if (!genpd) 1766 return -ENODEV; 1767 1768 if (WARN_ON(!dev->power.subsys_data || 1769 !dev->power.subsys_data->domain_data)) 1770 return -EINVAL; 1771 1772 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 1773 if (gpd_data->power_nb) 1774 return -EEXIST; 1775 1776 genpd_lock(genpd); 1777 ret = raw_notifier_chain_register(&genpd->power_notifiers, nb); 1778 genpd_unlock(genpd); 1779 1780 if (ret) { 1781 dev_warn(dev, "failed to add notifier for PM domain %s\n", 1782 genpd->name); 1783 return ret; 1784 } 1785 1786 gpd_data->power_nb = nb; 1787 return 0; 1788 } 1789 EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier); 1790 1791 /** 1792 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev 1793 * 1794 * @dev: Device that is associated with the notifier 1795 * 1796 * Users may call this function to remove a genpd power on/off notifier for an 1797 * attached @dev. 1798 * 1799 * It is assumed that the user guarantee that the genpd wouldn't be detached 1800 * while this routine is getting called. 1801 * 1802 * Returns 0 on success and negative error values on failures. 1803 */ 1804 int dev_pm_genpd_remove_notifier(struct device *dev) 1805 { 1806 struct generic_pm_domain *genpd; 1807 struct generic_pm_domain_data *gpd_data; 1808 int ret; 1809 1810 genpd = dev_to_genpd_safe(dev); 1811 if (!genpd) 1812 return -ENODEV; 1813 1814 if (WARN_ON(!dev->power.subsys_data || 1815 !dev->power.subsys_data->domain_data)) 1816 return -EINVAL; 1817 1818 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 1819 if (!gpd_data->power_nb) 1820 return -ENODEV; 1821 1822 genpd_lock(genpd); 1823 ret = raw_notifier_chain_unregister(&genpd->power_notifiers, 1824 gpd_data->power_nb); 1825 genpd_unlock(genpd); 1826 1827 if (ret) { 1828 dev_warn(dev, "failed to remove notifier for PM domain %s\n", 1829 genpd->name); 1830 return ret; 1831 } 1832 1833 gpd_data->power_nb = NULL; 1834 return 0; 1835 } 1836 EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier); 1837 1838 static int genpd_add_subdomain(struct generic_pm_domain *genpd, 1839 struct generic_pm_domain *subdomain) 1840 { 1841 struct gpd_link *link, *itr; 1842 int ret = 0; 1843 1844 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) 1845 || genpd == subdomain) 1846 return -EINVAL; 1847 1848 /* 1849 * If the domain can be powered on/off in an IRQ safe 1850 * context, ensure that the subdomain can also be 1851 * powered on/off in that context. 1852 */ 1853 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) { 1854 WARN(1, "Parent %s of subdomain %s must be IRQ safe\n", 1855 genpd->name, subdomain->name); 1856 return -EINVAL; 1857 } 1858 1859 link = kzalloc(sizeof(*link), GFP_KERNEL); 1860 if (!link) 1861 return -ENOMEM; 1862 1863 genpd_lock(subdomain); 1864 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); 1865 1866 if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) { 1867 ret = -EINVAL; 1868 goto out; 1869 } 1870 1871 list_for_each_entry(itr, &genpd->parent_links, parent_node) { 1872 if (itr->child == subdomain && itr->parent == genpd) { 1873 ret = -EINVAL; 1874 goto out; 1875 } 1876 } 1877 1878 link->parent = genpd; 1879 list_add_tail(&link->parent_node, &genpd->parent_links); 1880 link->child = subdomain; 1881 list_add_tail(&link->child_node, &subdomain->child_links); 1882 if (genpd_status_on(subdomain)) 1883 genpd_sd_counter_inc(genpd); 1884 1885 out: 1886 genpd_unlock(genpd); 1887 genpd_unlock(subdomain); 1888 if (ret) 1889 kfree(link); 1890 return ret; 1891 } 1892 1893 /** 1894 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 1895 * @genpd: Leader PM domain to add the subdomain to. 1896 * @subdomain: Subdomain to be added. 1897 */ 1898 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, 1899 struct generic_pm_domain *subdomain) 1900 { 1901 int ret; 1902 1903 mutex_lock(&gpd_list_lock); 1904 ret = genpd_add_subdomain(genpd, subdomain); 1905 mutex_unlock(&gpd_list_lock); 1906 1907 return ret; 1908 } 1909 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain); 1910 1911 /** 1912 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 1913 * @genpd: Leader PM domain to remove the subdomain from. 1914 * @subdomain: Subdomain to be removed. 1915 */ 1916 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, 1917 struct generic_pm_domain *subdomain) 1918 { 1919 struct gpd_link *l, *link; 1920 int ret = -EINVAL; 1921 1922 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) 1923 return -EINVAL; 1924 1925 genpd_lock(subdomain); 1926 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); 1927 1928 if (!list_empty(&subdomain->parent_links) || subdomain->device_count) { 1929 pr_warn("%s: unable to remove subdomain %s\n", 1930 genpd->name, subdomain->name); 1931 ret = -EBUSY; 1932 goto out; 1933 } 1934 1935 list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) { 1936 if (link->child != subdomain) 1937 continue; 1938 1939 list_del(&link->parent_node); 1940 list_del(&link->child_node); 1941 kfree(link); 1942 if (genpd_status_on(subdomain)) 1943 genpd_sd_counter_dec(genpd); 1944 1945 ret = 0; 1946 break; 1947 } 1948 1949 out: 1950 genpd_unlock(genpd); 1951 genpd_unlock(subdomain); 1952 1953 return ret; 1954 } 1955 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain); 1956 1957 static void genpd_free_default_power_state(struct genpd_power_state *states, 1958 unsigned int state_count) 1959 { 1960 kfree(states); 1961 } 1962 1963 static int genpd_set_default_power_state(struct generic_pm_domain *genpd) 1964 { 1965 struct genpd_power_state *state; 1966 1967 state = kzalloc(sizeof(*state), GFP_KERNEL); 1968 if (!state) 1969 return -ENOMEM; 1970 1971 genpd->states = state; 1972 genpd->state_count = 1; 1973 genpd->free_states = genpd_free_default_power_state; 1974 1975 return 0; 1976 } 1977 1978 static int genpd_alloc_data(struct generic_pm_domain *genpd) 1979 { 1980 struct genpd_governor_data *gd = NULL; 1981 int ret; 1982 1983 if (genpd_is_cpu_domain(genpd) && 1984 !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL)) 1985 return -ENOMEM; 1986 1987 if (genpd->gov) { 1988 gd = kzalloc(sizeof(*gd), GFP_KERNEL); 1989 if (!gd) { 1990 ret = -ENOMEM; 1991 goto free; 1992 } 1993 1994 gd->max_off_time_ns = -1; 1995 gd->max_off_time_changed = true; 1996 gd->next_wakeup = KTIME_MAX; 1997 } 1998 1999 /* Use only one "off" state if there were no states declared */ 2000 if (genpd->state_count == 0) { 2001 ret = genpd_set_default_power_state(genpd); 2002 if (ret) 2003 goto free; 2004 } 2005 2006 genpd->gd = gd; 2007 return 0; 2008 2009 free: 2010 if (genpd_is_cpu_domain(genpd)) 2011 free_cpumask_var(genpd->cpus); 2012 kfree(gd); 2013 return ret; 2014 } 2015 2016 static void genpd_free_data(struct generic_pm_domain *genpd) 2017 { 2018 if (genpd_is_cpu_domain(genpd)) 2019 free_cpumask_var(genpd->cpus); 2020 if (genpd->free_states) 2021 genpd->free_states(genpd->states, genpd->state_count); 2022 kfree(genpd->gd); 2023 } 2024 2025 static void genpd_lock_init(struct generic_pm_domain *genpd) 2026 { 2027 if (genpd->flags & GENPD_FLAG_IRQ_SAFE) { 2028 spin_lock_init(&genpd->slock); 2029 genpd->lock_ops = &genpd_spin_ops; 2030 } else { 2031 mutex_init(&genpd->mlock); 2032 genpd->lock_ops = &genpd_mtx_ops; 2033 } 2034 } 2035 2036 /** 2037 * pm_genpd_init - Initialize a generic I/O PM domain object. 2038 * @genpd: PM domain object to initialize. 2039 * @gov: PM domain governor to associate with the domain (may be NULL). 2040 * @is_off: Initial value of the domain's power_is_off field. 2041 * 2042 * Returns 0 on successful initialization, else a negative error code. 2043 */ 2044 int pm_genpd_init(struct generic_pm_domain *genpd, 2045 struct dev_power_governor *gov, bool is_off) 2046 { 2047 int ret; 2048 2049 if (IS_ERR_OR_NULL(genpd)) 2050 return -EINVAL; 2051 2052 INIT_LIST_HEAD(&genpd->parent_links); 2053 INIT_LIST_HEAD(&genpd->child_links); 2054 INIT_LIST_HEAD(&genpd->dev_list); 2055 RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers); 2056 genpd_lock_init(genpd); 2057 genpd->gov = gov; 2058 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); 2059 atomic_set(&genpd->sd_count, 0); 2060 genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON; 2061 genpd->device_count = 0; 2062 genpd->provider = NULL; 2063 genpd->has_provider = false; 2064 genpd->accounting_time = ktime_get_mono_fast_ns(); 2065 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; 2066 genpd->domain.ops.runtime_resume = genpd_runtime_resume; 2067 genpd->domain.ops.prepare = genpd_prepare; 2068 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq; 2069 genpd->domain.ops.resume_noirq = genpd_resume_noirq; 2070 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq; 2071 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq; 2072 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq; 2073 genpd->domain.ops.restore_noirq = genpd_restore_noirq; 2074 genpd->domain.ops.complete = genpd_complete; 2075 genpd->domain.start = genpd_dev_pm_start; 2076 2077 if (genpd->flags & GENPD_FLAG_PM_CLK) { 2078 genpd->dev_ops.stop = pm_clk_suspend; 2079 genpd->dev_ops.start = pm_clk_resume; 2080 } 2081 2082 /* The always-on governor works better with the corresponding flag. */ 2083 if (gov == &pm_domain_always_on_gov) 2084 genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON; 2085 2086 /* Always-on domains must be powered on at initialization. */ 2087 if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) && 2088 !genpd_status_on(genpd)) 2089 return -EINVAL; 2090 2091 /* Multiple states but no governor doesn't make sense. */ 2092 if (!gov && genpd->state_count > 1) 2093 pr_warn("%s: no governor for states\n", genpd->name); 2094 2095 ret = genpd_alloc_data(genpd); 2096 if (ret) 2097 return ret; 2098 2099 device_initialize(&genpd->dev); 2100 dev_set_name(&genpd->dev, "%s", genpd->name); 2101 2102 mutex_lock(&gpd_list_lock); 2103 list_add(&genpd->gpd_list_node, &gpd_list); 2104 mutex_unlock(&gpd_list_lock); 2105 genpd_debug_add(genpd); 2106 2107 return 0; 2108 } 2109 EXPORT_SYMBOL_GPL(pm_genpd_init); 2110 2111 static int genpd_remove(struct generic_pm_domain *genpd) 2112 { 2113 struct gpd_link *l, *link; 2114 2115 if (IS_ERR_OR_NULL(genpd)) 2116 return -EINVAL; 2117 2118 genpd_lock(genpd); 2119 2120 if (genpd->has_provider) { 2121 genpd_unlock(genpd); 2122 pr_err("Provider present, unable to remove %s\n", genpd->name); 2123 return -EBUSY; 2124 } 2125 2126 if (!list_empty(&genpd->parent_links) || genpd->device_count) { 2127 genpd_unlock(genpd); 2128 pr_err("%s: unable to remove %s\n", __func__, genpd->name); 2129 return -EBUSY; 2130 } 2131 2132 list_for_each_entry_safe(link, l, &genpd->child_links, child_node) { 2133 list_del(&link->parent_node); 2134 list_del(&link->child_node); 2135 kfree(link); 2136 } 2137 2138 list_del(&genpd->gpd_list_node); 2139 genpd_unlock(genpd); 2140 genpd_debug_remove(genpd); 2141 cancel_work_sync(&genpd->power_off_work); 2142 genpd_free_data(genpd); 2143 2144 pr_debug("%s: removed %s\n", __func__, genpd->name); 2145 2146 return 0; 2147 } 2148 2149 /** 2150 * pm_genpd_remove - Remove a generic I/O PM domain 2151 * @genpd: Pointer to PM domain that is to be removed. 2152 * 2153 * To remove the PM domain, this function: 2154 * - Removes the PM domain as a subdomain to any parent domains, 2155 * if it was added. 2156 * - Removes the PM domain from the list of registered PM domains. 2157 * 2158 * The PM domain will only be removed, if the associated provider has 2159 * been removed, it is not a parent to any other PM domain and has no 2160 * devices associated with it. 2161 */ 2162 int pm_genpd_remove(struct generic_pm_domain *genpd) 2163 { 2164 int ret; 2165 2166 mutex_lock(&gpd_list_lock); 2167 ret = genpd_remove(genpd); 2168 mutex_unlock(&gpd_list_lock); 2169 2170 return ret; 2171 } 2172 EXPORT_SYMBOL_GPL(pm_genpd_remove); 2173 2174 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF 2175 2176 /* 2177 * Device Tree based PM domain providers. 2178 * 2179 * The code below implements generic device tree based PM domain providers that 2180 * bind device tree nodes with generic PM domains registered in the system. 2181 * 2182 * Any driver that registers generic PM domains and needs to support binding of 2183 * devices to these domains is supposed to register a PM domain provider, which 2184 * maps a PM domain specifier retrieved from the device tree to a PM domain. 2185 * 2186 * Two simple mapping functions have been provided for convenience: 2187 * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping. 2188 * - genpd_xlate_onecell() for mapping of multiple PM domains per node by 2189 * index. 2190 */ 2191 2192 /** 2193 * struct of_genpd_provider - PM domain provider registration structure 2194 * @link: Entry in global list of PM domain providers 2195 * @node: Pointer to device tree node of PM domain provider 2196 * @xlate: Provider-specific xlate callback mapping a set of specifier cells 2197 * into a PM domain. 2198 * @data: context pointer to be passed into @xlate callback 2199 */ 2200 struct of_genpd_provider { 2201 struct list_head link; 2202 struct device_node *node; 2203 genpd_xlate_t xlate; 2204 void *data; 2205 }; 2206 2207 /* List of registered PM domain providers. */ 2208 static LIST_HEAD(of_genpd_providers); 2209 /* Mutex to protect the list above. */ 2210 static DEFINE_MUTEX(of_genpd_mutex); 2211 2212 /** 2213 * genpd_xlate_simple() - Xlate function for direct node-domain mapping 2214 * @genpdspec: OF phandle args to map into a PM domain 2215 * @data: xlate function private data - pointer to struct generic_pm_domain 2216 * 2217 * This is a generic xlate function that can be used to model PM domains that 2218 * have their own device tree nodes. The private data of xlate function needs 2219 * to be a valid pointer to struct generic_pm_domain. 2220 */ 2221 static struct generic_pm_domain *genpd_xlate_simple( 2222 struct of_phandle_args *genpdspec, 2223 void *data) 2224 { 2225 return data; 2226 } 2227 2228 /** 2229 * genpd_xlate_onecell() - Xlate function using a single index. 2230 * @genpdspec: OF phandle args to map into a PM domain 2231 * @data: xlate function private data - pointer to struct genpd_onecell_data 2232 * 2233 * This is a generic xlate function that can be used to model simple PM domain 2234 * controllers that have one device tree node and provide multiple PM domains. 2235 * A single cell is used as an index into an array of PM domains specified in 2236 * the genpd_onecell_data struct when registering the provider. 2237 */ 2238 static struct generic_pm_domain *genpd_xlate_onecell( 2239 struct of_phandle_args *genpdspec, 2240 void *data) 2241 { 2242 struct genpd_onecell_data *genpd_data = data; 2243 unsigned int idx = genpdspec->args[0]; 2244 2245 if (genpdspec->args_count != 1) 2246 return ERR_PTR(-EINVAL); 2247 2248 if (idx >= genpd_data->num_domains) { 2249 pr_err("%s: invalid domain index %u\n", __func__, idx); 2250 return ERR_PTR(-EINVAL); 2251 } 2252 2253 if (!genpd_data->domains[idx]) 2254 return ERR_PTR(-ENOENT); 2255 2256 return genpd_data->domains[idx]; 2257 } 2258 2259 /** 2260 * genpd_add_provider() - Register a PM domain provider for a node 2261 * @np: Device node pointer associated with the PM domain provider. 2262 * @xlate: Callback for decoding PM domain from phandle arguments. 2263 * @data: Context pointer for @xlate callback. 2264 */ 2265 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, 2266 void *data) 2267 { 2268 struct of_genpd_provider *cp; 2269 2270 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 2271 if (!cp) 2272 return -ENOMEM; 2273 2274 cp->node = of_node_get(np); 2275 cp->data = data; 2276 cp->xlate = xlate; 2277 fwnode_dev_initialized(&np->fwnode, true); 2278 2279 mutex_lock(&of_genpd_mutex); 2280 list_add(&cp->link, &of_genpd_providers); 2281 mutex_unlock(&of_genpd_mutex); 2282 pr_debug("Added domain provider from %pOF\n", np); 2283 2284 return 0; 2285 } 2286 2287 static bool genpd_present(const struct generic_pm_domain *genpd) 2288 { 2289 bool ret = false; 2290 const struct generic_pm_domain *gpd; 2291 2292 mutex_lock(&gpd_list_lock); 2293 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 2294 if (gpd == genpd) { 2295 ret = true; 2296 break; 2297 } 2298 } 2299 mutex_unlock(&gpd_list_lock); 2300 2301 return ret; 2302 } 2303 2304 /** 2305 * of_genpd_add_provider_simple() - Register a simple PM domain provider 2306 * @np: Device node pointer associated with the PM domain provider. 2307 * @genpd: Pointer to PM domain associated with the PM domain provider. 2308 */ 2309 int of_genpd_add_provider_simple(struct device_node *np, 2310 struct generic_pm_domain *genpd) 2311 { 2312 int ret; 2313 2314 if (!np || !genpd) 2315 return -EINVAL; 2316 2317 if (!genpd_present(genpd)) 2318 return -EINVAL; 2319 2320 genpd->dev.of_node = np; 2321 2322 /* Parse genpd OPP table */ 2323 if (genpd->set_performance_state) { 2324 ret = dev_pm_opp_of_add_table(&genpd->dev); 2325 if (ret) 2326 return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n"); 2327 2328 /* 2329 * Save table for faster processing while setting performance 2330 * state. 2331 */ 2332 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); 2333 WARN_ON(IS_ERR(genpd->opp_table)); 2334 } 2335 2336 ret = genpd_add_provider(np, genpd_xlate_simple, genpd); 2337 if (ret) { 2338 if (genpd->set_performance_state) { 2339 dev_pm_opp_put_opp_table(genpd->opp_table); 2340 dev_pm_opp_of_remove_table(&genpd->dev); 2341 } 2342 2343 return ret; 2344 } 2345 2346 genpd->provider = &np->fwnode; 2347 genpd->has_provider = true; 2348 2349 return 0; 2350 } 2351 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple); 2352 2353 /** 2354 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider 2355 * @np: Device node pointer associated with the PM domain provider. 2356 * @data: Pointer to the data associated with the PM domain provider. 2357 */ 2358 int of_genpd_add_provider_onecell(struct device_node *np, 2359 struct genpd_onecell_data *data) 2360 { 2361 struct generic_pm_domain *genpd; 2362 unsigned int i; 2363 int ret = -EINVAL; 2364 2365 if (!np || !data) 2366 return -EINVAL; 2367 2368 if (!data->xlate) 2369 data->xlate = genpd_xlate_onecell; 2370 2371 for (i = 0; i < data->num_domains; i++) { 2372 genpd = data->domains[i]; 2373 2374 if (!genpd) 2375 continue; 2376 if (!genpd_present(genpd)) 2377 goto error; 2378 2379 genpd->dev.of_node = np; 2380 2381 /* Parse genpd OPP table */ 2382 if (genpd->set_performance_state) { 2383 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i); 2384 if (ret) { 2385 dev_err_probe(&genpd->dev, ret, 2386 "Failed to add OPP table for index %d\n", i); 2387 goto error; 2388 } 2389 2390 /* 2391 * Save table for faster processing while setting 2392 * performance state. 2393 */ 2394 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); 2395 WARN_ON(IS_ERR(genpd->opp_table)); 2396 } 2397 2398 genpd->provider = &np->fwnode; 2399 genpd->has_provider = true; 2400 } 2401 2402 ret = genpd_add_provider(np, data->xlate, data); 2403 if (ret < 0) 2404 goto error; 2405 2406 return 0; 2407 2408 error: 2409 while (i--) { 2410 genpd = data->domains[i]; 2411 2412 if (!genpd) 2413 continue; 2414 2415 genpd->provider = NULL; 2416 genpd->has_provider = false; 2417 2418 if (genpd->set_performance_state) { 2419 dev_pm_opp_put_opp_table(genpd->opp_table); 2420 dev_pm_opp_of_remove_table(&genpd->dev); 2421 } 2422 } 2423 2424 return ret; 2425 } 2426 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell); 2427 2428 /** 2429 * of_genpd_del_provider() - Remove a previously registered PM domain provider 2430 * @np: Device node pointer associated with the PM domain provider 2431 */ 2432 void of_genpd_del_provider(struct device_node *np) 2433 { 2434 struct of_genpd_provider *cp, *tmp; 2435 struct generic_pm_domain *gpd; 2436 2437 mutex_lock(&gpd_list_lock); 2438 mutex_lock(&of_genpd_mutex); 2439 list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) { 2440 if (cp->node == np) { 2441 /* 2442 * For each PM domain associated with the 2443 * provider, set the 'has_provider' to false 2444 * so that the PM domain can be safely removed. 2445 */ 2446 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 2447 if (gpd->provider == &np->fwnode) { 2448 gpd->has_provider = false; 2449 2450 if (!gpd->set_performance_state) 2451 continue; 2452 2453 dev_pm_opp_put_opp_table(gpd->opp_table); 2454 dev_pm_opp_of_remove_table(&gpd->dev); 2455 } 2456 } 2457 2458 fwnode_dev_initialized(&cp->node->fwnode, false); 2459 list_del(&cp->link); 2460 of_node_put(cp->node); 2461 kfree(cp); 2462 break; 2463 } 2464 } 2465 mutex_unlock(&of_genpd_mutex); 2466 mutex_unlock(&gpd_list_lock); 2467 } 2468 EXPORT_SYMBOL_GPL(of_genpd_del_provider); 2469 2470 /** 2471 * genpd_get_from_provider() - Look-up PM domain 2472 * @genpdspec: OF phandle args to use for look-up 2473 * 2474 * Looks for a PM domain provider under the node specified by @genpdspec and if 2475 * found, uses xlate function of the provider to map phandle args to a PM 2476 * domain. 2477 * 2478 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR() 2479 * on failure. 2480 */ 2481 static struct generic_pm_domain *genpd_get_from_provider( 2482 struct of_phandle_args *genpdspec) 2483 { 2484 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); 2485 struct of_genpd_provider *provider; 2486 2487 if (!genpdspec) 2488 return ERR_PTR(-EINVAL); 2489 2490 mutex_lock(&of_genpd_mutex); 2491 2492 /* Check if we have such a provider in our array */ 2493 list_for_each_entry(provider, &of_genpd_providers, link) { 2494 if (provider->node == genpdspec->np) 2495 genpd = provider->xlate(genpdspec, provider->data); 2496 if (!IS_ERR(genpd)) 2497 break; 2498 } 2499 2500 mutex_unlock(&of_genpd_mutex); 2501 2502 return genpd; 2503 } 2504 2505 /** 2506 * of_genpd_add_device() - Add a device to an I/O PM domain 2507 * @genpdspec: OF phandle args to use for look-up PM domain 2508 * @dev: Device to be added. 2509 * 2510 * Looks-up an I/O PM domain based upon phandle args provided and adds 2511 * the device to the PM domain. Returns a negative error code on failure. 2512 */ 2513 int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev) 2514 { 2515 struct generic_pm_domain *genpd; 2516 int ret; 2517 2518 mutex_lock(&gpd_list_lock); 2519 2520 genpd = genpd_get_from_provider(genpdspec); 2521 if (IS_ERR(genpd)) { 2522 ret = PTR_ERR(genpd); 2523 goto out; 2524 } 2525 2526 ret = genpd_add_device(genpd, dev, dev); 2527 2528 out: 2529 mutex_unlock(&gpd_list_lock); 2530 2531 return ret; 2532 } 2533 EXPORT_SYMBOL_GPL(of_genpd_add_device); 2534 2535 /** 2536 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 2537 * @parent_spec: OF phandle args to use for parent PM domain look-up 2538 * @subdomain_spec: OF phandle args to use for subdomain look-up 2539 * 2540 * Looks-up a parent PM domain and subdomain based upon phandle args 2541 * provided and adds the subdomain to the parent PM domain. Returns a 2542 * negative error code on failure. 2543 */ 2544 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec, 2545 struct of_phandle_args *subdomain_spec) 2546 { 2547 struct generic_pm_domain *parent, *subdomain; 2548 int ret; 2549 2550 mutex_lock(&gpd_list_lock); 2551 2552 parent = genpd_get_from_provider(parent_spec); 2553 if (IS_ERR(parent)) { 2554 ret = PTR_ERR(parent); 2555 goto out; 2556 } 2557 2558 subdomain = genpd_get_from_provider(subdomain_spec); 2559 if (IS_ERR(subdomain)) { 2560 ret = PTR_ERR(subdomain); 2561 goto out; 2562 } 2563 2564 ret = genpd_add_subdomain(parent, subdomain); 2565 2566 out: 2567 mutex_unlock(&gpd_list_lock); 2568 2569 return ret == -ENOENT ? -EPROBE_DEFER : ret; 2570 } 2571 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain); 2572 2573 /** 2574 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 2575 * @parent_spec: OF phandle args to use for parent PM domain look-up 2576 * @subdomain_spec: OF phandle args to use for subdomain look-up 2577 * 2578 * Looks-up a parent PM domain and subdomain based upon phandle args 2579 * provided and removes the subdomain from the parent PM domain. Returns a 2580 * negative error code on failure. 2581 */ 2582 int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec, 2583 struct of_phandle_args *subdomain_spec) 2584 { 2585 struct generic_pm_domain *parent, *subdomain; 2586 int ret; 2587 2588 mutex_lock(&gpd_list_lock); 2589 2590 parent = genpd_get_from_provider(parent_spec); 2591 if (IS_ERR(parent)) { 2592 ret = PTR_ERR(parent); 2593 goto out; 2594 } 2595 2596 subdomain = genpd_get_from_provider(subdomain_spec); 2597 if (IS_ERR(subdomain)) { 2598 ret = PTR_ERR(subdomain); 2599 goto out; 2600 } 2601 2602 ret = pm_genpd_remove_subdomain(parent, subdomain); 2603 2604 out: 2605 mutex_unlock(&gpd_list_lock); 2606 2607 return ret; 2608 } 2609 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain); 2610 2611 /** 2612 * of_genpd_remove_last - Remove the last PM domain registered for a provider 2613 * @np: Pointer to device node associated with provider 2614 * 2615 * Find the last PM domain that was added by a particular provider and 2616 * remove this PM domain from the list of PM domains. The provider is 2617 * identified by the 'provider' device structure that is passed. The PM 2618 * domain will only be removed, if the provider associated with domain 2619 * has been removed. 2620 * 2621 * Returns a valid pointer to struct generic_pm_domain on success or 2622 * ERR_PTR() on failure. 2623 */ 2624 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) 2625 { 2626 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT); 2627 int ret; 2628 2629 if (IS_ERR_OR_NULL(np)) 2630 return ERR_PTR(-EINVAL); 2631 2632 mutex_lock(&gpd_list_lock); 2633 list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) { 2634 if (gpd->provider == &np->fwnode) { 2635 ret = genpd_remove(gpd); 2636 genpd = ret ? ERR_PTR(ret) : gpd; 2637 break; 2638 } 2639 } 2640 mutex_unlock(&gpd_list_lock); 2641 2642 return genpd; 2643 } 2644 EXPORT_SYMBOL_GPL(of_genpd_remove_last); 2645 2646 static void genpd_release_dev(struct device *dev) 2647 { 2648 of_node_put(dev->of_node); 2649 kfree(dev); 2650 } 2651 2652 static struct bus_type genpd_bus_type = { 2653 .name = "genpd", 2654 }; 2655 2656 /** 2657 * genpd_dev_pm_detach - Detach a device from its PM domain. 2658 * @dev: Device to detach. 2659 * @power_off: Currently not used 2660 * 2661 * Try to locate a corresponding generic PM domain, which the device was 2662 * attached to previously. If such is found, the device is detached from it. 2663 */ 2664 static void genpd_dev_pm_detach(struct device *dev, bool power_off) 2665 { 2666 struct generic_pm_domain *pd; 2667 unsigned int i; 2668 int ret = 0; 2669 2670 pd = dev_to_genpd(dev); 2671 if (IS_ERR(pd)) 2672 return; 2673 2674 dev_dbg(dev, "removing from PM domain %s\n", pd->name); 2675 2676 /* Drop the default performance state */ 2677 if (dev_gpd_data(dev)->default_pstate) { 2678 dev_pm_genpd_set_performance_state(dev, 0); 2679 dev_gpd_data(dev)->default_pstate = 0; 2680 } 2681 2682 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) { 2683 ret = genpd_remove_device(pd, dev); 2684 if (ret != -EAGAIN) 2685 break; 2686 2687 mdelay(i); 2688 cond_resched(); 2689 } 2690 2691 if (ret < 0) { 2692 dev_err(dev, "failed to remove from PM domain %s: %d", 2693 pd->name, ret); 2694 return; 2695 } 2696 2697 /* Check if PM domain can be powered off after removing this device. */ 2698 genpd_queue_power_off_work(pd); 2699 2700 /* Unregister the device if it was created by genpd. */ 2701 if (dev->bus == &genpd_bus_type) 2702 device_unregister(dev); 2703 } 2704 2705 static void genpd_dev_pm_sync(struct device *dev) 2706 { 2707 struct generic_pm_domain *pd; 2708 2709 pd = dev_to_genpd(dev); 2710 if (IS_ERR(pd)) 2711 return; 2712 2713 genpd_queue_power_off_work(pd); 2714 } 2715 2716 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev, 2717 unsigned int index, bool power_on) 2718 { 2719 struct of_phandle_args pd_args; 2720 struct generic_pm_domain *pd; 2721 int pstate; 2722 int ret; 2723 2724 ret = of_parse_phandle_with_args(dev->of_node, "power-domains", 2725 "#power-domain-cells", index, &pd_args); 2726 if (ret < 0) 2727 return ret; 2728 2729 mutex_lock(&gpd_list_lock); 2730 pd = genpd_get_from_provider(&pd_args); 2731 of_node_put(pd_args.np); 2732 if (IS_ERR(pd)) { 2733 mutex_unlock(&gpd_list_lock); 2734 dev_dbg(dev, "%s() failed to find PM domain: %ld\n", 2735 __func__, PTR_ERR(pd)); 2736 return -ENODEV; 2737 } 2738 2739 dev_dbg(dev, "adding to PM domain %s\n", pd->name); 2740 2741 ret = genpd_add_device(pd, dev, base_dev); 2742 mutex_unlock(&gpd_list_lock); 2743 2744 if (ret < 0) 2745 return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name); 2746 2747 dev->pm_domain->detach = genpd_dev_pm_detach; 2748 dev->pm_domain->sync = genpd_dev_pm_sync; 2749 2750 if (power_on) { 2751 genpd_lock(pd); 2752 ret = genpd_power_on(pd, 0); 2753 genpd_unlock(pd); 2754 } 2755 2756 if (ret) { 2757 genpd_remove_device(pd, dev); 2758 return -EPROBE_DEFER; 2759 } 2760 2761 /* Set the default performance state */ 2762 pstate = of_get_required_opp_performance_state(dev->of_node, index); 2763 if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) { 2764 ret = pstate; 2765 goto err; 2766 } else if (pstate > 0) { 2767 ret = dev_pm_genpd_set_performance_state(dev, pstate); 2768 if (ret) 2769 goto err; 2770 dev_gpd_data(dev)->default_pstate = pstate; 2771 } 2772 return 1; 2773 2774 err: 2775 dev_err(dev, "failed to set required performance state for power-domain %s: %d\n", 2776 pd->name, ret); 2777 genpd_remove_device(pd, dev); 2778 return ret; 2779 } 2780 2781 /** 2782 * genpd_dev_pm_attach - Attach a device to its PM domain using DT. 2783 * @dev: Device to attach. 2784 * 2785 * Parse device's OF node to find a PM domain specifier. If such is found, 2786 * attaches the device to retrieved pm_domain ops. 2787 * 2788 * Returns 1 on successfully attached PM domain, 0 when the device don't need a 2789 * PM domain or when multiple power-domains exists for it, else a negative error 2790 * code. Note that if a power-domain exists for the device, but it cannot be 2791 * found or turned on, then return -EPROBE_DEFER to ensure that the device is 2792 * not probed and to re-try again later. 2793 */ 2794 int genpd_dev_pm_attach(struct device *dev) 2795 { 2796 if (!dev->of_node) 2797 return 0; 2798 2799 /* 2800 * Devices with multiple PM domains must be attached separately, as we 2801 * can only attach one PM domain per device. 2802 */ 2803 if (of_count_phandle_with_args(dev->of_node, "power-domains", 2804 "#power-domain-cells") != 1) 2805 return 0; 2806 2807 return __genpd_dev_pm_attach(dev, dev, 0, true); 2808 } 2809 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); 2810 2811 /** 2812 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains. 2813 * @dev: The device used to lookup the PM domain. 2814 * @index: The index of the PM domain. 2815 * 2816 * Parse device's OF node to find a PM domain specifier at the provided @index. 2817 * If such is found, creates a virtual device and attaches it to the retrieved 2818 * pm_domain ops. To deal with detaching of the virtual device, the ->detach() 2819 * callback in the struct dev_pm_domain are assigned to genpd_dev_pm_detach(). 2820 * 2821 * Returns the created virtual device if successfully attached PM domain, NULL 2822 * when the device don't need a PM domain, else an ERR_PTR() in case of 2823 * failures. If a power-domain exists for the device, but cannot be found or 2824 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device 2825 * is not probed and to re-try again later. 2826 */ 2827 struct device *genpd_dev_pm_attach_by_id(struct device *dev, 2828 unsigned int index) 2829 { 2830 struct device *virt_dev; 2831 int num_domains; 2832 int ret; 2833 2834 if (!dev->of_node) 2835 return NULL; 2836 2837 /* Verify that the index is within a valid range. */ 2838 num_domains = of_count_phandle_with_args(dev->of_node, "power-domains", 2839 "#power-domain-cells"); 2840 if (index >= num_domains) 2841 return NULL; 2842 2843 /* Allocate and register device on the genpd bus. */ 2844 virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL); 2845 if (!virt_dev) 2846 return ERR_PTR(-ENOMEM); 2847 2848 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev)); 2849 virt_dev->bus = &genpd_bus_type; 2850 virt_dev->release = genpd_release_dev; 2851 virt_dev->of_node = of_node_get(dev->of_node); 2852 2853 ret = device_register(virt_dev); 2854 if (ret) { 2855 put_device(virt_dev); 2856 return ERR_PTR(ret); 2857 } 2858 2859 /* Try to attach the device to the PM domain at the specified index. */ 2860 ret = __genpd_dev_pm_attach(virt_dev, dev, index, false); 2861 if (ret < 1) { 2862 device_unregister(virt_dev); 2863 return ret ? ERR_PTR(ret) : NULL; 2864 } 2865 2866 pm_runtime_enable(virt_dev); 2867 genpd_queue_power_off_work(dev_to_genpd(virt_dev)); 2868 2869 return virt_dev; 2870 } 2871 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); 2872 2873 /** 2874 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains. 2875 * @dev: The device used to lookup the PM domain. 2876 * @name: The name of the PM domain. 2877 * 2878 * Parse device's OF node to find a PM domain specifier using the 2879 * power-domain-names DT property. For further description see 2880 * genpd_dev_pm_attach_by_id(). 2881 */ 2882 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name) 2883 { 2884 int index; 2885 2886 if (!dev->of_node) 2887 return NULL; 2888 2889 index = of_property_match_string(dev->of_node, "power-domain-names", 2890 name); 2891 if (index < 0) 2892 return NULL; 2893 2894 return genpd_dev_pm_attach_by_id(dev, index); 2895 } 2896 2897 static const struct of_device_id idle_state_match[] = { 2898 { .compatible = "domain-idle-state", }, 2899 { } 2900 }; 2901 2902 static int genpd_parse_state(struct genpd_power_state *genpd_state, 2903 struct device_node *state_node) 2904 { 2905 int err; 2906 u32 residency; 2907 u32 entry_latency, exit_latency; 2908 2909 err = of_property_read_u32(state_node, "entry-latency-us", 2910 &entry_latency); 2911 if (err) { 2912 pr_debug(" * %pOF missing entry-latency-us property\n", 2913 state_node); 2914 return -EINVAL; 2915 } 2916 2917 err = of_property_read_u32(state_node, "exit-latency-us", 2918 &exit_latency); 2919 if (err) { 2920 pr_debug(" * %pOF missing exit-latency-us property\n", 2921 state_node); 2922 return -EINVAL; 2923 } 2924 2925 err = of_property_read_u32(state_node, "min-residency-us", &residency); 2926 if (!err) 2927 genpd_state->residency_ns = 1000 * residency; 2928 2929 genpd_state->power_on_latency_ns = 1000 * exit_latency; 2930 genpd_state->power_off_latency_ns = 1000 * entry_latency; 2931 genpd_state->fwnode = &state_node->fwnode; 2932 2933 return 0; 2934 } 2935 2936 static int genpd_iterate_idle_states(struct device_node *dn, 2937 struct genpd_power_state *states) 2938 { 2939 int ret; 2940 struct of_phandle_iterator it; 2941 struct device_node *np; 2942 int i = 0; 2943 2944 ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL); 2945 if (ret <= 0) 2946 return ret == -ENOENT ? 0 : ret; 2947 2948 /* Loop over the phandles until all the requested entry is found */ 2949 of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) { 2950 np = it.node; 2951 if (!of_match_node(idle_state_match, np)) 2952 continue; 2953 if (states) { 2954 ret = genpd_parse_state(&states[i], np); 2955 if (ret) { 2956 pr_err("Parsing idle state node %pOF failed with err %d\n", 2957 np, ret); 2958 of_node_put(np); 2959 return ret; 2960 } 2961 } 2962 i++; 2963 } 2964 2965 return i; 2966 } 2967 2968 /** 2969 * of_genpd_parse_idle_states: Return array of idle states for the genpd. 2970 * 2971 * @dn: The genpd device node 2972 * @states: The pointer to which the state array will be saved. 2973 * @n: The count of elements in the array returned from this function. 2974 * 2975 * Returns the device states parsed from the OF node. The memory for the states 2976 * is allocated by this function and is the responsibility of the caller to 2977 * free the memory after use. If any or zero compatible domain idle states is 2978 * found it returns 0 and in case of errors, a negative error code is returned. 2979 */ 2980 int of_genpd_parse_idle_states(struct device_node *dn, 2981 struct genpd_power_state **states, int *n) 2982 { 2983 struct genpd_power_state *st; 2984 int ret; 2985 2986 ret = genpd_iterate_idle_states(dn, NULL); 2987 if (ret < 0) 2988 return ret; 2989 2990 if (!ret) { 2991 *states = NULL; 2992 *n = 0; 2993 return 0; 2994 } 2995 2996 st = kcalloc(ret, sizeof(*st), GFP_KERNEL); 2997 if (!st) 2998 return -ENOMEM; 2999 3000 ret = genpd_iterate_idle_states(dn, st); 3001 if (ret <= 0) { 3002 kfree(st); 3003 return ret < 0 ? ret : -EINVAL; 3004 } 3005 3006 *states = st; 3007 *n = ret; 3008 3009 return 0; 3010 } 3011 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states); 3012 3013 /** 3014 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node. 3015 * 3016 * @genpd_dev: Genpd's device for which the performance-state needs to be found. 3017 * @opp: struct dev_pm_opp of the OPP for which we need to find performance 3018 * state. 3019 * 3020 * Returns performance state encoded in the OPP of the genpd. This calls 3021 * platform specific genpd->opp_to_performance_state() callback to translate 3022 * power domain OPP to performance state. 3023 * 3024 * Returns performance state on success and 0 on failure. 3025 */ 3026 unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev, 3027 struct dev_pm_opp *opp) 3028 { 3029 struct generic_pm_domain *genpd = NULL; 3030 int state; 3031 3032 genpd = container_of(genpd_dev, struct generic_pm_domain, dev); 3033 3034 if (unlikely(!genpd->opp_to_performance_state)) 3035 return 0; 3036 3037 genpd_lock(genpd); 3038 state = genpd->opp_to_performance_state(genpd, opp); 3039 genpd_unlock(genpd); 3040 3041 return state; 3042 } 3043 EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state); 3044 3045 static int __init genpd_bus_init(void) 3046 { 3047 return bus_register(&genpd_bus_type); 3048 } 3049 core_initcall(genpd_bus_init); 3050 3051 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ 3052 3053 3054 /*** debugfs support ***/ 3055 3056 #ifdef CONFIG_DEBUG_FS 3057 /* 3058 * TODO: This function is a slightly modified version of rtpm_status_show 3059 * from sysfs.c, so generalize it. 3060 */ 3061 static void rtpm_status_str(struct seq_file *s, struct device *dev) 3062 { 3063 static const char * const status_lookup[] = { 3064 [RPM_ACTIVE] = "active", 3065 [RPM_RESUMING] = "resuming", 3066 [RPM_SUSPENDED] = "suspended", 3067 [RPM_SUSPENDING] = "suspending" 3068 }; 3069 const char *p = ""; 3070 3071 if (dev->power.runtime_error) 3072 p = "error"; 3073 else if (dev->power.disable_depth) 3074 p = "unsupported"; 3075 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup)) 3076 p = status_lookup[dev->power.runtime_status]; 3077 else 3078 WARN_ON(1); 3079 3080 seq_printf(s, "%-25s ", p); 3081 } 3082 3083 static void perf_status_str(struct seq_file *s, struct device *dev) 3084 { 3085 struct generic_pm_domain_data *gpd_data; 3086 3087 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 3088 seq_put_decimal_ull(s, "", gpd_data->performance_state); 3089 } 3090 3091 static int genpd_summary_one(struct seq_file *s, 3092 struct generic_pm_domain *genpd) 3093 { 3094 static const char * const status_lookup[] = { 3095 [GENPD_STATE_ON] = "on", 3096 [GENPD_STATE_OFF] = "off" 3097 }; 3098 struct pm_domain_data *pm_data; 3099 const char *kobj_path; 3100 struct gpd_link *link; 3101 char state[16]; 3102 int ret; 3103 3104 ret = genpd_lock_interruptible(genpd); 3105 if (ret) 3106 return -ERESTARTSYS; 3107 3108 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) 3109 goto exit; 3110 if (!genpd_status_on(genpd)) 3111 snprintf(state, sizeof(state), "%s-%u", 3112 status_lookup[genpd->status], genpd->state_idx); 3113 else 3114 snprintf(state, sizeof(state), "%s", 3115 status_lookup[genpd->status]); 3116 seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state); 3117 3118 /* 3119 * Modifications on the list require holding locks on both 3120 * parent and child, so we are safe. 3121 * Also genpd->name is immutable. 3122 */ 3123 list_for_each_entry(link, &genpd->parent_links, parent_node) { 3124 if (list_is_first(&link->parent_node, &genpd->parent_links)) 3125 seq_printf(s, "\n%48s", " "); 3126 seq_printf(s, "%s", link->child->name); 3127 if (!list_is_last(&link->parent_node, &genpd->parent_links)) 3128 seq_puts(s, ", "); 3129 } 3130 3131 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { 3132 kobj_path = kobject_get_path(&pm_data->dev->kobj, 3133 genpd_is_irq_safe(genpd) ? 3134 GFP_ATOMIC : GFP_KERNEL); 3135 if (kobj_path == NULL) 3136 continue; 3137 3138 seq_printf(s, "\n %-50s ", kobj_path); 3139 rtpm_status_str(s, pm_data->dev); 3140 perf_status_str(s, pm_data->dev); 3141 kfree(kobj_path); 3142 } 3143 3144 seq_puts(s, "\n"); 3145 exit: 3146 genpd_unlock(genpd); 3147 3148 return 0; 3149 } 3150 3151 static int summary_show(struct seq_file *s, void *data) 3152 { 3153 struct generic_pm_domain *genpd; 3154 int ret = 0; 3155 3156 seq_puts(s, "domain status children performance\n"); 3157 seq_puts(s, " /device runtime status\n"); 3158 seq_puts(s, "----------------------------------------------------------------------------------------------\n"); 3159 3160 ret = mutex_lock_interruptible(&gpd_list_lock); 3161 if (ret) 3162 return -ERESTARTSYS; 3163 3164 list_for_each_entry(genpd, &gpd_list, gpd_list_node) { 3165 ret = genpd_summary_one(s, genpd); 3166 if (ret) 3167 break; 3168 } 3169 mutex_unlock(&gpd_list_lock); 3170 3171 return ret; 3172 } 3173 3174 static int status_show(struct seq_file *s, void *data) 3175 { 3176 static const char * const status_lookup[] = { 3177 [GENPD_STATE_ON] = "on", 3178 [GENPD_STATE_OFF] = "off" 3179 }; 3180 3181 struct generic_pm_domain *genpd = s->private; 3182 int ret = 0; 3183 3184 ret = genpd_lock_interruptible(genpd); 3185 if (ret) 3186 return -ERESTARTSYS; 3187 3188 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) 3189 goto exit; 3190 3191 if (genpd->status == GENPD_STATE_OFF) 3192 seq_printf(s, "%s-%u\n", status_lookup[genpd->status], 3193 genpd->state_idx); 3194 else 3195 seq_printf(s, "%s\n", status_lookup[genpd->status]); 3196 exit: 3197 genpd_unlock(genpd); 3198 return ret; 3199 } 3200 3201 static int sub_domains_show(struct seq_file *s, void *data) 3202 { 3203 struct generic_pm_domain *genpd = s->private; 3204 struct gpd_link *link; 3205 int ret = 0; 3206 3207 ret = genpd_lock_interruptible(genpd); 3208 if (ret) 3209 return -ERESTARTSYS; 3210 3211 list_for_each_entry(link, &genpd->parent_links, parent_node) 3212 seq_printf(s, "%s\n", link->child->name); 3213 3214 genpd_unlock(genpd); 3215 return ret; 3216 } 3217 3218 static int idle_states_show(struct seq_file *s, void *data) 3219 { 3220 struct generic_pm_domain *genpd = s->private; 3221 u64 now, delta, idle_time = 0; 3222 unsigned int i; 3223 int ret = 0; 3224 3225 ret = genpd_lock_interruptible(genpd); 3226 if (ret) 3227 return -ERESTARTSYS; 3228 3229 seq_puts(s, "State Time Spent(ms) Usage Rejected\n"); 3230 3231 for (i = 0; i < genpd->state_count; i++) { 3232 idle_time += genpd->states[i].idle_time; 3233 3234 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) { 3235 now = ktime_get_mono_fast_ns(); 3236 if (now > genpd->accounting_time) { 3237 delta = now - genpd->accounting_time; 3238 idle_time += delta; 3239 } 3240 } 3241 3242 do_div(idle_time, NSEC_PER_MSEC); 3243 seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time, 3244 genpd->states[i].usage, genpd->states[i].rejected); 3245 } 3246 3247 genpd_unlock(genpd); 3248 return ret; 3249 } 3250 3251 static int active_time_show(struct seq_file *s, void *data) 3252 { 3253 struct generic_pm_domain *genpd = s->private; 3254 u64 now, on_time, delta = 0; 3255 int ret = 0; 3256 3257 ret = genpd_lock_interruptible(genpd); 3258 if (ret) 3259 return -ERESTARTSYS; 3260 3261 if (genpd->status == GENPD_STATE_ON) { 3262 now = ktime_get_mono_fast_ns(); 3263 if (now > genpd->accounting_time) 3264 delta = now - genpd->accounting_time; 3265 } 3266 3267 on_time = genpd->on_time + delta; 3268 do_div(on_time, NSEC_PER_MSEC); 3269 seq_printf(s, "%llu ms\n", on_time); 3270 3271 genpd_unlock(genpd); 3272 return ret; 3273 } 3274 3275 static int total_idle_time_show(struct seq_file *s, void *data) 3276 { 3277 struct generic_pm_domain *genpd = s->private; 3278 u64 now, delta, total = 0; 3279 unsigned int i; 3280 int ret = 0; 3281 3282 ret = genpd_lock_interruptible(genpd); 3283 if (ret) 3284 return -ERESTARTSYS; 3285 3286 for (i = 0; i < genpd->state_count; i++) { 3287 total += genpd->states[i].idle_time; 3288 3289 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) { 3290 now = ktime_get_mono_fast_ns(); 3291 if (now > genpd->accounting_time) { 3292 delta = now - genpd->accounting_time; 3293 total += delta; 3294 } 3295 } 3296 } 3297 3298 do_div(total, NSEC_PER_MSEC); 3299 seq_printf(s, "%llu ms\n", total); 3300 3301 genpd_unlock(genpd); 3302 return ret; 3303 } 3304 3305 3306 static int devices_show(struct seq_file *s, void *data) 3307 { 3308 struct generic_pm_domain *genpd = s->private; 3309 struct pm_domain_data *pm_data; 3310 const char *kobj_path; 3311 int ret = 0; 3312 3313 ret = genpd_lock_interruptible(genpd); 3314 if (ret) 3315 return -ERESTARTSYS; 3316 3317 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { 3318 kobj_path = kobject_get_path(&pm_data->dev->kobj, 3319 genpd_is_irq_safe(genpd) ? 3320 GFP_ATOMIC : GFP_KERNEL); 3321 if (kobj_path == NULL) 3322 continue; 3323 3324 seq_printf(s, "%s\n", kobj_path); 3325 kfree(kobj_path); 3326 } 3327 3328 genpd_unlock(genpd); 3329 return ret; 3330 } 3331 3332 static int perf_state_show(struct seq_file *s, void *data) 3333 { 3334 struct generic_pm_domain *genpd = s->private; 3335 3336 if (genpd_lock_interruptible(genpd)) 3337 return -ERESTARTSYS; 3338 3339 seq_printf(s, "%u\n", genpd->performance_state); 3340 3341 genpd_unlock(genpd); 3342 return 0; 3343 } 3344 3345 DEFINE_SHOW_ATTRIBUTE(summary); 3346 DEFINE_SHOW_ATTRIBUTE(status); 3347 DEFINE_SHOW_ATTRIBUTE(sub_domains); 3348 DEFINE_SHOW_ATTRIBUTE(idle_states); 3349 DEFINE_SHOW_ATTRIBUTE(active_time); 3350 DEFINE_SHOW_ATTRIBUTE(total_idle_time); 3351 DEFINE_SHOW_ATTRIBUTE(devices); 3352 DEFINE_SHOW_ATTRIBUTE(perf_state); 3353 3354 static void genpd_debug_add(struct generic_pm_domain *genpd) 3355 { 3356 struct dentry *d; 3357 3358 if (!genpd_debugfs_dir) 3359 return; 3360 3361 d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); 3362 3363 debugfs_create_file("current_state", 0444, 3364 d, genpd, &status_fops); 3365 debugfs_create_file("sub_domains", 0444, 3366 d, genpd, &sub_domains_fops); 3367 debugfs_create_file("idle_states", 0444, 3368 d, genpd, &idle_states_fops); 3369 debugfs_create_file("active_time", 0444, 3370 d, genpd, &active_time_fops); 3371 debugfs_create_file("total_idle_time", 0444, 3372 d, genpd, &total_idle_time_fops); 3373 debugfs_create_file("devices", 0444, 3374 d, genpd, &devices_fops); 3375 if (genpd->set_performance_state) 3376 debugfs_create_file("perf_state", 0444, 3377 d, genpd, &perf_state_fops); 3378 } 3379 3380 static int __init genpd_debug_init(void) 3381 { 3382 struct generic_pm_domain *genpd; 3383 3384 genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); 3385 3386 debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir, 3387 NULL, &summary_fops); 3388 3389 list_for_each_entry(genpd, &gpd_list, gpd_list_node) 3390 genpd_debug_add(genpd); 3391 3392 return 0; 3393 } 3394 late_initcall(genpd_debug_init); 3395 3396 static void __exit genpd_debug_exit(void) 3397 { 3398 debugfs_remove_recursive(genpd_debugfs_dir); 3399 } 3400 __exitcall(genpd_debug_exit); 3401 #endif /* CONFIG_DEBUG_FS */ 3402