// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine)						\
		__ret = __routine(dev);				\
	__ret;							\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	return mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
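
/*
 * Example (illustrative sketch, names hypothetical): a provider that powers
 * its domain on/off from atomic context sets GENPD_FLAG_IRQ_SAFE, which makes
 * genpd_lock_init() pick the spinlock-based genpd_spin_ops above instead of
 * the mutex-based genpd_mtx_ops:
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.flags = GENPD_FLAG_IRQ_SAFE,
 *	};
 */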

static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
						const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain whose
	 * callbacks are allowed to sleep. This indicates a suboptimal
	 * configuration for PM, but it doesn't matter for an always on domain.
	 */
	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
		return ret;

	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	struct dentry *d;

	if (!genpd_debugfs_dir)
		return;

	d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
	debugfs_remove(d);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	u64 delta, now;

	now = ktime_get_mono_fast_ns();
	if (now <= genpd->accounting_time)
		return;

	delta = now - genpd->accounting_time;

	/*
	 * If genpd->status is ON, the domain has just come out of its off
	 * state, so the elapsed delta was spent idle; otherwise the domain
	 * has just been powered off, so the delta was spent on.
	 */
	if (genpd->status == GENPD_STATE_ON)
		genpd->states[genpd->state_idx].idle_time += delta;
	else
		genpd->on_time += delta;

	genpd->accounting_time = now;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take vote from powered-off sub-domains into account
	 * as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}
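
/*
 * Worked example (illustrative): if the domain's devices vote for states
 * 2 and 3, and one subdomain link carries a requirement of 4, then for a
 * new request of 1 the loops above settle on max(1, 2, 3, 4) = 4. That
 * aggregated state is what eventually gets programmed through the
 * ->set_performance_state() callback.
 */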

static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *parent;
	struct gpd_link *link;
	int parent_state, ret;

	if (state == genpd->performance_state)
		return 0;

	/* Propagate to parents of genpd */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		parent = link->parent;

		/* Find parent's performance state */
		ret = genpd_xlate_performance_state(genpd, parent, state);
		if (unlikely(ret < 0))
			goto err;

		parent_state = ret;

		genpd_lock_nested(parent, depth + 1);

		link->prev_performance_state = link->performance_state;
		link->performance_state = parent_state;
		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(parent);

		if (ret)
			goto err;
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret)
			goto err;
	}

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, let's roll back */
	list_for_each_entry_continue_reverse(link, &genpd->child_links,
					     child_node) {
		parent = link->parent;

		genpd_lock_nested(parent, depth + 1);

		parent_state = link->prev_performance_state;
		link->performance_state = parent_state;

		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       parent->name, parent_state);
		}

		genpd_unlock(parent);
	}

	return ret;
}

static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}

static int genpd_drop_performance_state(struct device *dev)
{
	unsigned int prev_state = dev_gpd_data(dev)->performance_state;

	if (!genpd_set_performance_state(dev, 0))
		return prev_state;

	return 0;
}

static void genpd_restore_performance_state(struct device *dev,
					    unsigned int state)
{
	if (state)
		genpd_set_performance_state(dev, state);
}
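
/*
 * The two helpers above are used as a pair around runtime suspend/resume
 * (see genpd_runtime_suspend/resume() below): the device's vote is dropped
 * to 0 while it sleeps and the returned previous state is stashed in
 * gpd_data->rpm_pstate, to be reapplied on resume. A minimal sketch:
 *
 *	unsigned int saved = genpd_drop_performance_state(dev);
 *	// ... device is runtime suspended, the domain may scale down ...
 *	genpd_restore_performance_state(dev, saved);
 */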

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when
 *	   the device doesn't have any performance state constraints left
 *	   (the device then no longer takes part in determining the target
 *	   performance state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
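
/*
 * Example (illustrative, values hypothetical): a consumer driver attached to
 * a genpd with performance states could vote for a state before a transfer
 * and drop its vote afterwards. Real state values are typically derived from
 * the device's OPP table rather than hard-coded:
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, 3);
 *	if (ret)
 *		return ret;
 *	// ... do work that needs the higher performance level ...
 *	dev_pm_genpd_set_performance_state(dev, 0);	// drop the constraint
 */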

/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform of the next wakeup. It's assumed that the users
 * guarantee that the genpd wouldn't be detached while this routine is getting
 * called. Additionally, it's also assumed that @dev isn't runtime suspended
 * (RPM_SUSPENDED).
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
	if (td)
		td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
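
/*
 * Example (illustrative): a driver that knows its next interrupt will fire
 * in roughly 5 ms can tell the governor, so that only domain idle states
 * whose enter/exit latencies fit in that window are considered:
 *
 *	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 5));
 */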

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;
	int ret;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
	    genpd_is_rpm_always_on(genpd) ||
	    atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	/*
	 * The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that there's no need for
	 * additional locking, as powering on a child requires the parent's
	 * lock to be acquired first.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return -EBUSY;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
		    irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	ret = _genpd_power_off(genpd, true);
	if (ret) {
		genpd->states[genpd->state_idx].rejected++;
		return ret;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->child_links,
					child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
		struct pm_domain_data *pdd;
		struct gpd_timing_data *td;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			td = to_gpd_data(pdd)->td;
			if (td) {
				td->constraint_changed = true;
				genpd = dev_to_genpd(dev);
			}
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->gd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}
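
/*
 * The notifier above fires when a device's resume-latency QoS constraint
 * changes. Example (illustrative, request variable hypothetical): a driver
 * can publish such a constraint, in microseconds, and the governor will then
 * refuse domain idle states that would violate it:
 *
 *	static struct dev_pm_qos_request my_req;
 *
 *	dev_pm_qos_add_request(dev, &my_req, DEV_PM_QOS_RESUME_LATENCY, 2000);
 *	// ... later, tighten the bound to 500 us:
 *	dev_pm_qos_update_request(&my_req, 500);
 */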

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for purposes other than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (td && runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (td && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}
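
/*
 * From a consumer driver's point of view, the callbacks above run behind the
 * ordinary runtime PM API. A minimal sketch (hypothetical driver code):
 *
 *	pm_runtime_enable(dev);
 *
 *	ret = pm_runtime_resume_and_get(dev);	// may power on the domain
 *	if (ret < 0)
 *		return ret;
 *	// ... access the hardware ...
 *	pm_runtime_put_sync(dev);	// may end up in genpd_runtime_suspend()
 */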

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool timed = td && pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non-IRQ safe domain that holds an IRQ safe
	 * device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		goto out;

	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	if (!ret)
		genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	if (timed)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
		genpd_lock(genpd);
		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);
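
/*
 * Example: booting with "pd_ignore_unused" on the kernel command line keeps
 * otherwise-unused PM domains powered, which can be handy when bringing up a
 * new platform whose consumer drivers aren't ready yet.
 */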

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Check that the children are in their deepest (powered-off) state. */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return;
	}

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GENPD_STATE_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O pm domain.
 * @dev: Device to suspend.
 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (poweroff)
		ret = pm_generic_poweroff_noirq(dev);
	else
		ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			if (poweroff)
				pm_generic_restore_noirq(dev);
			else
				pm_generic_resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, false);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return pm_generic_resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_resume_noirq(dev);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev))
		ret = genpd_stop_dev(genpd, dev);

	return ret;
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_thaw_noirq(dev);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, true);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into an arbitrary
		 * state, so make it appear as powered off to
		 * genpd_sync_power_on(), so that it tries to power it on in
		 * case it was really off.
		 */
		genpd->status = GENPD_STATE_OFF;
	}

	genpd_sync_power_on(genpd, true, 0);
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_restore_noirq(dev);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}

/**
 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
 * @dev: The device that is attached to the genpd, that can be suspended.
 *
 * This routine should typically be called for a device that needs to be
 * suspended during the syscore suspend phase. It may also be called during
 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_suspend(struct device *dev)
{
	genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);

/**
 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
 * @dev: The device that is attached to the genpd, which needs to be resumed.
 *
 * This routine should typically be called for a device that needs to be resumed
 * during the syscore resume phase. It may also be called during suspend-to-idle
 * to resume a corresponding CPU device that is attached to a genpd.
 */
void dev_pm_genpd_resume(struct device *dev)
{
	genpd_switch_state(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
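
/*
 * Example (illustrative sketch, names hypothetical): a driver for a device
 * that must be handled in the syscore phase can pair the two helpers above
 * in its syscore ops:
 *
 *	static int my_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(my_dev);
 *		return 0;
 *	}
 *
 *	static void my_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(my_dev);
 *	}
 */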

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
							    bool has_governor)
{
	struct generic_pm_domain_data *gpd_data;
	struct gpd_timing_data *td;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	/* Allocate data used by a governor. */
	if (has_governor) {
		td = kzalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			ret = -ENOMEM;
			goto err_free;
		}

		td->constraint_changed = true;
		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
		td->next_wakeup = KTIME_MAX;
		gpd_data->td = td;
	}

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data)
		ret = -EINVAL;
	else
		dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	if (ret)
		goto err_free;

	return gpd_data;

 err_free:
	kfree(gpd_data->td);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data->td);
	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct genpd_governor_data *gd = genpd->gd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, gd);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	if (gd)
		gd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
 out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);
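
/*
 * Example (illustrative, names hypothetical): platform code that registered
 * a domain with pm_genpd_init() can attach a device to it directly:
 *
 *	ret = pm_genpd_add_device(&my_pd, &my_platform_device->dev);
 *	if (ret)
 *		dev_err(&my_platform_device->dev, "failed to join my_pd\n");
 *
 * On DT-based systems the attachment normally happens via the OF provider
 * code further down in this file rather than by calling this API directly.
 */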

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	if (genpd->gd)
		genpd->gd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

/**
 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
 *
 * @dev: Device that should be associated with the notifier
 * @nb: The notifier block to register
 *
 * Users may call this function to add a genpd power on/off notifier for an
 * attached @dev. Only one notifier per device is allowed. The notifier is
 * sent when genpd is powering on/off the PM domain.
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->power_nb)
		return -EEXIST;

	genpd_lock(genpd);
	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to add notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = nb;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
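
/*
 * Example (illustrative sketch, names hypothetical): a consumer that must
 * save and restore context across domain power transitions can register a
 * notifier and react to the GENPD_NOTIFY_* actions:
 *
 *	static int my_pd_notifier(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case GENPD_NOTIFY_PRE_OFF:
 *			// save hardware context before power is cut
 *			break;
 *		case GENPD_NOTIFY_ON:
 *			// restore hardware context after power returns
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_pd_notifier };
 *
 *	ret = dev_pm_genpd_add_notifier(dev, &my_nb);
 */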

/**
 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
 *
 * @dev: Device that is associated with the notifier
 *
 * Users may call this function to remove a genpd power on/off notifier for an
 * attached @dev.
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_remove_notifier(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (!gpd_data->power_nb)
		return -ENODEV;

	genpd_lock(genpd);
	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
					    gpd_data->power_nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
		     genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
		if (itr->child == subdomain && itr->parent == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->parent = genpd;
	list_add_tail(&link->parent_node, &genpd->parent_links);
	link->child = subdomain;
	list_add_tail(&link->child_node, &subdomain->child_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Leader PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
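
/*
 * Example (illustrative, names hypothetical): nesting two domains so that
 * the parent stays on for as long as the child is on:
 *
 *	ret = pm_genpd_add_subdomain(&soc_pd, &gpu_pd);
 *
 * With this link in place, powering on gpu_pd bumps soc_pd's subdomain
 * counter, which keeps genpd_power_off() from removing power from soc_pd.
 */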

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Leader PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n",
			genpd->name, subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
		if (link->child != subdomain)
			continue;

		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

static void genpd_free_default_power_state(struct genpd_power_state *states,
					   unsigned int state_count)
{
	kfree(states);
}

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free_states = genpd_free_default_power_state;

	return 0;
}

static int genpd_alloc_data(struct generic_pm_domain *genpd)
{
	struct genpd_governor_data *gd = NULL;
	int ret;

	if (genpd_is_cpu_domain(genpd) &&
	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
		return -ENOMEM;

	if (genpd->gov) {
		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
		if (!gd) {
			ret = -ENOMEM;
			goto free;
		}

		gd->max_off_time_ns = -1;
		gd->max_off_time_changed = true;
		gd->next_wakeup = KTIME_MAX;
	}

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret)
			goto free;
	}

	genpd->gd = gd;
	return 0;

free:
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	kfree(gd);
	return ret;
}

static void genpd_free_data(struct generic_pm_domain *genpd)
{
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	if (genpd->free_states)
		genpd->free_states(genpd->states, genpd->state_count);
	kfree(genpd->gd);
}

static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial power status of the domain (true if powered off).
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->parent_links);
	INIT_LIST_HEAD(&genpd->child_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
	genpd->device_count = 0;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get_mono_fast_ns();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;
	genpd->domain.start = genpd_dev_pm_start;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* The always-on governor works better with the corresponding flag. */
	if (gov == &pm_domain_always_on_gov)
		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;

	/* Always-on domains must be powered on at initialization. */
	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
			!genpd_status_on(genpd)) {
		pr_err("always-on PM domain %s is not on\n", genpd->name);
		return -EINVAL;
	}

	/* Multiple states but no governor doesn't make sense. */
	if (!gov && genpd->state_count > 1)
		pr_warn("%s: no governor for states\n", genpd->name);

	ret = genpd_alloc_data(genpd);
	if (ret)
		return ret;

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
	genpd_debug_add(genpd);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
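
/*
 * Example (illustrative sketch, names hypothetical): a minimal provider that
 * registers one domain with the simple QoS governor, starting powered off:
 *
 *	static int my_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		// enable the power rail/clock for the domain
 *		return 0;
 *	}
 *
 *	static int my_pd_power_off(struct generic_pm_domain *pd)
 *	{
 *		// gate the power rail/clock for the domain
 *		return 0;
 *	}
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, &simple_qos_governor, true);
 */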

static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	genpd_debug_remove(genpd);
	cancel_work_sync(&genpd->power_off_work);
	genpd_free_data(genpd);

	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed if the associated provider has
 * been removed, it is not a parent to any other PM domain and has no
 * devices associated with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}
2266 */ 2267 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, 2268 void *data) 2269 { 2270 struct of_genpd_provider *cp; 2271 2272 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 2273 if (!cp) 2274 return -ENOMEM; 2275 2276 cp->node = of_node_get(np); 2277 cp->data = data; 2278 cp->xlate = xlate; 2279 fwnode_dev_initialized(&np->fwnode, true); 2280 2281 mutex_lock(&of_genpd_mutex); 2282 list_add(&cp->link, &of_genpd_providers); 2283 mutex_unlock(&of_genpd_mutex); 2284 pr_debug("Added domain provider from %pOF\n", np); 2285 2286 return 0; 2287 } 2288 2289 static bool genpd_present(const struct generic_pm_domain *genpd) 2290 { 2291 bool ret = false; 2292 const struct generic_pm_domain *gpd; 2293 2294 mutex_lock(&gpd_list_lock); 2295 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 2296 if (gpd == genpd) { 2297 ret = true; 2298 break; 2299 } 2300 } 2301 mutex_unlock(&gpd_list_lock); 2302 2303 return ret; 2304 } 2305 2306 /** 2307 * of_genpd_add_provider_simple() - Register a simple PM domain provider 2308 * @np: Device node pointer associated with the PM domain provider. 2309 * @genpd: Pointer to PM domain associated with the PM domain provider. 2310 */ 2311 int of_genpd_add_provider_simple(struct device_node *np, 2312 struct generic_pm_domain *genpd) 2313 { 2314 int ret; 2315 2316 if (!np || !genpd) 2317 return -EINVAL; 2318 2319 if (!genpd_present(genpd)) 2320 return -EINVAL; 2321 2322 genpd->dev.of_node = np; 2323 2324 /* Parse genpd OPP table */ 2325 if (genpd->set_performance_state) { 2326 ret = dev_pm_opp_of_add_table(&genpd->dev); 2327 if (ret) 2328 return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n"); 2329 2330 /* 2331 * Save table for faster processing while setting performance 2332 * state. 2333 */ 2334 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); 2335 WARN_ON(IS_ERR(genpd->opp_table)); 2336 } 2337 2338 ret = genpd_add_provider(np, genpd_xlate_simple, genpd); 2339 if (ret) { 2340 if (genpd->set_performance_state) { 2341 dev_pm_opp_put_opp_table(genpd->opp_table); 2342 dev_pm_opp_of_remove_table(&genpd->dev); 2343 } 2344 2345 return ret; 2346 } 2347 2348 genpd->provider = &np->fwnode; 2349 genpd->has_provider = true; 2350 2351 return 0; 2352 } 2353 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple); 2354 2355 /** 2356 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider 2357 * @np: Device node pointer associated with the PM domain provider. 2358 * @data: Pointer to the data associated with the PM domain provider. 2359 */ 2360 int of_genpd_add_provider_onecell(struct device_node *np, 2361 struct genpd_onecell_data *data) 2362 { 2363 struct generic_pm_domain *genpd; 2364 unsigned int i; 2365 int ret = -EINVAL; 2366 2367 if (!np || !data) 2368 return -EINVAL; 2369 2370 if (!data->xlate) 2371 data->xlate = genpd_xlate_onecell; 2372 2373 for (i = 0; i < data->num_domains; i++) { 2374 genpd = data->domains[i]; 2375 2376 if (!genpd) 2377 continue; 2378 if (!genpd_present(genpd)) 2379 goto error; 2380 2381 genpd->dev.of_node = np; 2382 2383 /* Parse genpd OPP table */ 2384 if (genpd->set_performance_state) { 2385 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i); 2386 if (ret) { 2387 dev_err_probe(&genpd->dev, ret, 2388 "Failed to add OPP table for index %d\n", i); 2389 goto error; 2390 } 2391 2392 /* 2393 * Save table for faster processing while setting 2394 * performance state. 

/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		if (!genpd)
			continue;
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				dev_err_probe(&genpd->dev, ret,
					      "Failed to add OPP table for index %d\n", i);
				goto error;
			}

			/*
			 * Save table for faster processing while setting
			 * performance state.
			 */
			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
			WARN_ON(IS_ERR(genpd->opp_table));
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	return 0;

error:
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
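
/*
 * Illustrative sketch only: a controller exposing several domains from one
 * node fills a genpd_onecell_data with an array indexed by the consumer's
 * single specifier cell (all names below are hypothetical):
 *
 *	static struct generic_pm_domain *foo_domains[] = {
 *		[FOO_PD_CPU] = &foo_cpu_pd,
 *		[FOO_PD_GPU] = &foo_gpu_pd,
 *	};
 *	static struct genpd_onecell_data foo_pd_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &foo_pd_data);
 *
 * Leaving .xlate NULL selects genpd_xlate_onecell(), which expects
 * "#power-domain-cells = <1>" in the provider node.
 */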

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
				if (gpd->provider == &np->fwnode) {
					gpd->has_provider = false;

					if (!gpd->set_performance_state)
						continue;

					dev_pm_opp_put_opp_table(gpd->opp_table);
					dev_pm_opp_of_remove_table(&gpd->dev);
				}
			}

			fwnode_dev_initialized(&cp->node->fwnode, false);
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for look-up PM domain
 * @dev: Device to be added.
 *
 * Looks-up an I/O PM domain based upon phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, dev);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);
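
/*
 * Illustrative sketch only: of_genpd_add_device() takes a parsed specifier,
 * so a caller typically obtains one with of_parse_phandle_with_args() first
 * (error handling trimmed):
 *
 *	struct of_phandle_args pd_args;
 *
 *	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
 *					 "#power-domain-cells", 0, &pd_args);
 *	if (!ret) {
 *		ret = of_genpd_add_device(&pd_args, dev);
 *		of_node_put(pd_args.np);
 *	}
 *
 * This mirrors what __genpd_dev_pm_attach() below does internally.
 */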

/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks-up a parent PM domain and subdomain based upon phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret == -ENOENT ? -EPROBE_DEFER : ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);

/**
 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks-up a parent PM domain and subdomain based upon phandle args
 * provided and removes the subdomain from the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
			      struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = pm_genpd_remove_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
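
/*
 * Illustrative sketch only: firmware-driven code can wire a hierarchy
 * purely from specifiers; -EPROBE_DEFER from of_genpd_add_subdomain()
 * signals that one of the providers has not been registered yet, so the
 * caller is expected to retry once the provider appears:
 *
 *	ret = of_genpd_add_subdomain(&parent_spec, &child_spec);
 *	if (ret == -EPROBE_DEFER)
 *		return ret;
 */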

/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Pointer to device node associated with provider
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the 'provider' device structure that is passed. The PM
 * domain will only be removed if the provider associated with the domain
 * has been removed.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);

static void genpd_release_dev(struct device *dev)
{
	of_node_put(dev->of_node);
	kfree(dev);
}

static struct bus_type genpd_bus_type = {
	.name = "genpd",
};

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	/* Drop the default performance state */
	if (dev_gpd_data(dev)->default_pstate) {
		dev_pm_genpd_set_performance_state(dev, 0);
		dev_gpd_data(dev)->default_pstate = 0;
	}

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);

	/* Unregister the device if it was created by genpd. */
	if (dev->bus == &genpd_bus_type)
		device_unregister(dev);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
				 unsigned int index, bool power_on)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int pstate;
	int ret;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					 "#power-domain-cells", index, &pd_args);
	if (ret < 0)
		return ret;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return driver_deferred_probe_check_state(base_dev);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, base_dev);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	if (power_on) {
		genpd_lock(pd);
		ret = genpd_power_on(pd, 0);
		genpd_unlock(pd);
	}

	if (ret) {
		genpd_remove_device(pd, dev);
		return -EPROBE_DEFER;
	}

	/* Set the default performance state */
	pstate = of_get_required_opp_performance_state(dev->of_node, index);
	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
		ret = pstate;
		goto err;
	} else if (pstate > 0) {
		ret = dev_pm_genpd_set_performance_state(dev, pstate);
		if (ret)
			goto err;
		dev_gpd_data(dev)->default_pstate = pstate;
	}
	return 1;

err:
	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
		pd->name, ret);
	genpd_remove_device(pd, dev);
	return ret;
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Returns 1 on successfully attached PM domain, 0 when the device doesn't
 * need a PM domain or when multiple power-domains exist for it, else a
 * negative error code. Note that if a power-domain exists for the device,
 * but it cannot be found or turned on, then -EPROBE_DEFER is returned to
 * ensure that the device is not probed and attachment is retried later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	if (!dev->of_node)
		return 0;

	/*
	 * Devices with multiple PM domains must be attached separately, as we
	 * can only attach one PM domain per device.
	 */
	if (of_count_phandle_with_args(dev->of_node, "power-domains",
				       "#power-domain-cells") != 1)
		return 0;

	return __genpd_dev_pm_attach(dev, dev, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
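
/*
 * Illustrative sketch only: bus code normally reaches this through the
 * dev_pm_domain_attach() wrapper at probe time rather than calling it
 * directly, propagating -EPROBE_DEFER back to the driver core:
 *
 *	ret = dev_pm_domain_attach(dev, true);
 *	if (ret)
 *		return ret;
 */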

/**
 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @index: The index of the PM domain.
 *
 * Parse device's OF node to find a PM domain specifier at the provided @index.
 * If such is found, creates a virtual device and attaches it to the retrieved
 * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
 * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
 *
 * Returns the created virtual device on successful attach, NULL when the
 * device doesn't need a PM domain, else an ERR_PTR() in case of failures.
 * If a power-domain exists for the device, but cannot be found or turned
 * on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device is
 * not probed and attachment is retried later.
 */
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
					 unsigned int index)
{
	struct device *virt_dev;
	int num_domains;
	int ret;

	if (!dev->of_node)
		return NULL;

	/*
	 * Verify that the index is within a valid range. A missing or
	 * malformed property makes of_count_phandle_with_args() return a
	 * negative error code, which must be checked explicitly: comparing
	 * it against the unsigned @index would convert it into a large
	 * positive value instead.
	 */
	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
						 "#power-domain-cells");
	if (num_domains < 0 || index >= num_domains)
		return NULL;

	/* Allocate and register device on the genpd bus. */
	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
	if (!virt_dev)
		return ERR_PTR(-ENOMEM);

	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
	virt_dev->bus = &genpd_bus_type;
	virt_dev->release = genpd_release_dev;
	virt_dev->of_node = of_node_get(dev->of_node);

	ret = device_register(virt_dev);
	if (ret) {
		put_device(virt_dev);
		return ERR_PTR(ret);
	}

	/* Try to attach the device to the PM domain at the specified index. */
	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
	if (ret < 1) {
		device_unregister(virt_dev);
		return ret ? ERR_PTR(ret) : NULL;
	}

	pm_runtime_enable(virt_dev);
	genpd_queue_power_off_work(dev_to_genpd(virt_dev));

	return virt_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);

/**
 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @name: The name of the PM domain.
 *
 * Parse device's OF node to find a PM domain specifier using the
 * power-domain-names DT property. For further description see
 * genpd_dev_pm_attach_by_id().
 */
struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
{
	int index;

	if (!dev->of_node)
		return NULL;

	index = of_property_match_string(dev->of_node, "power-domain-names",
					 name);
	if (index < 0)
		return NULL;

	return genpd_dev_pm_attach_by_id(dev, index);
}
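
/*
 * Illustrative sketch only: a driver whose binding names several domains,
 * e.g. power-domain-names = "mx", "cx";, attaches each through the
 * dev_pm_domain_attach_by_name() wrapper and gets back a virtual device
 * to runtime-PM against (names hypothetical):
 *
 *	struct device *mx_dev;
 *
 *	mx_dev = dev_pm_domain_attach_by_name(dev, "mx");
 *	if (IS_ERR_OR_NULL(mx_dev))
 *		return mx_dev ? PTR_ERR(mx_dev) : -ENODATA;
 *
 *	pm_runtime_get_sync(mx_dev);
 */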

static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};

static int genpd_parse_state(struct genpd_power_state *genpd_state,
			     struct device_node *state_node)
{
	int err;
	u32 residency;
	u32 entry_latency, exit_latency;

	err = of_property_read_u32(state_node, "entry-latency-us",
				   &entry_latency);
	if (err) {
		pr_debug(" * %pOF missing entry-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "exit-latency-us",
				   &exit_latency);
	if (err) {
		pr_debug(" * %pOF missing exit-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "min-residency-us", &residency);
	if (!err)
		genpd_state->residency_ns = 1000 * residency;

	genpd_state->power_on_latency_ns = 1000 * exit_latency;
	genpd_state->power_off_latency_ns = 1000 * entry_latency;
	genpd_state->fwnode = &state_node->fwnode;

	return 0;
}

static int genpd_iterate_idle_states(struct device_node *dn,
				     struct genpd_power_state *states)
{
	int ret;
	struct of_phandle_iterator it;
	struct device_node *np;
	int i = 0;

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret <= 0)
		return ret == -ENOENT ? 0 : ret;

	/* Loop over the phandles until all the requested entries are found */
	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
		np = it.node;
		if (!of_match_node(idle_state_match, np))
			continue;

		if (!of_device_is_available(np))
			continue;

		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {
				pr_err("Parsing idle state node %pOF failed with err %d\n",
				       np, ret);
				of_node_put(np);
				return ret;
			}
		}
		i++;
	}

	return i;
}

/**
 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
 *
 * @dn: The genpd device node
 * @states: The pointer to which the state array will be saved.
 * @n: The count of elements in the array returned from this function.
 *
 * Returns the device states parsed from the OF node. The memory for the states
 * is allocated by this function; it is the caller's responsibility to free it
 * after use. Returns 0 on success, in which case *@states points to the parsed
 * states (or is NULL when none were found) and *@n holds their count. On
 * failure, a negative error code is returned.
 */
int of_genpd_parse_idle_states(struct device_node *dn,
			       struct genpd_power_state **states, int *n)
{
	struct genpd_power_state *st;
	int ret;

	ret = genpd_iterate_idle_states(dn, NULL);
	if (ret < 0)
		return ret;

	if (!ret) {
		*states = NULL;
		*n = 0;
		return 0;
	}

	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = genpd_iterate_idle_states(dn, st);
	if (ret <= 0) {
		kfree(st);
		return ret < 0 ? ret : -EINVAL;
	}

	*states = st;
	*n = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
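
/*
 * Illustrative sketch only: a provider typically feeds the parsed states
 * into its domain before pm_genpd_init() (error handling trimmed, foo_pd
 * hypothetical):
 *
 *	struct genpd_power_state *states;
 *	int state_count;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &state_count);
 *	if (ret)
 *		return ret;
 *	foo_pd.states = states;
 *	foo_pd.state_count = state_count;
 *
 * Since the array is kcalloc()'d here, the provider should also set a
 * matching ->free_states() callback (or free it itself on teardown).
 */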

/**
 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
 *
 * @genpd_dev: Genpd's device for which the performance-state needs to be found.
 * @opp: struct dev_pm_opp of the OPP for which we need to find performance
 *       state.
 *
 * Returns performance state encoded in the OPP of the genpd. This calls
 * platform specific genpd->opp_to_performance_state() callback to translate
 * power domain OPP to performance state.
 *
 * Returns performance state on success and 0 on failure.
 */
unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
					       struct dev_pm_opp *opp)
{
	struct generic_pm_domain *genpd = NULL;
	int state;

	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);

	if (unlikely(!genpd->opp_to_performance_state))
		return 0;

	genpd_lock(genpd);
	state = genpd->opp_to_performance_state(genpd, opp);
	genpd_unlock(genpd);

	return state;
}
EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);

static int __init genpd_bus_init(void)
{
	return bus_register(&genpd_bus_type);
}
core_initcall(genpd_bus_init);

#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_printf(s, "%-25s  ", p);
}

static void perf_status_str(struct seq_file *s, struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	seq_put_decimal_ull(s, "", gpd_data->performance_state);
}

static int genpd_summary_one(struct seq_file *s,
			     struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	if (!genpd_status_on(genpd))
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);

	/*
	 * Modifications on the list require holding locks on both
	 * parent and child, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (list_is_first(&link->parent_node, &genpd->parent_links))
			seq_printf(s, "\n%48s", " ");
		seq_printf(s, "%s", link->child->name);
		if (!list_is_last(&link->parent_node, &genpd->parent_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
					     genpd_is_irq_safe(genpd) ?
					     GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		perf_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	genpd_unlock(genpd);

	return 0;
}

static int summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          children                           performance\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int status_show(struct seq_file *s, void *data)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};

	struct generic_pm_domain *genpd = s->private;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;

	if (genpd->status == GENPD_STATE_OFF)
		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
			   genpd->state_idx);
	else
		seq_printf(s, "%s\n", status_lookup[genpd->status]);
exit:
	genpd_unlock(genpd);
	return ret;
}

static int sub_domains_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct gpd_link *link;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(link, &genpd->parent_links, parent_node)
		seq_printf(s, "%s\n", link->child->name);

	genpd_unlock(genpd);
	return ret;
}

static int idle_states_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, delta;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");

	for (i = 0; i < genpd->state_count; i++) {
		/*
		 * Track each state separately; accumulating across states
		 * would mix nanosecond counts into values that have already
		 * been scaled to milliseconds by do_div() below.
		 */
		u64 idle_time = genpd->states[i].idle_time;

		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
			now = ktime_get_mono_fast_ns();
			if (now > genpd->accounting_time) {
				delta = now - genpd->accounting_time;
				idle_time += delta;
			}
		}

		do_div(idle_time, NSEC_PER_MSEC);
		seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
			   genpd->states[i].usage, genpd->states[i].rejected);
	}

	genpd_unlock(genpd);
	return ret;
}

static int active_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, on_time, delta = 0;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (genpd->status == GENPD_STATE_ON) {
		now = ktime_get_mono_fast_ns();
		if (now > genpd->accounting_time)
			delta = now - genpd->accounting_time;
	}

	on_time = genpd->on_time + delta;
	do_div(on_time, NSEC_PER_MSEC);
	seq_printf(s, "%llu ms\n", on_time);

	genpd_unlock(genpd);
	return ret;
}

static int total_idle_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, delta, total = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	for (i = 0; i < genpd->state_count; i++) {
		total += genpd->states[i].idle_time;

		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
			now = ktime_get_mono_fast_ns();
			if (now > genpd->accounting_time) {
				delta = now - genpd->accounting_time;
				total += delta;
			}
		}
	}

	do_div(total, NSEC_PER_MSEC);
	seq_printf(s, "%llu ms\n", total);

	genpd_unlock(genpd);
	return ret;
}


static int devices_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
					     genpd_is_irq_safe(genpd) ?
					     GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "%s\n", kobj_path);
		kfree(kobj_path);
	}

	genpd_unlock(genpd);
	return ret;
}

static int perf_state_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;

	if (genpd_lock_interruptible(genpd))
		return -ERESTARTSYS;

	seq_printf(s, "%u\n", genpd->performance_state);

	genpd_unlock(genpd);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(summary);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(sub_domains);
DEFINE_SHOW_ATTRIBUTE(idle_states);
DEFINE_SHOW_ATTRIBUTE(active_time);
DEFINE_SHOW_ATTRIBUTE(total_idle_time);
DEFINE_SHOW_ATTRIBUTE(devices);
DEFINE_SHOW_ATTRIBUTE(perf_state);

static void genpd_debug_add(struct generic_pm_domain *genpd)
{
	struct dentry *d;

	if (!genpd_debugfs_dir)
		return;

	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);

	debugfs_create_file("current_state", 0444,
			    d, genpd, &status_fops);
	debugfs_create_file("sub_domains", 0444,
			    d, genpd, &sub_domains_fops);
	debugfs_create_file("idle_states", 0444,
			    d, genpd, &idle_states_fops);
	debugfs_create_file("active_time", 0444,
			    d, genpd, &active_time_fops);
	debugfs_create_file("total_idle_time", 0444,
			    d, genpd, &total_idle_time_fops);
	debugfs_create_file("devices", 0444,
			    d, genpd, &devices_fops);
	if (genpd->set_performance_state)
		debugfs_create_file("perf_state", 0444,
				    d, genpd, &perf_state_fops);
}

static int __init genpd_debug_init(void)
{
	struct generic_pm_domain *genpd;

	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
			    NULL, &summary_fops);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_debug_add(genpd);

	return 0;
}
late_initcall(genpd_debug_init);

static void __exit genpd_debug_exit(void)
{
	debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */