1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * drivers/base/power/domain.c - Common code related to device power domains. 4 * 5 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. 6 */ 7 #define pr_fmt(fmt) "PM: " fmt 8 9 #include <linux/delay.h> 10 #include <linux/kernel.h> 11 #include <linux/io.h> 12 #include <linux/platform_device.h> 13 #include <linux/pm_opp.h> 14 #include <linux/pm_runtime.h> 15 #include <linux/pm_domain.h> 16 #include <linux/pm_qos.h> 17 #include <linux/pm_clock.h> 18 #include <linux/slab.h> 19 #include <linux/err.h> 20 #include <linux/sched.h> 21 #include <linux/suspend.h> 22 #include <linux/export.h> 23 #include <linux/cpu.h> 24 #include <linux/debugfs.h> 25 26 #include "power.h" 27 28 #define GENPD_RETRY_MAX_MS 250 /* Approximate */ 29 30 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ 31 ({ \ 32 type (*__routine)(struct device *__d); \ 33 type __ret = (type)0; \ 34 \ 35 __routine = genpd->dev_ops.callback; \ 36 if (__routine) { \ 37 __ret = __routine(dev); \ 38 } \ 39 __ret; \ 40 }) 41 42 static LIST_HEAD(gpd_list); 43 static DEFINE_MUTEX(gpd_list_lock); 44 45 struct genpd_lock_ops { 46 void (*lock)(struct generic_pm_domain *genpd); 47 void (*lock_nested)(struct generic_pm_domain *genpd, int depth); 48 int (*lock_interruptible)(struct generic_pm_domain *genpd); 49 void (*unlock)(struct generic_pm_domain *genpd); 50 }; 51 52 static void genpd_lock_mtx(struct generic_pm_domain *genpd) 53 { 54 mutex_lock(&genpd->mlock); 55 } 56 57 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd, 58 int depth) 59 { 60 mutex_lock_nested(&genpd->mlock, depth); 61 } 62 63 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd) 64 { 65 return mutex_lock_interruptible(&genpd->mlock); 66 } 67 68 static void genpd_unlock_mtx(struct generic_pm_domain *genpd) 69 { 70 return mutex_unlock(&genpd->mlock); 71 } 72 73 static const struct genpd_lock_ops genpd_mtx_ops = { 74 .lock = genpd_lock_mtx, 75 .lock_nested = genpd_lock_nested_mtx, 76 .lock_interruptible = genpd_lock_interruptible_mtx, 77 .unlock = genpd_unlock_mtx, 78 }; 79 80 static void genpd_lock_spin(struct generic_pm_domain *genpd) 81 __acquires(&genpd->slock) 82 { 83 unsigned long flags; 84 85 spin_lock_irqsave(&genpd->slock, flags); 86 genpd->lock_flags = flags; 87 } 88 89 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd, 90 int depth) 91 __acquires(&genpd->slock) 92 { 93 unsigned long flags; 94 95 spin_lock_irqsave_nested(&genpd->slock, flags, depth); 96 genpd->lock_flags = flags; 97 } 98 99 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd) 100 __acquires(&genpd->slock) 101 { 102 unsigned long flags; 103 104 spin_lock_irqsave(&genpd->slock, flags); 105 genpd->lock_flags = flags; 106 return 0; 107 } 108 109 static void genpd_unlock_spin(struct generic_pm_domain *genpd) 110 __releases(&genpd->slock) 111 { 112 spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags); 113 } 114 115 static const struct genpd_lock_ops genpd_spin_ops = { 116 .lock = genpd_lock_spin, 117 .lock_nested = genpd_lock_nested_spin, 118 .lock_interruptible = genpd_lock_interruptible_spin, 119 .unlock = genpd_unlock_spin, 120 }; 121 122 #define genpd_lock(p) p->lock_ops->lock(p) 123 #define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d) 124 #define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p) 125 #define genpd_unlock(p) p->lock_ops->unlock(p) 126 127 #define genpd_status_on(genpd) (genpd->status == GENPD_STATE_ON) 128 #define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE) 129 #define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON) 130 #define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP) 131 #define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN) 132 #define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON) 133 134 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev, 135 const struct generic_pm_domain *genpd) 136 { 137 bool ret; 138 139 ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd); 140 141 /* 142 * Warn once if an IRQ safe device is attached to a no sleep domain, as 143 * to indicate a suboptimal configuration for PM. For an always on 144 * domain this isn't case, thus don't warn. 145 */ 146 if (ret && !genpd_is_always_on(genpd)) 147 dev_warn_once(dev, "PM domain %s will not be powered off\n", 148 genpd->name); 149 150 return ret; 151 } 152 153 static int genpd_runtime_suspend(struct device *dev); 154 155 /* 156 * Get the generic PM domain for a particular struct device. 157 * This validates the struct device pointer, the PM domain pointer, 158 * and checks that the PM domain pointer is a real generic PM domain. 159 * Any failure results in NULL being returned. 160 */ 161 static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev) 162 { 163 if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain)) 164 return NULL; 165 166 /* A genpd's always have its ->runtime_suspend() callback assigned. */ 167 if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend) 168 return pd_to_genpd(dev->pm_domain); 169 170 return NULL; 171 } 172 173 /* 174 * This should only be used where we are certain that the pm_domain 175 * attached to the device is a genpd domain. 176 */ 177 static struct generic_pm_domain *dev_to_genpd(struct device *dev) 178 { 179 if (IS_ERR_OR_NULL(dev->pm_domain)) 180 return ERR_PTR(-EINVAL); 181 182 return pd_to_genpd(dev->pm_domain); 183 } 184 185 static int genpd_stop_dev(const struct generic_pm_domain *genpd, 186 struct device *dev) 187 { 188 return GENPD_DEV_CALLBACK(genpd, int, stop, dev); 189 } 190 191 static int genpd_start_dev(const struct generic_pm_domain *genpd, 192 struct device *dev) 193 { 194 return GENPD_DEV_CALLBACK(genpd, int, start, dev); 195 } 196 197 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) 198 { 199 bool ret = false; 200 201 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0)) 202 ret = !!atomic_dec_and_test(&genpd->sd_count); 203 204 return ret; 205 } 206 207 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) 208 { 209 atomic_inc(&genpd->sd_count); 210 smp_mb__after_atomic(); 211 } 212 213 #ifdef CONFIG_DEBUG_FS 214 static struct dentry *genpd_debugfs_dir; 215 216 static void genpd_debug_add(struct generic_pm_domain *genpd); 217 218 static void genpd_debug_remove(struct generic_pm_domain *genpd) 219 { 220 struct dentry *d; 221 222 d = debugfs_lookup(genpd->name, genpd_debugfs_dir); 223 debugfs_remove(d); 224 } 225 226 static void genpd_update_accounting(struct generic_pm_domain *genpd) 227 { 228 ktime_t delta, now; 229 230 now = ktime_get(); 231 delta = ktime_sub(now, genpd->accounting_time); 232 233 /* 234 * If genpd->status is active, it means we are just 235 * out of off and so update the idle time and vice 236 * versa. 237 */ 238 if (genpd->status == GENPD_STATE_ON) { 239 int state_idx = genpd->state_idx; 240 241 genpd->states[state_idx].idle_time = 242 ktime_add(genpd->states[state_idx].idle_time, delta); 243 } else { 244 genpd->on_time = ktime_add(genpd->on_time, delta); 245 } 246 247 genpd->accounting_time = now; 248 } 249 #else 250 static inline void genpd_debug_add(struct generic_pm_domain *genpd) {} 251 static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {} 252 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} 253 #endif 254 255 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd, 256 unsigned int state) 257 { 258 struct generic_pm_domain_data *pd_data; 259 struct pm_domain_data *pdd; 260 struct gpd_link *link; 261 262 /* New requested state is same as Max requested state */ 263 if (state == genpd->performance_state) 264 return state; 265 266 /* New requested state is higher than Max requested state */ 267 if (state > genpd->performance_state) 268 return state; 269 270 /* Traverse all devices within the domain */ 271 list_for_each_entry(pdd, &genpd->dev_list, list_node) { 272 pd_data = to_gpd_data(pdd); 273 274 if (pd_data->performance_state > state) 275 state = pd_data->performance_state; 276 } 277 278 /* 279 * Traverse all sub-domains within the domain. This can be 280 * done without any additional locking as the link->performance_state 281 * field is protected by the parent genpd->lock, which is already taken. 282 * 283 * Also note that link->performance_state (subdomain's performance state 284 * requirement to parent domain) is different from 285 * link->child->performance_state (current performance state requirement 286 * of the devices/sub-domains of the subdomain) and so can have a 287 * different value. 288 * 289 * Note that we also take vote from powered-off sub-domains into account 290 * as the same is done for devices right now. 291 */ 292 list_for_each_entry(link, &genpd->parent_links, parent_node) { 293 if (link->performance_state > state) 294 state = link->performance_state; 295 } 296 297 return state; 298 } 299 300 static int _genpd_set_performance_state(struct generic_pm_domain *genpd, 301 unsigned int state, int depth) 302 { 303 struct generic_pm_domain *parent; 304 struct gpd_link *link; 305 int parent_state, ret; 306 307 if (state == genpd->performance_state) 308 return 0; 309 310 /* Propagate to parents of genpd */ 311 list_for_each_entry(link, &genpd->child_links, child_node) { 312 parent = link->parent; 313 314 if (!parent->set_performance_state) 315 continue; 316 317 /* Find parent's performance state */ 318 ret = dev_pm_opp_xlate_performance_state(genpd->opp_table, 319 parent->opp_table, 320 state); 321 if (unlikely(ret < 0)) 322 goto err; 323 324 parent_state = ret; 325 326 genpd_lock_nested(parent, depth + 1); 327 328 link->prev_performance_state = link->performance_state; 329 link->performance_state = parent_state; 330 parent_state = _genpd_reeval_performance_state(parent, 331 parent_state); 332 ret = _genpd_set_performance_state(parent, parent_state, depth + 1); 333 if (ret) 334 link->performance_state = link->prev_performance_state; 335 336 genpd_unlock(parent); 337 338 if (ret) 339 goto err; 340 } 341 342 ret = genpd->set_performance_state(genpd, state); 343 if (ret) 344 goto err; 345 346 genpd->performance_state = state; 347 return 0; 348 349 err: 350 /* Encountered an error, lets rollback */ 351 list_for_each_entry_continue_reverse(link, &genpd->child_links, 352 child_node) { 353 parent = link->parent; 354 355 if (!parent->set_performance_state) 356 continue; 357 358 genpd_lock_nested(parent, depth + 1); 359 360 parent_state = link->prev_performance_state; 361 link->performance_state = parent_state; 362 363 parent_state = _genpd_reeval_performance_state(parent, 364 parent_state); 365 if (_genpd_set_performance_state(parent, parent_state, depth + 1)) { 366 pr_err("%s: Failed to roll back to %d performance state\n", 367 parent->name, parent_state); 368 } 369 370 genpd_unlock(parent); 371 } 372 373 return ret; 374 } 375 376 /** 377 * dev_pm_genpd_set_performance_state- Set performance state of device's power 378 * domain. 379 * 380 * @dev: Device for which the performance-state needs to be set. 381 * @state: Target performance state of the device. This can be set as 0 when the 382 * device doesn't have any performance state constraints left (And so 383 * the device wouldn't participate anymore to find the target 384 * performance state of the genpd). 385 * 386 * It is assumed that the users guarantee that the genpd wouldn't be detached 387 * while this routine is getting called. 388 * 389 * Returns 0 on success and negative error values on failures. 390 */ 391 int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state) 392 { 393 struct generic_pm_domain *genpd; 394 struct generic_pm_domain_data *gpd_data; 395 unsigned int prev; 396 int ret; 397 398 genpd = dev_to_genpd_safe(dev); 399 if (!genpd) 400 return -ENODEV; 401 402 if (unlikely(!genpd->set_performance_state)) 403 return -EINVAL; 404 405 if (WARN_ON(!dev->power.subsys_data || 406 !dev->power.subsys_data->domain_data)) 407 return -EINVAL; 408 409 genpd_lock(genpd); 410 411 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 412 prev = gpd_data->performance_state; 413 gpd_data->performance_state = state; 414 415 state = _genpd_reeval_performance_state(genpd, state); 416 ret = _genpd_set_performance_state(genpd, state, 0); 417 if (ret) 418 gpd_data->performance_state = prev; 419 420 genpd_unlock(genpd); 421 422 return ret; 423 } 424 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state); 425 426 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) 427 { 428 unsigned int state_idx = genpd->state_idx; 429 ktime_t time_start; 430 s64 elapsed_ns; 431 int ret; 432 433 /* Notify consumers that we are about to power on. */ 434 ret = raw_notifier_call_chain_robust(&genpd->power_notifiers, 435 GENPD_NOTIFY_PRE_ON, 436 GENPD_NOTIFY_OFF, NULL); 437 ret = notifier_to_errno(ret); 438 if (ret) 439 return ret; 440 441 if (!genpd->power_on) 442 goto out; 443 444 if (!timed) { 445 ret = genpd->power_on(genpd); 446 if (ret) 447 goto err; 448 449 goto out; 450 } 451 452 time_start = ktime_get(); 453 ret = genpd->power_on(genpd); 454 if (ret) 455 goto err; 456 457 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 458 if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns) 459 goto out; 460 461 genpd->states[state_idx].power_on_latency_ns = elapsed_ns; 462 genpd->max_off_time_changed = true; 463 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", 464 genpd->name, "on", elapsed_ns); 465 466 out: 467 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL); 468 return 0; 469 err: 470 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF, 471 NULL); 472 return ret; 473 } 474 475 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed) 476 { 477 unsigned int state_idx = genpd->state_idx; 478 ktime_t time_start; 479 s64 elapsed_ns; 480 int ret; 481 482 /* Notify consumers that we are about to power off. */ 483 ret = raw_notifier_call_chain_robust(&genpd->power_notifiers, 484 GENPD_NOTIFY_PRE_OFF, 485 GENPD_NOTIFY_ON, NULL); 486 ret = notifier_to_errno(ret); 487 if (ret) 488 return ret; 489 490 if (!genpd->power_off) 491 goto out; 492 493 if (!timed) { 494 ret = genpd->power_off(genpd); 495 if (ret) 496 goto busy; 497 498 goto out; 499 } 500 501 time_start = ktime_get(); 502 ret = genpd->power_off(genpd); 503 if (ret) 504 goto busy; 505 506 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 507 if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns) 508 goto out; 509 510 genpd->states[state_idx].power_off_latency_ns = elapsed_ns; 511 genpd->max_off_time_changed = true; 512 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", 513 genpd->name, "off", elapsed_ns); 514 515 out: 516 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF, 517 NULL); 518 return 0; 519 busy: 520 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL); 521 return ret; 522 } 523 524 /** 525 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off(). 526 * @genpd: PM domain to power off. 527 * 528 * Queue up the execution of genpd_power_off() unless it's already been done 529 * before. 530 */ 531 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) 532 { 533 queue_work(pm_wq, &genpd->power_off_work); 534 } 535 536 /** 537 * genpd_power_off - Remove power from a given PM domain. 538 * @genpd: PM domain to power down. 539 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the 540 * RPM status of the releated device is in an intermediate state, not yet turned 541 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not 542 * be RPM_SUSPENDED, while it tries to power off the PM domain. 543 * 544 * If all of the @genpd's devices have been suspended and all of its subdomains 545 * have been powered down, remove power from @genpd. 546 */ 547 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, 548 unsigned int depth) 549 { 550 struct pm_domain_data *pdd; 551 struct gpd_link *link; 552 unsigned int not_suspended = 0; 553 int ret; 554 555 /* 556 * Do not try to power off the domain in the following situations: 557 * (1) The domain is already in the "power off" state. 558 * (2) System suspend is in progress. 559 */ 560 if (!genpd_status_on(genpd) || genpd->prepared_count > 0) 561 return 0; 562 563 /* 564 * Abort power off for the PM domain in the following situations: 565 * (1) The domain is configured as always on. 566 * (2) When the domain has a subdomain being powered on. 567 */ 568 if (genpd_is_always_on(genpd) || 569 genpd_is_rpm_always_on(genpd) || 570 atomic_read(&genpd->sd_count) > 0) 571 return -EBUSY; 572 573 list_for_each_entry(pdd, &genpd->dev_list, list_node) { 574 enum pm_qos_flags_status stat; 575 576 stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF); 577 if (stat > PM_QOS_FLAGS_NONE) 578 return -EBUSY; 579 580 /* 581 * Do not allow PM domain to be powered off, when an IRQ safe 582 * device is part of a non-IRQ safe domain. 583 */ 584 if (!pm_runtime_suspended(pdd->dev) || 585 irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd)) 586 not_suspended++; 587 } 588 589 if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on)) 590 return -EBUSY; 591 592 if (genpd->gov && genpd->gov->power_down_ok) { 593 if (!genpd->gov->power_down_ok(&genpd->domain)) 594 return -EAGAIN; 595 } 596 597 /* Default to shallowest state. */ 598 if (!genpd->gov) 599 genpd->state_idx = 0; 600 601 /* Don't power off, if a child domain is waiting to power on. */ 602 if (atomic_read(&genpd->sd_count) > 0) 603 return -EBUSY; 604 605 ret = _genpd_power_off(genpd, true); 606 if (ret) { 607 genpd->states[genpd->state_idx].rejected++; 608 return ret; 609 } 610 611 genpd->status = GENPD_STATE_OFF; 612 genpd_update_accounting(genpd); 613 genpd->states[genpd->state_idx].usage++; 614 615 list_for_each_entry(link, &genpd->child_links, child_node) { 616 genpd_sd_counter_dec(link->parent); 617 genpd_lock_nested(link->parent, depth + 1); 618 genpd_power_off(link->parent, false, depth + 1); 619 genpd_unlock(link->parent); 620 } 621 622 return 0; 623 } 624 625 /** 626 * genpd_power_on - Restore power to a given PM domain and its parents. 627 * @genpd: PM domain to power up. 628 * @depth: nesting count for lockdep. 629 * 630 * Restore power to @genpd and all of its parents so that it is possible to 631 * resume a device belonging to it. 632 */ 633 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) 634 { 635 struct gpd_link *link; 636 int ret = 0; 637 638 if (genpd_status_on(genpd)) 639 return 0; 640 641 /* 642 * The list is guaranteed not to change while the loop below is being 643 * executed, unless one of the parents' .power_on() callbacks fiddles 644 * with it. 645 */ 646 list_for_each_entry(link, &genpd->child_links, child_node) { 647 struct generic_pm_domain *parent = link->parent; 648 649 genpd_sd_counter_inc(parent); 650 651 genpd_lock_nested(parent, depth + 1); 652 ret = genpd_power_on(parent, depth + 1); 653 genpd_unlock(parent); 654 655 if (ret) { 656 genpd_sd_counter_dec(parent); 657 goto err; 658 } 659 } 660 661 ret = _genpd_power_on(genpd, true); 662 if (ret) 663 goto err; 664 665 genpd->status = GENPD_STATE_ON; 666 genpd_update_accounting(genpd); 667 668 return 0; 669 670 err: 671 list_for_each_entry_continue_reverse(link, 672 &genpd->child_links, 673 child_node) { 674 genpd_sd_counter_dec(link->parent); 675 genpd_lock_nested(link->parent, depth + 1); 676 genpd_power_off(link->parent, false, depth + 1); 677 genpd_unlock(link->parent); 678 } 679 680 return ret; 681 } 682 683 static int genpd_dev_pm_start(struct device *dev) 684 { 685 struct generic_pm_domain *genpd = dev_to_genpd(dev); 686 687 return genpd_start_dev(genpd, dev); 688 } 689 690 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, 691 unsigned long val, void *ptr) 692 { 693 struct generic_pm_domain_data *gpd_data; 694 struct device *dev; 695 696 gpd_data = container_of(nb, struct generic_pm_domain_data, nb); 697 dev = gpd_data->base.dev; 698 699 for (;;) { 700 struct generic_pm_domain *genpd; 701 struct pm_domain_data *pdd; 702 703 spin_lock_irq(&dev->power.lock); 704 705 pdd = dev->power.subsys_data ? 706 dev->power.subsys_data->domain_data : NULL; 707 if (pdd) { 708 to_gpd_data(pdd)->td.constraint_changed = true; 709 genpd = dev_to_genpd(dev); 710 } else { 711 genpd = ERR_PTR(-ENODATA); 712 } 713 714 spin_unlock_irq(&dev->power.lock); 715 716 if (!IS_ERR(genpd)) { 717 genpd_lock(genpd); 718 genpd->max_off_time_changed = true; 719 genpd_unlock(genpd); 720 } 721 722 dev = dev->parent; 723 if (!dev || dev->power.ignore_children) 724 break; 725 } 726 727 return NOTIFY_DONE; 728 } 729 730 /** 731 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0. 732 * @work: Work structure used for scheduling the execution of this function. 733 */ 734 static void genpd_power_off_work_fn(struct work_struct *work) 735 { 736 struct generic_pm_domain *genpd; 737 738 genpd = container_of(work, struct generic_pm_domain, power_off_work); 739 740 genpd_lock(genpd); 741 genpd_power_off(genpd, false, 0); 742 genpd_unlock(genpd); 743 } 744 745 /** 746 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks 747 * @dev: Device to handle. 748 */ 749 static int __genpd_runtime_suspend(struct device *dev) 750 { 751 int (*cb)(struct device *__dev); 752 753 if (dev->type && dev->type->pm) 754 cb = dev->type->pm->runtime_suspend; 755 else if (dev->class && dev->class->pm) 756 cb = dev->class->pm->runtime_suspend; 757 else if (dev->bus && dev->bus->pm) 758 cb = dev->bus->pm->runtime_suspend; 759 else 760 cb = NULL; 761 762 if (!cb && dev->driver && dev->driver->pm) 763 cb = dev->driver->pm->runtime_suspend; 764 765 return cb ? cb(dev) : 0; 766 } 767 768 /** 769 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks 770 * @dev: Device to handle. 771 */ 772 static int __genpd_runtime_resume(struct device *dev) 773 { 774 int (*cb)(struct device *__dev); 775 776 if (dev->type && dev->type->pm) 777 cb = dev->type->pm->runtime_resume; 778 else if (dev->class && dev->class->pm) 779 cb = dev->class->pm->runtime_resume; 780 else if (dev->bus && dev->bus->pm) 781 cb = dev->bus->pm->runtime_resume; 782 else 783 cb = NULL; 784 785 if (!cb && dev->driver && dev->driver->pm) 786 cb = dev->driver->pm->runtime_resume; 787 788 return cb ? cb(dev) : 0; 789 } 790 791 /** 792 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain. 793 * @dev: Device to suspend. 794 * 795 * Carry out a runtime suspend of a device under the assumption that its 796 * pm_domain field points to the domain member of an object of type 797 * struct generic_pm_domain representing a PM domain consisting of I/O devices. 798 */ 799 static int genpd_runtime_suspend(struct device *dev) 800 { 801 struct generic_pm_domain *genpd; 802 bool (*suspend_ok)(struct device *__dev); 803 struct gpd_timing_data *td = &dev_gpd_data(dev)->td; 804 bool runtime_pm = pm_runtime_enabled(dev); 805 ktime_t time_start; 806 s64 elapsed_ns; 807 int ret; 808 809 dev_dbg(dev, "%s()\n", __func__); 810 811 genpd = dev_to_genpd(dev); 812 if (IS_ERR(genpd)) 813 return -EINVAL; 814 815 /* 816 * A runtime PM centric subsystem/driver may re-use the runtime PM 817 * callbacks for other purposes than runtime PM. In those scenarios 818 * runtime PM is disabled. Under these circumstances, we shall skip 819 * validating/measuring the PM QoS latency. 820 */ 821 suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL; 822 if (runtime_pm && suspend_ok && !suspend_ok(dev)) 823 return -EBUSY; 824 825 /* Measure suspend latency. */ 826 time_start = 0; 827 if (runtime_pm) 828 time_start = ktime_get(); 829 830 ret = __genpd_runtime_suspend(dev); 831 if (ret) 832 return ret; 833 834 ret = genpd_stop_dev(genpd, dev); 835 if (ret) { 836 __genpd_runtime_resume(dev); 837 return ret; 838 } 839 840 /* Update suspend latency value if the measured time exceeds it. */ 841 if (runtime_pm) { 842 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 843 if (elapsed_ns > td->suspend_latency_ns) { 844 td->suspend_latency_ns = elapsed_ns; 845 dev_dbg(dev, "suspend latency exceeded, %lld ns\n", 846 elapsed_ns); 847 genpd->max_off_time_changed = true; 848 td->constraint_changed = true; 849 } 850 } 851 852 /* 853 * If power.irq_safe is set, this routine may be run with 854 * IRQs disabled, so suspend only if the PM domain also is irq_safe. 855 */ 856 if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) 857 return 0; 858 859 genpd_lock(genpd); 860 genpd_power_off(genpd, true, 0); 861 genpd_unlock(genpd); 862 863 return 0; 864 } 865 866 /** 867 * genpd_runtime_resume - Resume a device belonging to I/O PM domain. 868 * @dev: Device to resume. 869 * 870 * Carry out a runtime resume of a device under the assumption that its 871 * pm_domain field points to the domain member of an object of type 872 * struct generic_pm_domain representing a PM domain consisting of I/O devices. 873 */ 874 static int genpd_runtime_resume(struct device *dev) 875 { 876 struct generic_pm_domain *genpd; 877 struct gpd_timing_data *td = &dev_gpd_data(dev)->td; 878 bool runtime_pm = pm_runtime_enabled(dev); 879 ktime_t time_start; 880 s64 elapsed_ns; 881 int ret; 882 bool timed = true; 883 884 dev_dbg(dev, "%s()\n", __func__); 885 886 genpd = dev_to_genpd(dev); 887 if (IS_ERR(genpd)) 888 return -EINVAL; 889 890 /* 891 * As we don't power off a non IRQ safe domain, which holds 892 * an IRQ safe device, we don't need to restore power to it. 893 */ 894 if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) { 895 timed = false; 896 goto out; 897 } 898 899 genpd_lock(genpd); 900 ret = genpd_power_on(genpd, 0); 901 genpd_unlock(genpd); 902 903 if (ret) 904 return ret; 905 906 out: 907 /* Measure resume latency. */ 908 time_start = 0; 909 if (timed && runtime_pm) 910 time_start = ktime_get(); 911 912 ret = genpd_start_dev(genpd, dev); 913 if (ret) 914 goto err_poweroff; 915 916 ret = __genpd_runtime_resume(dev); 917 if (ret) 918 goto err_stop; 919 920 /* Update resume latency value if the measured time exceeds it. */ 921 if (timed && runtime_pm) { 922 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 923 if (elapsed_ns > td->resume_latency_ns) { 924 td->resume_latency_ns = elapsed_ns; 925 dev_dbg(dev, "resume latency exceeded, %lld ns\n", 926 elapsed_ns); 927 genpd->max_off_time_changed = true; 928 td->constraint_changed = true; 929 } 930 } 931 932 return 0; 933 934 err_stop: 935 genpd_stop_dev(genpd, dev); 936 err_poweroff: 937 if (!pm_runtime_is_irq_safe(dev) || 938 (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) { 939 genpd_lock(genpd); 940 genpd_power_off(genpd, true, 0); 941 genpd_unlock(genpd); 942 } 943 944 return ret; 945 } 946 947 static bool pd_ignore_unused; 948 static int __init pd_ignore_unused_setup(char *__unused) 949 { 950 pd_ignore_unused = true; 951 return 1; 952 } 953 __setup("pd_ignore_unused", pd_ignore_unused_setup); 954 955 /** 956 * genpd_power_off_unused - Power off all PM domains with no devices in use. 957 */ 958 static int __init genpd_power_off_unused(void) 959 { 960 struct generic_pm_domain *genpd; 961 962 if (pd_ignore_unused) { 963 pr_warn("genpd: Not disabling unused power domains\n"); 964 return 0; 965 } 966 967 mutex_lock(&gpd_list_lock); 968 969 list_for_each_entry(genpd, &gpd_list, gpd_list_node) 970 genpd_queue_power_off_work(genpd); 971 972 mutex_unlock(&gpd_list_lock); 973 974 return 0; 975 } 976 late_initcall(genpd_power_off_unused); 977 978 #ifdef CONFIG_PM_SLEEP 979 980 /** 981 * genpd_sync_power_off - Synchronously power off a PM domain and its parents. 982 * @genpd: PM domain to power off, if possible. 983 * @use_lock: use the lock. 984 * @depth: nesting count for lockdep. 985 * 986 * Check if the given PM domain can be powered off (during system suspend or 987 * hibernation) and do that if so. Also, in that case propagate to its parents. 988 * 989 * This function is only called in "noirq" and "syscore" stages of system power 990 * transitions. The "noirq" callbacks may be executed asynchronously, thus in 991 * these cases the lock must be held. 992 */ 993 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock, 994 unsigned int depth) 995 { 996 struct gpd_link *link; 997 998 if (!genpd_status_on(genpd) || genpd_is_always_on(genpd)) 999 return; 1000 1001 if (genpd->suspended_count != genpd->device_count 1002 || atomic_read(&genpd->sd_count) > 0) 1003 return; 1004 1005 /* Choose the deepest state when suspending */ 1006 genpd->state_idx = genpd->state_count - 1; 1007 if (_genpd_power_off(genpd, false)) 1008 return; 1009 1010 genpd->status = GENPD_STATE_OFF; 1011 1012 list_for_each_entry(link, &genpd->child_links, child_node) { 1013 genpd_sd_counter_dec(link->parent); 1014 1015 if (use_lock) 1016 genpd_lock_nested(link->parent, depth + 1); 1017 1018 genpd_sync_power_off(link->parent, use_lock, depth + 1); 1019 1020 if (use_lock) 1021 genpd_unlock(link->parent); 1022 } 1023 } 1024 1025 /** 1026 * genpd_sync_power_on - Synchronously power on a PM domain and its parents. 1027 * @genpd: PM domain to power on. 1028 * @use_lock: use the lock. 1029 * @depth: nesting count for lockdep. 1030 * 1031 * This function is only called in "noirq" and "syscore" stages of system power 1032 * transitions. The "noirq" callbacks may be executed asynchronously, thus in 1033 * these cases the lock must be held. 1034 */ 1035 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock, 1036 unsigned int depth) 1037 { 1038 struct gpd_link *link; 1039 1040 if (genpd_status_on(genpd)) 1041 return; 1042 1043 list_for_each_entry(link, &genpd->child_links, child_node) { 1044 genpd_sd_counter_inc(link->parent); 1045 1046 if (use_lock) 1047 genpd_lock_nested(link->parent, depth + 1); 1048 1049 genpd_sync_power_on(link->parent, use_lock, depth + 1); 1050 1051 if (use_lock) 1052 genpd_unlock(link->parent); 1053 } 1054 1055 _genpd_power_on(genpd, false); 1056 genpd->status = GENPD_STATE_ON; 1057 } 1058 1059 /** 1060 * resume_needed - Check whether to resume a device before system suspend. 1061 * @dev: Device to check. 1062 * @genpd: PM domain the device belongs to. 1063 * 1064 * There are two cases in which a device that can wake up the system from sleep 1065 * states should be resumed by genpd_prepare(): (1) if the device is enabled 1066 * to wake up the system and it has to remain active for this purpose while the 1067 * system is in the sleep state and (2) if the device is not enabled to wake up 1068 * the system from sleep states and it generally doesn't generate wakeup signals 1069 * by itself (those signals are generated on its behalf by other parts of the 1070 * system). In the latter case it may be necessary to reconfigure the device's 1071 * wakeup settings during system suspend, because it may have been set up to 1072 * signal remote wakeup from the system's working state as needed by runtime PM. 1073 * Return 'true' in either of the above cases. 1074 */ 1075 static bool resume_needed(struct device *dev, 1076 const struct generic_pm_domain *genpd) 1077 { 1078 bool active_wakeup; 1079 1080 if (!device_can_wakeup(dev)) 1081 return false; 1082 1083 active_wakeup = genpd_is_active_wakeup(genpd); 1084 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; 1085 } 1086 1087 /** 1088 * genpd_prepare - Start power transition of a device in a PM domain. 1089 * @dev: Device to start the transition of. 1090 * 1091 * Start a power transition of a device (during a system-wide power transition) 1092 * under the assumption that its pm_domain field points to the domain member of 1093 * an object of type struct generic_pm_domain representing a PM domain 1094 * consisting of I/O devices. 1095 */ 1096 static int genpd_prepare(struct device *dev) 1097 { 1098 struct generic_pm_domain *genpd; 1099 int ret; 1100 1101 dev_dbg(dev, "%s()\n", __func__); 1102 1103 genpd = dev_to_genpd(dev); 1104 if (IS_ERR(genpd)) 1105 return -EINVAL; 1106 1107 /* 1108 * If a wakeup request is pending for the device, it should be woken up 1109 * at this point and a system wakeup event should be reported if it's 1110 * set up to wake up the system from sleep states. 1111 */ 1112 if (resume_needed(dev, genpd)) 1113 pm_runtime_resume(dev); 1114 1115 genpd_lock(genpd); 1116 1117 if (genpd->prepared_count++ == 0) 1118 genpd->suspended_count = 0; 1119 1120 genpd_unlock(genpd); 1121 1122 ret = pm_generic_prepare(dev); 1123 if (ret < 0) { 1124 genpd_lock(genpd); 1125 1126 genpd->prepared_count--; 1127 1128 genpd_unlock(genpd); 1129 } 1130 1131 /* Never return 1, as genpd don't cope with the direct_complete path. */ 1132 return ret >= 0 ? 0 : ret; 1133 } 1134 1135 /** 1136 * genpd_finish_suspend - Completion of suspend or hibernation of device in an 1137 * I/O pm domain. 1138 * @dev: Device to suspend. 1139 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback. 1140 * 1141 * Stop the device and remove power from the domain if all devices in it have 1142 * been stopped. 1143 */ 1144 static int genpd_finish_suspend(struct device *dev, bool poweroff) 1145 { 1146 struct generic_pm_domain *genpd; 1147 int ret = 0; 1148 1149 genpd = dev_to_genpd(dev); 1150 if (IS_ERR(genpd)) 1151 return -EINVAL; 1152 1153 if (poweroff) 1154 ret = pm_generic_poweroff_noirq(dev); 1155 else 1156 ret = pm_generic_suspend_noirq(dev); 1157 if (ret) 1158 return ret; 1159 1160 if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd)) 1161 return 0; 1162 1163 if (genpd->dev_ops.stop && genpd->dev_ops.start && 1164 !pm_runtime_status_suspended(dev)) { 1165 ret = genpd_stop_dev(genpd, dev); 1166 if (ret) { 1167 if (poweroff) 1168 pm_generic_restore_noirq(dev); 1169 else 1170 pm_generic_resume_noirq(dev); 1171 return ret; 1172 } 1173 } 1174 1175 genpd_lock(genpd); 1176 genpd->suspended_count++; 1177 genpd_sync_power_off(genpd, true, 0); 1178 genpd_unlock(genpd); 1179 1180 return 0; 1181 } 1182 1183 /** 1184 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain. 1185 * @dev: Device to suspend. 1186 * 1187 * Stop the device and remove power from the domain if all devices in it have 1188 * been stopped. 1189 */ 1190 static int genpd_suspend_noirq(struct device *dev) 1191 { 1192 dev_dbg(dev, "%s()\n", __func__); 1193 1194 return genpd_finish_suspend(dev, false); 1195 } 1196 1197 /** 1198 * genpd_resume_noirq - Start of resume of device in an I/O PM domain. 1199 * @dev: Device to resume. 1200 * 1201 * Restore power to the device's PM domain, if necessary, and start the device. 1202 */ 1203 static int genpd_resume_noirq(struct device *dev) 1204 { 1205 struct generic_pm_domain *genpd; 1206 int ret; 1207 1208 dev_dbg(dev, "%s()\n", __func__); 1209 1210 genpd = dev_to_genpd(dev); 1211 if (IS_ERR(genpd)) 1212 return -EINVAL; 1213 1214 if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd)) 1215 return pm_generic_resume_noirq(dev); 1216 1217 genpd_lock(genpd); 1218 genpd_sync_power_on(genpd, true, 0); 1219 genpd->suspended_count--; 1220 genpd_unlock(genpd); 1221 1222 if (genpd->dev_ops.stop && genpd->dev_ops.start && 1223 !pm_runtime_status_suspended(dev)) { 1224 ret = genpd_start_dev(genpd, dev); 1225 if (ret) 1226 return ret; 1227 } 1228 1229 return pm_generic_resume_noirq(dev); 1230 } 1231 1232 /** 1233 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain. 1234 * @dev: Device to freeze. 1235 * 1236 * Carry out a late freeze of a device under the assumption that its 1237 * pm_domain field points to the domain member of an object of type 1238 * struct generic_pm_domain representing a power domain consisting of I/O 1239 * devices. 1240 */ 1241 static int genpd_freeze_noirq(struct device *dev) 1242 { 1243 const struct generic_pm_domain *genpd; 1244 int ret = 0; 1245 1246 dev_dbg(dev, "%s()\n", __func__); 1247 1248 genpd = dev_to_genpd(dev); 1249 if (IS_ERR(genpd)) 1250 return -EINVAL; 1251 1252 ret = pm_generic_freeze_noirq(dev); 1253 if (ret) 1254 return ret; 1255 1256 if (genpd->dev_ops.stop && genpd->dev_ops.start && 1257 !pm_runtime_status_suspended(dev)) 1258 ret = genpd_stop_dev(genpd, dev); 1259 1260 return ret; 1261 } 1262 1263 /** 1264 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain. 1265 * @dev: Device to thaw. 1266 * 1267 * Start the device, unless power has been removed from the domain already 1268 * before the system transition. 1269 */ 1270 static int genpd_thaw_noirq(struct device *dev) 1271 { 1272 const struct generic_pm_domain *genpd; 1273 int ret = 0; 1274 1275 dev_dbg(dev, "%s()\n", __func__); 1276 1277 genpd = dev_to_genpd(dev); 1278 if (IS_ERR(genpd)) 1279 return -EINVAL; 1280 1281 if (genpd->dev_ops.stop && genpd->dev_ops.start && 1282 !pm_runtime_status_suspended(dev)) { 1283 ret = genpd_start_dev(genpd, dev); 1284 if (ret) 1285 return ret; 1286 } 1287 1288 return pm_generic_thaw_noirq(dev); 1289 } 1290 1291 /** 1292 * genpd_poweroff_noirq - Completion of hibernation of device in an 1293 * I/O PM domain. 1294 * @dev: Device to poweroff. 1295 * 1296 * Stop the device and remove power from the domain if all devices in it have 1297 * been stopped. 1298 */ 1299 static int genpd_poweroff_noirq(struct device *dev) 1300 { 1301 dev_dbg(dev, "%s()\n", __func__); 1302 1303 return genpd_finish_suspend(dev, true); 1304 } 1305 1306 /** 1307 * genpd_restore_noirq - Start of restore of device in an I/O PM domain. 1308 * @dev: Device to resume. 1309 * 1310 * Make sure the domain will be in the same power state as before the 1311 * hibernation the system is resuming from and start the device if necessary. 1312 */ 1313 static int genpd_restore_noirq(struct device *dev) 1314 { 1315 struct generic_pm_domain *genpd; 1316 int ret = 0; 1317 1318 dev_dbg(dev, "%s()\n", __func__); 1319 1320 genpd = dev_to_genpd(dev); 1321 if (IS_ERR(genpd)) 1322 return -EINVAL; 1323 1324 /* 1325 * At this point suspended_count == 0 means we are being run for the 1326 * first time for the given domain in the present cycle. 1327 */ 1328 genpd_lock(genpd); 1329 if (genpd->suspended_count++ == 0) { 1330 /* 1331 * The boot kernel might put the domain into arbitrary state, 1332 * so make it appear as powered off to genpd_sync_power_on(), 1333 * so that it tries to power it on in case it was really off. 1334 */ 1335 genpd->status = GENPD_STATE_OFF; 1336 } 1337 1338 genpd_sync_power_on(genpd, true, 0); 1339 genpd_unlock(genpd); 1340 1341 if (genpd->dev_ops.stop && genpd->dev_ops.start && 1342 !pm_runtime_status_suspended(dev)) { 1343 ret = genpd_start_dev(genpd, dev); 1344 if (ret) 1345 return ret; 1346 } 1347 1348 return pm_generic_restore_noirq(dev); 1349 } 1350 1351 /** 1352 * genpd_complete - Complete power transition of a device in a power domain. 1353 * @dev: Device to complete the transition of. 1354 * 1355 * Complete a power transition of a device (during a system-wide power 1356 * transition) under the assumption that its pm_domain field points to the 1357 * domain member of an object of type struct generic_pm_domain representing 1358 * a power domain consisting of I/O devices. 1359 */ 1360 static void genpd_complete(struct device *dev) 1361 { 1362 struct generic_pm_domain *genpd; 1363 1364 dev_dbg(dev, "%s()\n", __func__); 1365 1366 genpd = dev_to_genpd(dev); 1367 if (IS_ERR(genpd)) 1368 return; 1369 1370 pm_generic_complete(dev); 1371 1372 genpd_lock(genpd); 1373 1374 genpd->prepared_count--; 1375 if (!genpd->prepared_count) 1376 genpd_queue_power_off_work(genpd); 1377 1378 genpd_unlock(genpd); 1379 } 1380 1381 static void genpd_switch_state(struct device *dev, bool suspend) 1382 { 1383 struct generic_pm_domain *genpd; 1384 bool use_lock; 1385 1386 genpd = dev_to_genpd_safe(dev); 1387 if (!genpd) 1388 return; 1389 1390 use_lock = genpd_is_irq_safe(genpd); 1391 1392 if (use_lock) 1393 genpd_lock(genpd); 1394 1395 if (suspend) { 1396 genpd->suspended_count++; 1397 genpd_sync_power_off(genpd, use_lock, 0); 1398 } else { 1399 genpd_sync_power_on(genpd, use_lock, 0); 1400 genpd->suspended_count--; 1401 } 1402 1403 if (use_lock) 1404 genpd_unlock(genpd); 1405 } 1406 1407 /** 1408 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev 1409 * @dev: The device that is attached to the genpd, that can be suspended. 1410 * 1411 * This routine should typically be called for a device that needs to be 1412 * suspended during the syscore suspend phase. It may also be called during 1413 * suspend-to-idle to suspend a corresponding CPU device that is attached to a 1414 * genpd. 1415 */ 1416 void dev_pm_genpd_suspend(struct device *dev) 1417 { 1418 genpd_switch_state(dev, true); 1419 } 1420 EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend); 1421 1422 /** 1423 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev 1424 * @dev: The device that is attached to the genpd, which needs to be resumed. 1425 * 1426 * This routine should typically be called for a device that needs to be resumed 1427 * during the syscore resume phase. It may also be called during suspend-to-idle 1428 * to resume a corresponding CPU device that is attached to a genpd. 1429 */ 1430 void dev_pm_genpd_resume(struct device *dev) 1431 { 1432 genpd_switch_state(dev, false); 1433 } 1434 EXPORT_SYMBOL_GPL(dev_pm_genpd_resume); 1435 1436 #else /* !CONFIG_PM_SLEEP */ 1437 1438 #define genpd_prepare NULL 1439 #define genpd_suspend_noirq NULL 1440 #define genpd_resume_noirq NULL 1441 #define genpd_freeze_noirq NULL 1442 #define genpd_thaw_noirq NULL 1443 #define genpd_poweroff_noirq NULL 1444 #define genpd_restore_noirq NULL 1445 #define genpd_complete NULL 1446 1447 #endif /* CONFIG_PM_SLEEP */ 1448 1449 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev) 1450 { 1451 struct generic_pm_domain_data *gpd_data; 1452 int ret; 1453 1454 ret = dev_pm_get_subsys_data(dev); 1455 if (ret) 1456 return ERR_PTR(ret); 1457 1458 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); 1459 if (!gpd_data) { 1460 ret = -ENOMEM; 1461 goto err_put; 1462 } 1463 1464 gpd_data->base.dev = dev; 1465 gpd_data->td.constraint_changed = true; 1466 gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS; 1467 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; 1468 1469 spin_lock_irq(&dev->power.lock); 1470 1471 if (dev->power.subsys_data->domain_data) { 1472 ret = -EINVAL; 1473 goto err_free; 1474 } 1475 1476 dev->power.subsys_data->domain_data = &gpd_data->base; 1477 1478 spin_unlock_irq(&dev->power.lock); 1479 1480 return gpd_data; 1481 1482 err_free: 1483 spin_unlock_irq(&dev->power.lock); 1484 kfree(gpd_data); 1485 err_put: 1486 dev_pm_put_subsys_data(dev); 1487 return ERR_PTR(ret); 1488 } 1489 1490 static void genpd_free_dev_data(struct device *dev, 1491 struct generic_pm_domain_data *gpd_data) 1492 { 1493 spin_lock_irq(&dev->power.lock); 1494 1495 dev->power.subsys_data->domain_data = NULL; 1496 1497 spin_unlock_irq(&dev->power.lock); 1498 1499 kfree(gpd_data); 1500 dev_pm_put_subsys_data(dev); 1501 } 1502 1503 static void genpd_update_cpumask(struct generic_pm_domain *genpd, 1504 int cpu, bool set, unsigned int depth) 1505 { 1506 struct gpd_link *link; 1507 1508 if (!genpd_is_cpu_domain(genpd)) 1509 return; 1510 1511 list_for_each_entry(link, &genpd->child_links, child_node) { 1512 struct generic_pm_domain *parent = link->parent; 1513 1514 genpd_lock_nested(parent, depth + 1); 1515 genpd_update_cpumask(parent, cpu, set, depth + 1); 1516 genpd_unlock(parent); 1517 } 1518 1519 if (set) 1520 cpumask_set_cpu(cpu, genpd->cpus); 1521 else 1522 cpumask_clear_cpu(cpu, genpd->cpus); 1523 } 1524 1525 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu) 1526 { 1527 if (cpu >= 0) 1528 genpd_update_cpumask(genpd, cpu, true, 0); 1529 } 1530 1531 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu) 1532 { 1533 if (cpu >= 0) 1534 genpd_update_cpumask(genpd, cpu, false, 0); 1535 } 1536 1537 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev) 1538 { 1539 int cpu; 1540 1541 if (!genpd_is_cpu_domain(genpd)) 1542 return -1; 1543 1544 for_each_possible_cpu(cpu) { 1545 if (get_cpu_device(cpu) == dev) 1546 return cpu; 1547 } 1548 1549 return -1; 1550 } 1551 1552 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, 1553 struct device *base_dev) 1554 { 1555 struct generic_pm_domain_data *gpd_data; 1556 int ret; 1557 1558 dev_dbg(dev, "%s()\n", __func__); 1559 1560 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) 1561 return -EINVAL; 1562 1563 gpd_data = genpd_alloc_dev_data(dev); 1564 if (IS_ERR(gpd_data)) 1565 return PTR_ERR(gpd_data); 1566 1567 gpd_data->cpu = genpd_get_cpu(genpd, base_dev); 1568 1569 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; 1570 if (ret) 1571 goto out; 1572 1573 genpd_lock(genpd); 1574 1575 genpd_set_cpumask(genpd, gpd_data->cpu); 1576 dev_pm_domain_set(dev, &genpd->domain); 1577 1578 genpd->device_count++; 1579 genpd->max_off_time_changed = true; 1580 1581 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1582 1583 genpd_unlock(genpd); 1584 out: 1585 if (ret) 1586 genpd_free_dev_data(dev, gpd_data); 1587 else 1588 dev_pm_qos_add_notifier(dev, &gpd_data->nb, 1589 DEV_PM_QOS_RESUME_LATENCY); 1590 1591 return ret; 1592 } 1593 1594 /** 1595 * pm_genpd_add_device - Add a device to an I/O PM domain. 1596 * @genpd: PM domain to add the device to. 1597 * @dev: Device to be added. 1598 */ 1599 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) 1600 { 1601 int ret; 1602 1603 mutex_lock(&gpd_list_lock); 1604 ret = genpd_add_device(genpd, dev, dev); 1605 mutex_unlock(&gpd_list_lock); 1606 1607 return ret; 1608 } 1609 EXPORT_SYMBOL_GPL(pm_genpd_add_device); 1610 1611 static int genpd_remove_device(struct generic_pm_domain *genpd, 1612 struct device *dev) 1613 { 1614 struct generic_pm_domain_data *gpd_data; 1615 struct pm_domain_data *pdd; 1616 int ret = 0; 1617 1618 dev_dbg(dev, "%s()\n", __func__); 1619 1620 pdd = dev->power.subsys_data->domain_data; 1621 gpd_data = to_gpd_data(pdd); 1622 dev_pm_qos_remove_notifier(dev, &gpd_data->nb, 1623 DEV_PM_QOS_RESUME_LATENCY); 1624 1625 genpd_lock(genpd); 1626 1627 if (genpd->prepared_count > 0) { 1628 ret = -EAGAIN; 1629 goto out; 1630 } 1631 1632 genpd->device_count--; 1633 genpd->max_off_time_changed = true; 1634 1635 genpd_clear_cpumask(genpd, gpd_data->cpu); 1636 dev_pm_domain_set(dev, NULL); 1637 1638 list_del_init(&pdd->list_node); 1639 1640 genpd_unlock(genpd); 1641 1642 if (genpd->detach_dev) 1643 genpd->detach_dev(genpd, dev); 1644 1645 genpd_free_dev_data(dev, gpd_data); 1646 1647 return 0; 1648 1649 out: 1650 genpd_unlock(genpd); 1651 dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY); 1652 1653 return ret; 1654 } 1655 1656 /** 1657 * pm_genpd_remove_device - Remove a device from an I/O PM domain. 1658 * @dev: Device to be removed. 1659 */ 1660 int pm_genpd_remove_device(struct device *dev) 1661 { 1662 struct generic_pm_domain *genpd = dev_to_genpd_safe(dev); 1663 1664 if (!genpd) 1665 return -EINVAL; 1666 1667 return genpd_remove_device(genpd, dev); 1668 } 1669 EXPORT_SYMBOL_GPL(pm_genpd_remove_device); 1670 1671 /** 1672 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev 1673 * 1674 * @dev: Device that should be associated with the notifier 1675 * @nb: The notifier block to register 1676 * 1677 * Users may call this function to add a genpd power on/off notifier for an 1678 * attached @dev. Only one notifier per device is allowed. The notifier is 1679 * sent when genpd is powering on/off the PM domain. 1680 * 1681 * It is assumed that the user guarantee that the genpd wouldn't be detached 1682 * while this routine is getting called. 1683 * 1684 * Returns 0 on success and negative error values on failures. 1685 */ 1686 int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb) 1687 { 1688 struct generic_pm_domain *genpd; 1689 struct generic_pm_domain_data *gpd_data; 1690 int ret; 1691 1692 genpd = dev_to_genpd_safe(dev); 1693 if (!genpd) 1694 return -ENODEV; 1695 1696 if (WARN_ON(!dev->power.subsys_data || 1697 !dev->power.subsys_data->domain_data)) 1698 return -EINVAL; 1699 1700 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 1701 if (gpd_data->power_nb) 1702 return -EEXIST; 1703 1704 genpd_lock(genpd); 1705 ret = raw_notifier_chain_register(&genpd->power_notifiers, nb); 1706 genpd_unlock(genpd); 1707 1708 if (ret) { 1709 dev_warn(dev, "failed to add notifier for PM domain %s\n", 1710 genpd->name); 1711 return ret; 1712 } 1713 1714 gpd_data->power_nb = nb; 1715 return 0; 1716 } 1717 EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier); 1718 1719 /** 1720 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev 1721 * 1722 * @dev: Device that is associated with the notifier 1723 * 1724 * Users may call this function to remove a genpd power on/off notifier for an 1725 * attached @dev. 1726 * 1727 * It is assumed that the user guarantee that the genpd wouldn't be detached 1728 * while this routine is getting called. 1729 * 1730 * Returns 0 on success and negative error values on failures. 1731 */ 1732 int dev_pm_genpd_remove_notifier(struct device *dev) 1733 { 1734 struct generic_pm_domain *genpd; 1735 struct generic_pm_domain_data *gpd_data; 1736 int ret; 1737 1738 genpd = dev_to_genpd_safe(dev); 1739 if (!genpd) 1740 return -ENODEV; 1741 1742 if (WARN_ON(!dev->power.subsys_data || 1743 !dev->power.subsys_data->domain_data)) 1744 return -EINVAL; 1745 1746 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 1747 if (!gpd_data->power_nb) 1748 return -ENODEV; 1749 1750 genpd_lock(genpd); 1751 ret = raw_notifier_chain_unregister(&genpd->power_notifiers, 1752 gpd_data->power_nb); 1753 genpd_unlock(genpd); 1754 1755 if (ret) { 1756 dev_warn(dev, "failed to remove notifier for PM domain %s\n", 1757 genpd->name); 1758 return ret; 1759 } 1760 1761 gpd_data->power_nb = NULL; 1762 return 0; 1763 } 1764 EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier); 1765 1766 static int genpd_add_subdomain(struct generic_pm_domain *genpd, 1767 struct generic_pm_domain *subdomain) 1768 { 1769 struct gpd_link *link, *itr; 1770 int ret = 0; 1771 1772 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) 1773 || genpd == subdomain) 1774 return -EINVAL; 1775 1776 /* 1777 * If the domain can be powered on/off in an IRQ safe 1778 * context, ensure that the subdomain can also be 1779 * powered on/off in that context. 1780 */ 1781 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) { 1782 WARN(1, "Parent %s of subdomain %s must be IRQ safe\n", 1783 genpd->name, subdomain->name); 1784 return -EINVAL; 1785 } 1786 1787 link = kzalloc(sizeof(*link), GFP_KERNEL); 1788 if (!link) 1789 return -ENOMEM; 1790 1791 genpd_lock(subdomain); 1792 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); 1793 1794 if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) { 1795 ret = -EINVAL; 1796 goto out; 1797 } 1798 1799 list_for_each_entry(itr, &genpd->parent_links, parent_node) { 1800 if (itr->child == subdomain && itr->parent == genpd) { 1801 ret = -EINVAL; 1802 goto out; 1803 } 1804 } 1805 1806 link->parent = genpd; 1807 list_add_tail(&link->parent_node, &genpd->parent_links); 1808 link->child = subdomain; 1809 list_add_tail(&link->child_node, &subdomain->child_links); 1810 if (genpd_status_on(subdomain)) 1811 genpd_sd_counter_inc(genpd); 1812 1813 out: 1814 genpd_unlock(genpd); 1815 genpd_unlock(subdomain); 1816 if (ret) 1817 kfree(link); 1818 return ret; 1819 } 1820 1821 /** 1822 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 1823 * @genpd: Leader PM domain to add the subdomain to. 1824 * @subdomain: Subdomain to be added. 1825 */ 1826 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, 1827 struct generic_pm_domain *subdomain) 1828 { 1829 int ret; 1830 1831 mutex_lock(&gpd_list_lock); 1832 ret = genpd_add_subdomain(genpd, subdomain); 1833 mutex_unlock(&gpd_list_lock); 1834 1835 return ret; 1836 } 1837 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain); 1838 1839 /** 1840 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 1841 * @genpd: Leader PM domain to remove the subdomain from. 1842 * @subdomain: Subdomain to be removed. 1843 */ 1844 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, 1845 struct generic_pm_domain *subdomain) 1846 { 1847 struct gpd_link *l, *link; 1848 int ret = -EINVAL; 1849 1850 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) 1851 return -EINVAL; 1852 1853 genpd_lock(subdomain); 1854 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); 1855 1856 if (!list_empty(&subdomain->parent_links) || subdomain->device_count) { 1857 pr_warn("%s: unable to remove subdomain %s\n", 1858 genpd->name, subdomain->name); 1859 ret = -EBUSY; 1860 goto out; 1861 } 1862 1863 list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) { 1864 if (link->child != subdomain) 1865 continue; 1866 1867 list_del(&link->parent_node); 1868 list_del(&link->child_node); 1869 kfree(link); 1870 if (genpd_status_on(subdomain)) 1871 genpd_sd_counter_dec(genpd); 1872 1873 ret = 0; 1874 break; 1875 } 1876 1877 out: 1878 genpd_unlock(genpd); 1879 genpd_unlock(subdomain); 1880 1881 return ret; 1882 } 1883 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain); 1884 1885 static void genpd_free_default_power_state(struct genpd_power_state *states, 1886 unsigned int state_count) 1887 { 1888 kfree(states); 1889 } 1890 1891 static int genpd_set_default_power_state(struct generic_pm_domain *genpd) 1892 { 1893 struct genpd_power_state *state; 1894 1895 state = kzalloc(sizeof(*state), GFP_KERNEL); 1896 if (!state) 1897 return -ENOMEM; 1898 1899 genpd->states = state; 1900 genpd->state_count = 1; 1901 genpd->free_states = genpd_free_default_power_state; 1902 1903 return 0; 1904 } 1905 1906 static void genpd_lock_init(struct generic_pm_domain *genpd) 1907 { 1908 if (genpd->flags & GENPD_FLAG_IRQ_SAFE) { 1909 spin_lock_init(&genpd->slock); 1910 genpd->lock_ops = &genpd_spin_ops; 1911 } else { 1912 mutex_init(&genpd->mlock); 1913 genpd->lock_ops = &genpd_mtx_ops; 1914 } 1915 } 1916 1917 /** 1918 * pm_genpd_init - Initialize a generic I/O PM domain object. 1919 * @genpd: PM domain object to initialize. 1920 * @gov: PM domain governor to associate with the domain (may be NULL). 1921 * @is_off: Initial value of the domain's power_is_off field. 1922 * 1923 * Returns 0 on successful initialization, else a negative error code. 1924 */ 1925 int pm_genpd_init(struct generic_pm_domain *genpd, 1926 struct dev_power_governor *gov, bool is_off) 1927 { 1928 int ret; 1929 1930 if (IS_ERR_OR_NULL(genpd)) 1931 return -EINVAL; 1932 1933 INIT_LIST_HEAD(&genpd->parent_links); 1934 INIT_LIST_HEAD(&genpd->child_links); 1935 INIT_LIST_HEAD(&genpd->dev_list); 1936 RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers); 1937 genpd_lock_init(genpd); 1938 genpd->gov = gov; 1939 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); 1940 atomic_set(&genpd->sd_count, 0); 1941 genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON; 1942 genpd->device_count = 0; 1943 genpd->max_off_time_ns = -1; 1944 genpd->max_off_time_changed = true; 1945 genpd->provider = NULL; 1946 genpd->has_provider = false; 1947 genpd->accounting_time = ktime_get(); 1948 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; 1949 genpd->domain.ops.runtime_resume = genpd_runtime_resume; 1950 genpd->domain.ops.prepare = genpd_prepare; 1951 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq; 1952 genpd->domain.ops.resume_noirq = genpd_resume_noirq; 1953 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq; 1954 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq; 1955 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq; 1956 genpd->domain.ops.restore_noirq = genpd_restore_noirq; 1957 genpd->domain.ops.complete = genpd_complete; 1958 genpd->domain.start = genpd_dev_pm_start; 1959 1960 if (genpd->flags & GENPD_FLAG_PM_CLK) { 1961 genpd->dev_ops.stop = pm_clk_suspend; 1962 genpd->dev_ops.start = pm_clk_resume; 1963 } 1964 1965 /* Always-on domains must be powered on at initialization. */ 1966 if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) && 1967 !genpd_status_on(genpd)) 1968 return -EINVAL; 1969 1970 if (genpd_is_cpu_domain(genpd) && 1971 !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL)) 1972 return -ENOMEM; 1973 1974 /* Use only one "off" state if there were no states declared */ 1975 if (genpd->state_count == 0) { 1976 ret = genpd_set_default_power_state(genpd); 1977 if (ret) { 1978 if (genpd_is_cpu_domain(genpd)) 1979 free_cpumask_var(genpd->cpus); 1980 return ret; 1981 } 1982 } else if (!gov && genpd->state_count > 1) { 1983 pr_warn("%s: no governor for states\n", genpd->name); 1984 } 1985 1986 device_initialize(&genpd->dev); 1987 dev_set_name(&genpd->dev, "%s", genpd->name); 1988 1989 mutex_lock(&gpd_list_lock); 1990 list_add(&genpd->gpd_list_node, &gpd_list); 1991 genpd_debug_add(genpd); 1992 mutex_unlock(&gpd_list_lock); 1993 1994 return 0; 1995 } 1996 EXPORT_SYMBOL_GPL(pm_genpd_init); 1997 1998 static int genpd_remove(struct generic_pm_domain *genpd) 1999 { 2000 struct gpd_link *l, *link; 2001 2002 if (IS_ERR_OR_NULL(genpd)) 2003 return -EINVAL; 2004 2005 genpd_lock(genpd); 2006 2007 if (genpd->has_provider) { 2008 genpd_unlock(genpd); 2009 pr_err("Provider present, unable to remove %s\n", genpd->name); 2010 return -EBUSY; 2011 } 2012 2013 if (!list_empty(&genpd->parent_links) || genpd->device_count) { 2014 genpd_unlock(genpd); 2015 pr_err("%s: unable to remove %s\n", __func__, genpd->name); 2016 return -EBUSY; 2017 } 2018 2019 list_for_each_entry_safe(link, l, &genpd->child_links, child_node) { 2020 list_del(&link->parent_node); 2021 list_del(&link->child_node); 2022 kfree(link); 2023 } 2024 2025 genpd_debug_remove(genpd); 2026 list_del(&genpd->gpd_list_node); 2027 genpd_unlock(genpd); 2028 cancel_work_sync(&genpd->power_off_work); 2029 if (genpd_is_cpu_domain(genpd)) 2030 free_cpumask_var(genpd->cpus); 2031 if (genpd->free_states) 2032 genpd->free_states(genpd->states, genpd->state_count); 2033 2034 pr_debug("%s: removed %s\n", __func__, genpd->name); 2035 2036 return 0; 2037 } 2038 2039 /** 2040 * pm_genpd_remove - Remove a generic I/O PM domain 2041 * @genpd: Pointer to PM domain that is to be removed. 2042 * 2043 * To remove the PM domain, this function: 2044 * - Removes the PM domain as a subdomain to any parent domains, 2045 * if it was added. 2046 * - Removes the PM domain from the list of registered PM domains. 2047 * 2048 * The PM domain will only be removed, if the associated provider has 2049 * been removed, it is not a parent to any other PM domain and has no 2050 * devices associated with it. 2051 */ 2052 int pm_genpd_remove(struct generic_pm_domain *genpd) 2053 { 2054 int ret; 2055 2056 mutex_lock(&gpd_list_lock); 2057 ret = genpd_remove(genpd); 2058 mutex_unlock(&gpd_list_lock); 2059 2060 return ret; 2061 } 2062 EXPORT_SYMBOL_GPL(pm_genpd_remove); 2063 2064 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF 2065 2066 /* 2067 * Device Tree based PM domain providers. 2068 * 2069 * The code below implements generic device tree based PM domain providers that 2070 * bind device tree nodes with generic PM domains registered in the system. 2071 * 2072 * Any driver that registers generic PM domains and needs to support binding of 2073 * devices to these domains is supposed to register a PM domain provider, which 2074 * maps a PM domain specifier retrieved from the device tree to a PM domain. 2075 * 2076 * Two simple mapping functions have been provided for convenience: 2077 * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping. 2078 * - genpd_xlate_onecell() for mapping of multiple PM domains per node by 2079 * index. 2080 */ 2081 2082 /** 2083 * struct of_genpd_provider - PM domain provider registration structure 2084 * @link: Entry in global list of PM domain providers 2085 * @node: Pointer to device tree node of PM domain provider 2086 * @xlate: Provider-specific xlate callback mapping a set of specifier cells 2087 * into a PM domain. 2088 * @data: context pointer to be passed into @xlate callback 2089 */ 2090 struct of_genpd_provider { 2091 struct list_head link; 2092 struct device_node *node; 2093 genpd_xlate_t xlate; 2094 void *data; 2095 }; 2096 2097 /* List of registered PM domain providers. */ 2098 static LIST_HEAD(of_genpd_providers); 2099 /* Mutex to protect the list above. */ 2100 static DEFINE_MUTEX(of_genpd_mutex); 2101 2102 /** 2103 * genpd_xlate_simple() - Xlate function for direct node-domain mapping 2104 * @genpdspec: OF phandle args to map into a PM domain 2105 * @data: xlate function private data - pointer to struct generic_pm_domain 2106 * 2107 * This is a generic xlate function that can be used to model PM domains that 2108 * have their own device tree nodes. The private data of xlate function needs 2109 * to be a valid pointer to struct generic_pm_domain. 2110 */ 2111 static struct generic_pm_domain *genpd_xlate_simple( 2112 struct of_phandle_args *genpdspec, 2113 void *data) 2114 { 2115 return data; 2116 } 2117 2118 /** 2119 * genpd_xlate_onecell() - Xlate function using a single index. 2120 * @genpdspec: OF phandle args to map into a PM domain 2121 * @data: xlate function private data - pointer to struct genpd_onecell_data 2122 * 2123 * This is a generic xlate function that can be used to model simple PM domain 2124 * controllers that have one device tree node and provide multiple PM domains. 2125 * A single cell is used as an index into an array of PM domains specified in 2126 * the genpd_onecell_data struct when registering the provider. 2127 */ 2128 static struct generic_pm_domain *genpd_xlate_onecell( 2129 struct of_phandle_args *genpdspec, 2130 void *data) 2131 { 2132 struct genpd_onecell_data *genpd_data = data; 2133 unsigned int idx = genpdspec->args[0]; 2134 2135 if (genpdspec->args_count != 1) 2136 return ERR_PTR(-EINVAL); 2137 2138 if (idx >= genpd_data->num_domains) { 2139 pr_err("%s: invalid domain index %u\n", __func__, idx); 2140 return ERR_PTR(-EINVAL); 2141 } 2142 2143 if (!genpd_data->domains[idx]) 2144 return ERR_PTR(-ENOENT); 2145 2146 return genpd_data->domains[idx]; 2147 } 2148 2149 /** 2150 * genpd_add_provider() - Register a PM domain provider for a node 2151 * @np: Device node pointer associated with the PM domain provider. 2152 * @xlate: Callback for decoding PM domain from phandle arguments. 2153 * @data: Context pointer for @xlate callback. 2154 */ 2155 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, 2156 void *data) 2157 { 2158 struct of_genpd_provider *cp; 2159 2160 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 2161 if (!cp) 2162 return -ENOMEM; 2163 2164 cp->node = of_node_get(np); 2165 cp->data = data; 2166 cp->xlate = xlate; 2167 2168 mutex_lock(&of_genpd_mutex); 2169 list_add(&cp->link, &of_genpd_providers); 2170 mutex_unlock(&of_genpd_mutex); 2171 pr_debug("Added domain provider from %pOF\n", np); 2172 2173 return 0; 2174 } 2175 2176 static bool genpd_present(const struct generic_pm_domain *genpd) 2177 { 2178 const struct generic_pm_domain *gpd; 2179 2180 list_for_each_entry(gpd, &gpd_list, gpd_list_node) 2181 if (gpd == genpd) 2182 return true; 2183 return false; 2184 } 2185 2186 /** 2187 * of_genpd_add_provider_simple() - Register a simple PM domain provider 2188 * @np: Device node pointer associated with the PM domain provider. 2189 * @genpd: Pointer to PM domain associated with the PM domain provider. 2190 */ 2191 int of_genpd_add_provider_simple(struct device_node *np, 2192 struct generic_pm_domain *genpd) 2193 { 2194 int ret = -EINVAL; 2195 2196 if (!np || !genpd) 2197 return -EINVAL; 2198 2199 mutex_lock(&gpd_list_lock); 2200 2201 if (!genpd_present(genpd)) 2202 goto unlock; 2203 2204 genpd->dev.of_node = np; 2205 2206 /* Parse genpd OPP table */ 2207 if (genpd->set_performance_state) { 2208 ret = dev_pm_opp_of_add_table(&genpd->dev); 2209 if (ret) { 2210 if (ret != -EPROBE_DEFER) 2211 dev_err(&genpd->dev, "Failed to add OPP table: %d\n", 2212 ret); 2213 goto unlock; 2214 } 2215 2216 /* 2217 * Save table for faster processing while setting performance 2218 * state. 2219 */ 2220 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); 2221 WARN_ON(IS_ERR(genpd->opp_table)); 2222 } 2223 2224 ret = genpd_add_provider(np, genpd_xlate_simple, genpd); 2225 if (ret) { 2226 if (genpd->set_performance_state) { 2227 dev_pm_opp_put_opp_table(genpd->opp_table); 2228 dev_pm_opp_of_remove_table(&genpd->dev); 2229 } 2230 2231 goto unlock; 2232 } 2233 2234 genpd->provider = &np->fwnode; 2235 genpd->has_provider = true; 2236 2237 unlock: 2238 mutex_unlock(&gpd_list_lock); 2239 2240 return ret; 2241 } 2242 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple); 2243 2244 /** 2245 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider 2246 * @np: Device node pointer associated with the PM domain provider. 2247 * @data: Pointer to the data associated with the PM domain provider. 2248 */ 2249 int of_genpd_add_provider_onecell(struct device_node *np, 2250 struct genpd_onecell_data *data) 2251 { 2252 struct generic_pm_domain *genpd; 2253 unsigned int i; 2254 int ret = -EINVAL; 2255 2256 if (!np || !data) 2257 return -EINVAL; 2258 2259 mutex_lock(&gpd_list_lock); 2260 2261 if (!data->xlate) 2262 data->xlate = genpd_xlate_onecell; 2263 2264 for (i = 0; i < data->num_domains; i++) { 2265 genpd = data->domains[i]; 2266 2267 if (!genpd) 2268 continue; 2269 if (!genpd_present(genpd)) 2270 goto error; 2271 2272 genpd->dev.of_node = np; 2273 2274 /* Parse genpd OPP table */ 2275 if (genpd->set_performance_state) { 2276 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i); 2277 if (ret) { 2278 if (ret != -EPROBE_DEFER) 2279 dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n", 2280 i, ret); 2281 goto error; 2282 } 2283 2284 /* 2285 * Save table for faster processing while setting 2286 * performance state. 2287 */ 2288 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); 2289 WARN_ON(IS_ERR(genpd->opp_table)); 2290 } 2291 2292 genpd->provider = &np->fwnode; 2293 genpd->has_provider = true; 2294 } 2295 2296 ret = genpd_add_provider(np, data->xlate, data); 2297 if (ret < 0) 2298 goto error; 2299 2300 mutex_unlock(&gpd_list_lock); 2301 2302 return 0; 2303 2304 error: 2305 while (i--) { 2306 genpd = data->domains[i]; 2307 2308 if (!genpd) 2309 continue; 2310 2311 genpd->provider = NULL; 2312 genpd->has_provider = false; 2313 2314 if (genpd->set_performance_state) { 2315 dev_pm_opp_put_opp_table(genpd->opp_table); 2316 dev_pm_opp_of_remove_table(&genpd->dev); 2317 } 2318 } 2319 2320 mutex_unlock(&gpd_list_lock); 2321 2322 return ret; 2323 } 2324 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell); 2325 2326 /** 2327 * of_genpd_del_provider() - Remove a previously registered PM domain provider 2328 * @np: Device node pointer associated with the PM domain provider 2329 */ 2330 void of_genpd_del_provider(struct device_node *np) 2331 { 2332 struct of_genpd_provider *cp, *tmp; 2333 struct generic_pm_domain *gpd; 2334 2335 mutex_lock(&gpd_list_lock); 2336 mutex_lock(&of_genpd_mutex); 2337 list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) { 2338 if (cp->node == np) { 2339 /* 2340 * For each PM domain associated with the 2341 * provider, set the 'has_provider' to false 2342 * so that the PM domain can be safely removed. 2343 */ 2344 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 2345 if (gpd->provider == &np->fwnode) { 2346 gpd->has_provider = false; 2347 2348 if (!gpd->set_performance_state) 2349 continue; 2350 2351 dev_pm_opp_put_opp_table(gpd->opp_table); 2352 dev_pm_opp_of_remove_table(&gpd->dev); 2353 } 2354 } 2355 2356 list_del(&cp->link); 2357 of_node_put(cp->node); 2358 kfree(cp); 2359 break; 2360 } 2361 } 2362 mutex_unlock(&of_genpd_mutex); 2363 mutex_unlock(&gpd_list_lock); 2364 } 2365 EXPORT_SYMBOL_GPL(of_genpd_del_provider); 2366 2367 /** 2368 * genpd_get_from_provider() - Look-up PM domain 2369 * @genpdspec: OF phandle args to use for look-up 2370 * 2371 * Looks for a PM domain provider under the node specified by @genpdspec and if 2372 * found, uses xlate function of the provider to map phandle args to a PM 2373 * domain. 2374 * 2375 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR() 2376 * on failure. 2377 */ 2378 static struct generic_pm_domain *genpd_get_from_provider( 2379 struct of_phandle_args *genpdspec) 2380 { 2381 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); 2382 struct of_genpd_provider *provider; 2383 2384 if (!genpdspec) 2385 return ERR_PTR(-EINVAL); 2386 2387 mutex_lock(&of_genpd_mutex); 2388 2389 /* Check if we have such a provider in our array */ 2390 list_for_each_entry(provider, &of_genpd_providers, link) { 2391 if (provider->node == genpdspec->np) 2392 genpd = provider->xlate(genpdspec, provider->data); 2393 if (!IS_ERR(genpd)) 2394 break; 2395 } 2396 2397 mutex_unlock(&of_genpd_mutex); 2398 2399 return genpd; 2400 } 2401 2402 /** 2403 * of_genpd_add_device() - Add a device to an I/O PM domain 2404 * @genpdspec: OF phandle args to use for look-up PM domain 2405 * @dev: Device to be added. 2406 * 2407 * Looks-up an I/O PM domain based upon phandle args provided and adds 2408 * the device to the PM domain. Returns a negative error code on failure. 2409 */ 2410 int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev) 2411 { 2412 struct generic_pm_domain *genpd; 2413 int ret; 2414 2415 mutex_lock(&gpd_list_lock); 2416 2417 genpd = genpd_get_from_provider(genpdspec); 2418 if (IS_ERR(genpd)) { 2419 ret = PTR_ERR(genpd); 2420 goto out; 2421 } 2422 2423 ret = genpd_add_device(genpd, dev, dev); 2424 2425 out: 2426 mutex_unlock(&gpd_list_lock); 2427 2428 return ret; 2429 } 2430 EXPORT_SYMBOL_GPL(of_genpd_add_device); 2431 2432 /** 2433 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 2434 * @parent_spec: OF phandle args to use for parent PM domain look-up 2435 * @subdomain_spec: OF phandle args to use for subdomain look-up 2436 * 2437 * Looks-up a parent PM domain and subdomain based upon phandle args 2438 * provided and adds the subdomain to the parent PM domain. Returns a 2439 * negative error code on failure. 2440 */ 2441 int of_genpd_add_subdomain(struct of_phandle_args *parent_spec, 2442 struct of_phandle_args *subdomain_spec) 2443 { 2444 struct generic_pm_domain *parent, *subdomain; 2445 int ret; 2446 2447 mutex_lock(&gpd_list_lock); 2448 2449 parent = genpd_get_from_provider(parent_spec); 2450 if (IS_ERR(parent)) { 2451 ret = PTR_ERR(parent); 2452 goto out; 2453 } 2454 2455 subdomain = genpd_get_from_provider(subdomain_spec); 2456 if (IS_ERR(subdomain)) { 2457 ret = PTR_ERR(subdomain); 2458 goto out; 2459 } 2460 2461 ret = genpd_add_subdomain(parent, subdomain); 2462 2463 out: 2464 mutex_unlock(&gpd_list_lock); 2465 2466 return ret; 2467 } 2468 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain); 2469 2470 /** 2471 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 2472 * @parent_spec: OF phandle args to use for parent PM domain look-up 2473 * @subdomain_spec: OF phandle args to use for subdomain look-up 2474 * 2475 * Looks-up a parent PM domain and subdomain based upon phandle args 2476 * provided and removes the subdomain from the parent PM domain. Returns a 2477 * negative error code on failure. 2478 */ 2479 int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec, 2480 struct of_phandle_args *subdomain_spec) 2481 { 2482 struct generic_pm_domain *parent, *subdomain; 2483 int ret; 2484 2485 mutex_lock(&gpd_list_lock); 2486 2487 parent = genpd_get_from_provider(parent_spec); 2488 if (IS_ERR(parent)) { 2489 ret = PTR_ERR(parent); 2490 goto out; 2491 } 2492 2493 subdomain = genpd_get_from_provider(subdomain_spec); 2494 if (IS_ERR(subdomain)) { 2495 ret = PTR_ERR(subdomain); 2496 goto out; 2497 } 2498 2499 ret = pm_genpd_remove_subdomain(parent, subdomain); 2500 2501 out: 2502 mutex_unlock(&gpd_list_lock); 2503 2504 return ret; 2505 } 2506 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain); 2507 2508 /** 2509 * of_genpd_remove_last - Remove the last PM domain registered for a provider 2510 * @provider: Pointer to device structure associated with provider 2511 * 2512 * Find the last PM domain that was added by a particular provider and 2513 * remove this PM domain from the list of PM domains. The provider is 2514 * identified by the 'provider' device structure that is passed. The PM 2515 * domain will only be removed, if the provider associated with domain 2516 * has been removed. 2517 * 2518 * Returns a valid pointer to struct generic_pm_domain on success or 2519 * ERR_PTR() on failure. 2520 */ 2521 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) 2522 { 2523 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT); 2524 int ret; 2525 2526 if (IS_ERR_OR_NULL(np)) 2527 return ERR_PTR(-EINVAL); 2528 2529 mutex_lock(&gpd_list_lock); 2530 list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) { 2531 if (gpd->provider == &np->fwnode) { 2532 ret = genpd_remove(gpd); 2533 genpd = ret ? ERR_PTR(ret) : gpd; 2534 break; 2535 } 2536 } 2537 mutex_unlock(&gpd_list_lock); 2538 2539 return genpd; 2540 } 2541 EXPORT_SYMBOL_GPL(of_genpd_remove_last); 2542 2543 static void genpd_release_dev(struct device *dev) 2544 { 2545 of_node_put(dev->of_node); 2546 kfree(dev); 2547 } 2548 2549 static struct bus_type genpd_bus_type = { 2550 .name = "genpd", 2551 }; 2552 2553 /** 2554 * genpd_dev_pm_detach - Detach a device from its PM domain. 2555 * @dev: Device to detach. 2556 * @power_off: Currently not used 2557 * 2558 * Try to locate a corresponding generic PM domain, which the device was 2559 * attached to previously. If such is found, the device is detached from it. 2560 */ 2561 static void genpd_dev_pm_detach(struct device *dev, bool power_off) 2562 { 2563 struct generic_pm_domain *pd; 2564 unsigned int i; 2565 int ret = 0; 2566 2567 pd = dev_to_genpd(dev); 2568 if (IS_ERR(pd)) 2569 return; 2570 2571 dev_dbg(dev, "removing from PM domain %s\n", pd->name); 2572 2573 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) { 2574 ret = genpd_remove_device(pd, dev); 2575 if (ret != -EAGAIN) 2576 break; 2577 2578 mdelay(i); 2579 cond_resched(); 2580 } 2581 2582 if (ret < 0) { 2583 dev_err(dev, "failed to remove from PM domain %s: %d", 2584 pd->name, ret); 2585 return; 2586 } 2587 2588 /* Check if PM domain can be powered off after removing this device. */ 2589 genpd_queue_power_off_work(pd); 2590 2591 /* Unregister the device if it was created by genpd. */ 2592 if (dev->bus == &genpd_bus_type) 2593 device_unregister(dev); 2594 } 2595 2596 static void genpd_dev_pm_sync(struct device *dev) 2597 { 2598 struct generic_pm_domain *pd; 2599 2600 pd = dev_to_genpd(dev); 2601 if (IS_ERR(pd)) 2602 return; 2603 2604 genpd_queue_power_off_work(pd); 2605 } 2606 2607 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev, 2608 unsigned int index, bool power_on) 2609 { 2610 struct of_phandle_args pd_args; 2611 struct generic_pm_domain *pd; 2612 int ret; 2613 2614 ret = of_parse_phandle_with_args(dev->of_node, "power-domains", 2615 "#power-domain-cells", index, &pd_args); 2616 if (ret < 0) 2617 return ret; 2618 2619 mutex_lock(&gpd_list_lock); 2620 pd = genpd_get_from_provider(&pd_args); 2621 of_node_put(pd_args.np); 2622 if (IS_ERR(pd)) { 2623 mutex_unlock(&gpd_list_lock); 2624 dev_dbg(dev, "%s() failed to find PM domain: %ld\n", 2625 __func__, PTR_ERR(pd)); 2626 return driver_deferred_probe_check_state(base_dev); 2627 } 2628 2629 dev_dbg(dev, "adding to PM domain %s\n", pd->name); 2630 2631 ret = genpd_add_device(pd, dev, base_dev); 2632 mutex_unlock(&gpd_list_lock); 2633 2634 if (ret < 0) { 2635 if (ret != -EPROBE_DEFER) 2636 dev_err(dev, "failed to add to PM domain %s: %d", 2637 pd->name, ret); 2638 return ret; 2639 } 2640 2641 dev->pm_domain->detach = genpd_dev_pm_detach; 2642 dev->pm_domain->sync = genpd_dev_pm_sync; 2643 2644 if (power_on) { 2645 genpd_lock(pd); 2646 ret = genpd_power_on(pd, 0); 2647 genpd_unlock(pd); 2648 } 2649 2650 if (ret) 2651 genpd_remove_device(pd, dev); 2652 2653 return ret ? -EPROBE_DEFER : 1; 2654 } 2655 2656 /** 2657 * genpd_dev_pm_attach - Attach a device to its PM domain using DT. 2658 * @dev: Device to attach. 2659 * 2660 * Parse device's OF node to find a PM domain specifier. If such is found, 2661 * attaches the device to retrieved pm_domain ops. 2662 * 2663 * Returns 1 on successfully attached PM domain, 0 when the device don't need a 2664 * PM domain or when multiple power-domains exists for it, else a negative error 2665 * code. Note that if a power-domain exists for the device, but it cannot be 2666 * found or turned on, then return -EPROBE_DEFER to ensure that the device is 2667 * not probed and to re-try again later. 2668 */ 2669 int genpd_dev_pm_attach(struct device *dev) 2670 { 2671 if (!dev->of_node) 2672 return 0; 2673 2674 /* 2675 * Devices with multiple PM domains must be attached separately, as we 2676 * can only attach one PM domain per device. 2677 */ 2678 if (of_count_phandle_with_args(dev->of_node, "power-domains", 2679 "#power-domain-cells") != 1) 2680 return 0; 2681 2682 return __genpd_dev_pm_attach(dev, dev, 0, true); 2683 } 2684 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); 2685 2686 /** 2687 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains. 2688 * @dev: The device used to lookup the PM domain. 2689 * @index: The index of the PM domain. 2690 * 2691 * Parse device's OF node to find a PM domain specifier at the provided @index. 2692 * If such is found, creates a virtual device and attaches it to the retrieved 2693 * pm_domain ops. To deal with detaching of the virtual device, the ->detach() 2694 * callback in the struct dev_pm_domain are assigned to genpd_dev_pm_detach(). 2695 * 2696 * Returns the created virtual device if successfully attached PM domain, NULL 2697 * when the device don't need a PM domain, else an ERR_PTR() in case of 2698 * failures. If a power-domain exists for the device, but cannot be found or 2699 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device 2700 * is not probed and to re-try again later. 2701 */ 2702 struct device *genpd_dev_pm_attach_by_id(struct device *dev, 2703 unsigned int index) 2704 { 2705 struct device *virt_dev; 2706 int num_domains; 2707 int ret; 2708 2709 if (!dev->of_node) 2710 return NULL; 2711 2712 /* Verify that the index is within a valid range. */ 2713 num_domains = of_count_phandle_with_args(dev->of_node, "power-domains", 2714 "#power-domain-cells"); 2715 if (index >= num_domains) 2716 return NULL; 2717 2718 /* Allocate and register device on the genpd bus. */ 2719 virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL); 2720 if (!virt_dev) 2721 return ERR_PTR(-ENOMEM); 2722 2723 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev)); 2724 virt_dev->bus = &genpd_bus_type; 2725 virt_dev->release = genpd_release_dev; 2726 virt_dev->of_node = of_node_get(dev->of_node); 2727 2728 ret = device_register(virt_dev); 2729 if (ret) { 2730 put_device(virt_dev); 2731 return ERR_PTR(ret); 2732 } 2733 2734 /* Try to attach the device to the PM domain at the specified index. */ 2735 ret = __genpd_dev_pm_attach(virt_dev, dev, index, false); 2736 if (ret < 1) { 2737 device_unregister(virt_dev); 2738 return ret ? ERR_PTR(ret) : NULL; 2739 } 2740 2741 pm_runtime_enable(virt_dev); 2742 genpd_queue_power_off_work(dev_to_genpd(virt_dev)); 2743 2744 return virt_dev; 2745 } 2746 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); 2747 2748 /** 2749 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains. 2750 * @dev: The device used to lookup the PM domain. 2751 * @name: The name of the PM domain. 2752 * 2753 * Parse device's OF node to find a PM domain specifier using the 2754 * power-domain-names DT property. For further description see 2755 * genpd_dev_pm_attach_by_id(). 2756 */ 2757 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name) 2758 { 2759 int index; 2760 2761 if (!dev->of_node) 2762 return NULL; 2763 2764 index = of_property_match_string(dev->of_node, "power-domain-names", 2765 name); 2766 if (index < 0) 2767 return NULL; 2768 2769 return genpd_dev_pm_attach_by_id(dev, index); 2770 } 2771 2772 static const struct of_device_id idle_state_match[] = { 2773 { .compatible = "domain-idle-state", }, 2774 { } 2775 }; 2776 2777 static int genpd_parse_state(struct genpd_power_state *genpd_state, 2778 struct device_node *state_node) 2779 { 2780 int err; 2781 u32 residency; 2782 u32 entry_latency, exit_latency; 2783 2784 err = of_property_read_u32(state_node, "entry-latency-us", 2785 &entry_latency); 2786 if (err) { 2787 pr_debug(" * %pOF missing entry-latency-us property\n", 2788 state_node); 2789 return -EINVAL; 2790 } 2791 2792 err = of_property_read_u32(state_node, "exit-latency-us", 2793 &exit_latency); 2794 if (err) { 2795 pr_debug(" * %pOF missing exit-latency-us property\n", 2796 state_node); 2797 return -EINVAL; 2798 } 2799 2800 err = of_property_read_u32(state_node, "min-residency-us", &residency); 2801 if (!err) 2802 genpd_state->residency_ns = 1000 * residency; 2803 2804 genpd_state->power_on_latency_ns = 1000 * exit_latency; 2805 genpd_state->power_off_latency_ns = 1000 * entry_latency; 2806 genpd_state->fwnode = &state_node->fwnode; 2807 2808 return 0; 2809 } 2810 2811 static int genpd_iterate_idle_states(struct device_node *dn, 2812 struct genpd_power_state *states) 2813 { 2814 int ret; 2815 struct of_phandle_iterator it; 2816 struct device_node *np; 2817 int i = 0; 2818 2819 ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL); 2820 if (ret <= 0) 2821 return ret == -ENOENT ? 0 : ret; 2822 2823 /* Loop over the phandles until all the requested entry is found */ 2824 of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) { 2825 np = it.node; 2826 if (!of_match_node(idle_state_match, np)) 2827 continue; 2828 if (states) { 2829 ret = genpd_parse_state(&states[i], np); 2830 if (ret) { 2831 pr_err("Parsing idle state node %pOF failed with err %d\n", 2832 np, ret); 2833 of_node_put(np); 2834 return ret; 2835 } 2836 } 2837 i++; 2838 } 2839 2840 return i; 2841 } 2842 2843 /** 2844 * of_genpd_parse_idle_states: Return array of idle states for the genpd. 2845 * 2846 * @dn: The genpd device node 2847 * @states: The pointer to which the state array will be saved. 2848 * @n: The count of elements in the array returned from this function. 2849 * 2850 * Returns the device states parsed from the OF node. The memory for the states 2851 * is allocated by this function and is the responsibility of the caller to 2852 * free the memory after use. If any or zero compatible domain idle states is 2853 * found it returns 0 and in case of errors, a negative error code is returned. 2854 */ 2855 int of_genpd_parse_idle_states(struct device_node *dn, 2856 struct genpd_power_state **states, int *n) 2857 { 2858 struct genpd_power_state *st; 2859 int ret; 2860 2861 ret = genpd_iterate_idle_states(dn, NULL); 2862 if (ret < 0) 2863 return ret; 2864 2865 if (!ret) { 2866 *states = NULL; 2867 *n = 0; 2868 return 0; 2869 } 2870 2871 st = kcalloc(ret, sizeof(*st), GFP_KERNEL); 2872 if (!st) 2873 return -ENOMEM; 2874 2875 ret = genpd_iterate_idle_states(dn, st); 2876 if (ret <= 0) { 2877 kfree(st); 2878 return ret < 0 ? ret : -EINVAL; 2879 } 2880 2881 *states = st; 2882 *n = ret; 2883 2884 return 0; 2885 } 2886 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states); 2887 2888 /** 2889 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node. 2890 * 2891 * @genpd_dev: Genpd's device for which the performance-state needs to be found. 2892 * @opp: struct dev_pm_opp of the OPP for which we need to find performance 2893 * state. 2894 * 2895 * Returns performance state encoded in the OPP of the genpd. This calls 2896 * platform specific genpd->opp_to_performance_state() callback to translate 2897 * power domain OPP to performance state. 2898 * 2899 * Returns performance state on success and 0 on failure. 2900 */ 2901 unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev, 2902 struct dev_pm_opp *opp) 2903 { 2904 struct generic_pm_domain *genpd = NULL; 2905 int state; 2906 2907 genpd = container_of(genpd_dev, struct generic_pm_domain, dev); 2908 2909 if (unlikely(!genpd->opp_to_performance_state)) 2910 return 0; 2911 2912 genpd_lock(genpd); 2913 state = genpd->opp_to_performance_state(genpd, opp); 2914 genpd_unlock(genpd); 2915 2916 return state; 2917 } 2918 EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state); 2919 2920 static int __init genpd_bus_init(void) 2921 { 2922 return bus_register(&genpd_bus_type); 2923 } 2924 core_initcall(genpd_bus_init); 2925 2926 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ 2927 2928 2929 /*** debugfs support ***/ 2930 2931 #ifdef CONFIG_DEBUG_FS 2932 /* 2933 * TODO: This function is a slightly modified version of rtpm_status_show 2934 * from sysfs.c, so generalize it. 2935 */ 2936 static void rtpm_status_str(struct seq_file *s, struct device *dev) 2937 { 2938 static const char * const status_lookup[] = { 2939 [RPM_ACTIVE] = "active", 2940 [RPM_RESUMING] = "resuming", 2941 [RPM_SUSPENDED] = "suspended", 2942 [RPM_SUSPENDING] = "suspending" 2943 }; 2944 const char *p = ""; 2945 2946 if (dev->power.runtime_error) 2947 p = "error"; 2948 else if (dev->power.disable_depth) 2949 p = "unsupported"; 2950 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup)) 2951 p = status_lookup[dev->power.runtime_status]; 2952 else 2953 WARN_ON(1); 2954 2955 seq_puts(s, p); 2956 } 2957 2958 static int genpd_summary_one(struct seq_file *s, 2959 struct generic_pm_domain *genpd) 2960 { 2961 static const char * const status_lookup[] = { 2962 [GENPD_STATE_ON] = "on", 2963 [GENPD_STATE_OFF] = "off" 2964 }; 2965 struct pm_domain_data *pm_data; 2966 const char *kobj_path; 2967 struct gpd_link *link; 2968 char state[16]; 2969 int ret; 2970 2971 ret = genpd_lock_interruptible(genpd); 2972 if (ret) 2973 return -ERESTARTSYS; 2974 2975 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) 2976 goto exit; 2977 if (!genpd_status_on(genpd)) 2978 snprintf(state, sizeof(state), "%s-%u", 2979 status_lookup[genpd->status], genpd->state_idx); 2980 else 2981 snprintf(state, sizeof(state), "%s", 2982 status_lookup[genpd->status]); 2983 seq_printf(s, "%-30s %-15s ", genpd->name, state); 2984 2985 /* 2986 * Modifications on the list require holding locks on both 2987 * parent and child, so we are safe. 2988 * Also genpd->name is immutable. 2989 */ 2990 list_for_each_entry(link, &genpd->parent_links, parent_node) { 2991 seq_printf(s, "%s", link->child->name); 2992 if (!list_is_last(&link->parent_node, &genpd->parent_links)) 2993 seq_puts(s, ", "); 2994 } 2995 2996 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { 2997 kobj_path = kobject_get_path(&pm_data->dev->kobj, 2998 genpd_is_irq_safe(genpd) ? 2999 GFP_ATOMIC : GFP_KERNEL); 3000 if (kobj_path == NULL) 3001 continue; 3002 3003 seq_printf(s, "\n %-50s ", kobj_path); 3004 rtpm_status_str(s, pm_data->dev); 3005 kfree(kobj_path); 3006 } 3007 3008 seq_puts(s, "\n"); 3009 exit: 3010 genpd_unlock(genpd); 3011 3012 return 0; 3013 } 3014 3015 static int summary_show(struct seq_file *s, void *data) 3016 { 3017 struct generic_pm_domain *genpd; 3018 int ret = 0; 3019 3020 seq_puts(s, "domain status children\n"); 3021 seq_puts(s, " /device runtime status\n"); 3022 seq_puts(s, "----------------------------------------------------------------------\n"); 3023 3024 ret = mutex_lock_interruptible(&gpd_list_lock); 3025 if (ret) 3026 return -ERESTARTSYS; 3027 3028 list_for_each_entry(genpd, &gpd_list, gpd_list_node) { 3029 ret = genpd_summary_one(s, genpd); 3030 if (ret) 3031 break; 3032 } 3033 mutex_unlock(&gpd_list_lock); 3034 3035 return ret; 3036 } 3037 3038 static int status_show(struct seq_file *s, void *data) 3039 { 3040 static const char * const status_lookup[] = { 3041 [GENPD_STATE_ON] = "on", 3042 [GENPD_STATE_OFF] = "off" 3043 }; 3044 3045 struct generic_pm_domain *genpd = s->private; 3046 int ret = 0; 3047 3048 ret = genpd_lock_interruptible(genpd); 3049 if (ret) 3050 return -ERESTARTSYS; 3051 3052 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) 3053 goto exit; 3054 3055 if (genpd->status == GENPD_STATE_OFF) 3056 seq_printf(s, "%s-%u\n", status_lookup[genpd->status], 3057 genpd->state_idx); 3058 else 3059 seq_printf(s, "%s\n", status_lookup[genpd->status]); 3060 exit: 3061 genpd_unlock(genpd); 3062 return ret; 3063 } 3064 3065 static int sub_domains_show(struct seq_file *s, void *data) 3066 { 3067 struct generic_pm_domain *genpd = s->private; 3068 struct gpd_link *link; 3069 int ret = 0; 3070 3071 ret = genpd_lock_interruptible(genpd); 3072 if (ret) 3073 return -ERESTARTSYS; 3074 3075 list_for_each_entry(link, &genpd->parent_links, parent_node) 3076 seq_printf(s, "%s\n", link->child->name); 3077 3078 genpd_unlock(genpd); 3079 return ret; 3080 } 3081 3082 static int idle_states_show(struct seq_file *s, void *data) 3083 { 3084 struct generic_pm_domain *genpd = s->private; 3085 unsigned int i; 3086 int ret = 0; 3087 3088 ret = genpd_lock_interruptible(genpd); 3089 if (ret) 3090 return -ERESTARTSYS; 3091 3092 seq_puts(s, "State Time Spent(ms) Usage Rejected\n"); 3093 3094 for (i = 0; i < genpd->state_count; i++) { 3095 ktime_t delta = 0; 3096 s64 msecs; 3097 3098 if ((genpd->status == GENPD_STATE_OFF) && 3099 (genpd->state_idx == i)) 3100 delta = ktime_sub(ktime_get(), genpd->accounting_time); 3101 3102 msecs = ktime_to_ms( 3103 ktime_add(genpd->states[i].idle_time, delta)); 3104 seq_printf(s, "S%-13i %-14lld %-14llu %llu\n", i, msecs, 3105 genpd->states[i].usage, genpd->states[i].rejected); 3106 } 3107 3108 genpd_unlock(genpd); 3109 return ret; 3110 } 3111 3112 static int active_time_show(struct seq_file *s, void *data) 3113 { 3114 struct generic_pm_domain *genpd = s->private; 3115 ktime_t delta = 0; 3116 int ret = 0; 3117 3118 ret = genpd_lock_interruptible(genpd); 3119 if (ret) 3120 return -ERESTARTSYS; 3121 3122 if (genpd->status == GENPD_STATE_ON) 3123 delta = ktime_sub(ktime_get(), genpd->accounting_time); 3124 3125 seq_printf(s, "%lld ms\n", ktime_to_ms( 3126 ktime_add(genpd->on_time, delta))); 3127 3128 genpd_unlock(genpd); 3129 return ret; 3130 } 3131 3132 static int total_idle_time_show(struct seq_file *s, void *data) 3133 { 3134 struct generic_pm_domain *genpd = s->private; 3135 ktime_t delta = 0, total = 0; 3136 unsigned int i; 3137 int ret = 0; 3138 3139 ret = genpd_lock_interruptible(genpd); 3140 if (ret) 3141 return -ERESTARTSYS; 3142 3143 for (i = 0; i < genpd->state_count; i++) { 3144 3145 if ((genpd->status == GENPD_STATE_OFF) && 3146 (genpd->state_idx == i)) 3147 delta = ktime_sub(ktime_get(), genpd->accounting_time); 3148 3149 total = ktime_add(total, genpd->states[i].idle_time); 3150 } 3151 total = ktime_add(total, delta); 3152 3153 seq_printf(s, "%lld ms\n", ktime_to_ms(total)); 3154 3155 genpd_unlock(genpd); 3156 return ret; 3157 } 3158 3159 3160 static int devices_show(struct seq_file *s, void *data) 3161 { 3162 struct generic_pm_domain *genpd = s->private; 3163 struct pm_domain_data *pm_data; 3164 const char *kobj_path; 3165 int ret = 0; 3166 3167 ret = genpd_lock_interruptible(genpd); 3168 if (ret) 3169 return -ERESTARTSYS; 3170 3171 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { 3172 kobj_path = kobject_get_path(&pm_data->dev->kobj, 3173 genpd_is_irq_safe(genpd) ? 3174 GFP_ATOMIC : GFP_KERNEL); 3175 if (kobj_path == NULL) 3176 continue; 3177 3178 seq_printf(s, "%s\n", kobj_path); 3179 kfree(kobj_path); 3180 } 3181 3182 genpd_unlock(genpd); 3183 return ret; 3184 } 3185 3186 static int perf_state_show(struct seq_file *s, void *data) 3187 { 3188 struct generic_pm_domain *genpd = s->private; 3189 3190 if (genpd_lock_interruptible(genpd)) 3191 return -ERESTARTSYS; 3192 3193 seq_printf(s, "%u\n", genpd->performance_state); 3194 3195 genpd_unlock(genpd); 3196 return 0; 3197 } 3198 3199 DEFINE_SHOW_ATTRIBUTE(summary); 3200 DEFINE_SHOW_ATTRIBUTE(status); 3201 DEFINE_SHOW_ATTRIBUTE(sub_domains); 3202 DEFINE_SHOW_ATTRIBUTE(idle_states); 3203 DEFINE_SHOW_ATTRIBUTE(active_time); 3204 DEFINE_SHOW_ATTRIBUTE(total_idle_time); 3205 DEFINE_SHOW_ATTRIBUTE(devices); 3206 DEFINE_SHOW_ATTRIBUTE(perf_state); 3207 3208 static void genpd_debug_add(struct generic_pm_domain *genpd) 3209 { 3210 struct dentry *d; 3211 3212 if (!genpd_debugfs_dir) 3213 return; 3214 3215 d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); 3216 3217 debugfs_create_file("current_state", 0444, 3218 d, genpd, &status_fops); 3219 debugfs_create_file("sub_domains", 0444, 3220 d, genpd, &sub_domains_fops); 3221 debugfs_create_file("idle_states", 0444, 3222 d, genpd, &idle_states_fops); 3223 debugfs_create_file("active_time", 0444, 3224 d, genpd, &active_time_fops); 3225 debugfs_create_file("total_idle_time", 0444, 3226 d, genpd, &total_idle_time_fops); 3227 debugfs_create_file("devices", 0444, 3228 d, genpd, &devices_fops); 3229 if (genpd->set_performance_state) 3230 debugfs_create_file("perf_state", 0444, 3231 d, genpd, &perf_state_fops); 3232 } 3233 3234 static int __init genpd_debug_init(void) 3235 { 3236 struct generic_pm_domain *genpd; 3237 3238 genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); 3239 3240 debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir, 3241 NULL, &summary_fops); 3242 3243 list_for_each_entry(genpd, &gpd_list, gpd_list_node) 3244 genpd_debug_add(genpd); 3245 3246 return 0; 3247 } 3248 late_initcall(genpd_debug_init); 3249 3250 static void __exit genpd_debug_exit(void) 3251 { 3252 debugfs_remove_recursive(genpd_debugfs_dir); 3253 } 3254 __exitcall(genpd_debug_exit); 3255 #endif /* CONFIG_DEBUG_FS */ 3256