/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
	if (!__retval && __elapsed > __td->field) {				\
		__td->field = __elapsed;					\
		dev_dbg(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
		genpd->max_off_time_changed = true;				\
		__td->constraint_changed = true;				\
	}									\
	__retval;								\
})

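/*
 * For illustration, GENPD_DEV_CALLBACK(genpd, int, stop, dev) expands
 * roughly to (a sketch, not the literal preprocessor output):
 *
 *	int __ret = 0;
 *	if (genpd->dev_ops.stop)
 *		__ret = genpd->dev_ops.stop(dev);
 *
 * The timed variant additionally measures how long the callback took and,
 * if the callback succeeded and a new worst case was observed, records it
 * in the device's timing data so the governor can recompute its limits.
 */
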
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(domain_name))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!strcmp(gpd->name, domain_name)) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);
	return genpd;
}

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}

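/*
 * Note: genpd_acquire_lock() returns with genpd->lock held *and* the domain
 * in one of the two stable states (active or powered off), so callers never
 * observe a domain in the middle of a power transition.  The transient
 * states are waited out on status_wait_queue until whoever owns the
 * transition wakes us up.
 */
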
static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
	s64 usecs64;

	if (!genpd->cpuidle_data)
		return;

	usecs64 = genpd->power_on_latency_ns;
	do_div(usecs64, NSEC_PER_USEC);
	usecs64 += genpd->cpuidle_data->saved_exit_latency;
	genpd->cpuidle_data->idle_state->exit_latency = usecs64;
}

static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_on_latency_ns)
		return ret;

	genpd->power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	genpd_recalc_cpu_exit_latency(genpd);
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->power_off_latency_ns)
		return ret;

	genpd->power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	if (genpd->cpuidle_data) {
		cpuidle_pause_and_lock();
		genpd->cpuidle_data->idle_state->disabled = true;
		cpuidle_resume_and_unlock();
		goto out;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for parent" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	ret = genpd_power_on(genpd, true);
	if (ret)
		goto err;

 out:
	genpd_set_active(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
	struct generic_pm_domain *genpd;

	genpd = pm_genpd_lookup_name(domain_name);
	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}

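/*
 * Illustrative use (hypothetical caller): platform code that must make sure
 * a domain is powered before touching its hardware could do
 *
 *	ret = pm_genpd_name_poweron("my-domain");
 *	if (ret)
 *		return ret;
 *
 * where "my-domain" is a made-up name registered earlier via pm_genpd_init().
 * This powers up the named domain and, recursively, all of its masters.
 */
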
static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
				     struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

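/*
 * Note: the notifier above deliberately walks up the device hierarchy.  A
 * PM QoS change on a child can affect the domain of every ancestor, so the
 * cached "max off time" of each of them is invalidated until a parent that
 * ignores its children (power.ignore_children) terminates the walk.
 */
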
/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	if (gpd_data->need_restore > 0)
		return 0;

	/*
	 * If the value of the need_restore flag is still unknown at this point,
	 * we trust that pm_genpd_poweroff() has verified that the device is
	 * already runtime PM suspended.
	 */
	if (gpd_data->need_restore < 0) {
		gpd_data->need_restore = 1;
		return 0;
	}

	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = 1;

	return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int need_restore = gpd_data->need_restore;

	gpd_data->need_restore = 0;
	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);

	/*
	 * Call genpd_restore_dev() for recently added devices too (need_restore
	 * is negative then).
	 */
	if (need_restore)
		genpd_restore_dev(genpd, dev);

	mutex_lock(&genpd->lock);
}

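/*
 * The need_restore flag above is effectively tri-state:
 *	< 0  unknown - the device was added recently and no runtime PM
 *	     callback has run for it yet;
 *	  0  the device is active, nothing to restore;
 *	> 0  its state has been saved and must be restored on resume.
 */
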
/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_WAIT_MASTER
		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;
	}

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->cpuidle_data) {
		/*
		 * If cpuidle_data is set, cpuidle should turn the domain off
		 * when the CPU in it is idle.  In that case we don't decrement
		 * the subdomain counts of the master domains, so that power is
		 * not removed from the current domain prematurely as a result
		 * of cutting off the masters' power.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		cpuidle_pause_and_lock();
		genpd->cpuidle_data->idle_state->disabled = false;
		cpuidle_resume_and_unlock();
		goto out;
	}

	if (genpd->power_off) {
		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd_power_off(genpd, true);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}

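/*
 * In short, the protocol above is: only one task at a time may own a power
 * off operation (poweroff_task).  A second caller that finds an owner just
 * sets GPD_STATE_REPEAT and leaves; the owner notices that after each saved
 * device and restarts from "start:" so no concurrent resume is ever missed.
 */
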
/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);

	/*
	 * If we have an unknown state of the need_restore flag, it means none
	 * of the runtime PM callbacks has been invoked yet.  Let's update the
	 * flag to reflect that the current state is active.
	 */
	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->need_restore < 0)
		gpd_data->need_restore = 0;

	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		return genpd_start_dev_no_timing(genpd, dev);

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

	return 0;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

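/*
 * Usage note: booting with "pd_ignore_unused" on the kernel command line
 * makes the late initcall below skip powering off otherwise-unused domains,
 * which can be handy when debugging a platform's power domain setup.
 */
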
/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}

static int __init genpd_poweroff_unused(void)
{
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(genpd_poweroff_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @timed: True if latency measurements are allowed.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
				   bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	genpd_power_off(genpd, timed);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master, timed);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @timed: True if latency measurements are allowed.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
				  bool timed)
{
	struct gpd_link *link;

	if (genpd->status != GPD_STATE_POWER_OFF)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master, timed);
		genpd_sd_counter_inc(link->master);
	}

	genpd_power_on(genpd, timed);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

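/*
 * Illustrative truth table for the return value above (wakeup-capable
 * devices only):
 *
 *	device_may_wakeup	active_wakeup	resume_needed
 *	true			true		true	(case 1)
 *	true			false		false
 *	false			true		false
 *	false			false		true	(case 2)
 */
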
/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put(dev);
	return ret;
}

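/*
 * Note on suspend_power_off: if the first device prepared found its domain
 * already powered off, that state is latched here, and every system suspend,
 * freeze, and poweroff callback below simply returns early, so a domain that
 * was off before the transition stays untouched throughout it.
 */
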
/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd, true);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd, true);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			genpd_power_off(genpd, true);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd, true);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_request_idle(dev);
	}
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to power the domain off (true) or back on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd, false);
	} else {
		pm_genpd_sync_poweron(genpd, false);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);

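/*
 * A sketch of the intended use (hypothetical caller): an "always on" device
 * such as a clocksource that is suspended in the syscore phase would call
 * pm_genpd_syscore_poweroff(dev) from its syscore suspend hook and
 * pm_genpd_syscore_poweron(dev) from its syscore resume hook, so its domain
 * can be powered down and brought back without latency bookkeeping
 * (timed == false).
 */
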
#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_prepare	NULL
#define pm_genpd_suspend	NULL
#define pm_genpd_suspend_late	NULL
#define pm_genpd_suspend_noirq	NULL
#define pm_genpd_resume_early	NULL
#define pm_genpd_resume_noirq	NULL
#define pm_genpd_resume		NULL
#define pm_genpd_freeze		NULL
#define pm_genpd_freeze_late	NULL
#define pm_genpd_freeze_noirq	NULL
#define pm_genpd_thaw_early	NULL
#define pm_genpd_thaw_noirq	NULL
#define pm_genpd_thaw		NULL
#define pm_genpd_restore_noirq	NULL
#define pm_genpd_complete	NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct generic_pm_domain *genpd,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->need_restore = -1;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;
	dev->pm_domain = &genpd->domain;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = NULL;
	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	genpd_release_lock(genpd);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
			       struct gpd_timing_data *td)
{
	return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (!genpd || genpd != pm_genpd_lookup_dev(dev))
		return -EINVAL;

	/* The above validation also means we have existing domain_data. */
	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	list_del_init(&pdd->list_node);

	genpd_release_lock(genpd);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_release_lock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (subdomain->status != GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	if (genpd->status == GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}

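/*
 * Note: the "goto start" retry in pm_genpd_add_subdomain() (and in the
 * subdomain removal path below) deals with catching the subdomain in the
 * middle of a power transition: both locks are dropped and re-taken until
 * the subdomain is observed in a stable (active or powered off) state.
 */
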
/**
 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
 * @master_name: Name of the master PM domain to add the subdomain to.
 * @subdomain_name: Name of the subdomain to be added.
 */
int pm_genpd_add_subdomain_names(const char *master_name,
				 const char *subdomain_name)
{
	struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;

	if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!master && !strcmp(gpd->name, master_name))
			master = gpd;

		if (!subdomain && !strcmp(gpd->name, subdomain_name))
			subdomain = gpd;

		if (master && subdomain)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return pm_genpd_add_subdomain(master, subdomain);
}

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
 * @genpd: PM domain to be connected with cpuidle.
 * @state: cpuidle state this domain can disable/enable.
 *
 * Make a PM domain behave as though it contained a CPU core, that is, instead
 * of calling its power down routine it will enable the given cpuidle state so
 * that the cpuidle subsystem can power it down (if possible and desirable).
 */
int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
	struct cpuidle_driver *cpuidle_drv;
	struct gpd_cpuidle_data *cpuidle_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || state < 0)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->cpuidle_data) {
		ret = -EEXIST;
		goto out;
	}
	cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
	if (!cpuidle_data) {
		ret = -ENOMEM;
		goto out;
	}
	cpuidle_drv = cpuidle_driver_ref();
	if (!cpuidle_drv) {
		ret = -ENODEV;
		goto err_drv;
	}
	if (cpuidle_drv->state_count <= state) {
		ret = -EINVAL;
		goto err;
	}
	idle_state = &cpuidle_drv->states[state];
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto err;
	}
	cpuidle_data->idle_state = idle_state;
	cpuidle_data->saved_exit_latency = idle_state->exit_latency;
	genpd->cpuidle_data = cpuidle_data;
	genpd_recalc_cpu_exit_latency(genpd);

 out:
	genpd_release_lock(genpd);
	return ret;

 err:
	cpuidle_driver_unref();

 err_drv:
	kfree(cpuidle_data);
	goto out;
}

/**
 * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
 * @name: Name of the domain to connect to cpuidle.
 * @state: cpuidle state this domain can manipulate.
 */
int pm_genpd_name_attach_cpuidle(const char *name, int state)
{
	return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
}

/**
 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
 * @genpd: PM domain to remove the cpuidle connection from.
 *
 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
 * given PM domain.
 */
int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
	struct gpd_cpuidle_data *cpuidle_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	cpuidle_data = genpd->cpuidle_data;
	if (!cpuidle_data) {
		ret = -ENODEV;
		goto out;
	}
	idle_state = cpuidle_data->idle_state;
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto out;
	}
	idle_state->exit_latency = cpuidle_data->saved_exit_latency;
	cpuidle_driver_unref();
	genpd->cpuidle_data = NULL;
	kfree(cpuidle_data);

 out:
	genpd_release_lock(genpd);
	return ret;
}

/**
 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
 * @name: Name of the domain to disconnect cpuidle from.
 */
int pm_genpd_name_detach_cpuidle(const char *name)
{
	return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
}

/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default PM domains "restore device state".
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

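/*
 * The two default callbacks above resolve the runtime PM callback in the
 * usual device-layer order: type, then class, then bus, and finally fall
 * back to the driver's own callback if none of those layers provides one.
 * A missing callback is treated as success.
 */
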
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Whether the domain is initially off (determines the initial status).
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}

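/*
 * A minimal initialization sketch (all names below are hypothetical):
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	pm_genpd_init(&my_pd, NULL, true);
 *	ret = __pm_genpd_add_device(&my_pd, dev, NULL);
 *
 * This registers a domain that starts powered off, uses no governor, and
 * then binds a device to it.
 */
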
#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
        struct list_head link;
        struct device_node *node;
        genpd_xlate_t xlate;
        void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of the xlate function
 * must be a valid pointer to struct generic_pm_domain.
 */
struct generic_pm_domain *__of_genpd_xlate_simple(
                                        struct of_phandle_args *genpdspec,
                                        void *data)
{
        if (genpdspec->args_count != 0)
                return ERR_PTR(-EINVAL);
        return data;
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);

/**
 * __of_genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
struct generic_pm_domain *__of_genpd_xlate_onecell(
                                        struct of_phandle_args *genpdspec,
                                        void *data)
{
        struct genpd_onecell_data *genpd_data = data;
        unsigned int idx = genpdspec->args[0];

        if (genpdspec->args_count != 1)
                return ERR_PTR(-EINVAL);

        if (idx >= genpd_data->num_domains) {
                pr_err("%s: invalid domain index %u\n", __func__, idx);
                return ERR_PTR(-EINVAL);
        }

        if (!genpd_data->domains[idx])
                return ERR_PTR(-ENOENT);

        return genpd_data->domains[idx];
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
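/*
 * Editor's illustration (not part of the original file): a controller
 * exposing two domains behind a single device tree node pairs
 * __of_genpd_xlate_onecell() with a genpd_onecell_data describing its domain
 * array, and registers both through __of_genpd_add_provider() defined below.
 * All "foo" names are invented and error handling is abridged.
 *
 * Hypothetical matching DT node:
 *
 *      power: power-controller@12340000 {
 *              compatible = "foo,power-controller";
 *              #power-domain-cells = <1>;
 *      };
 *
 * Consumers would then reference a domain by index, e.g.
 *      power-domains = <&power 1>;
 */
static struct generic_pm_domain foo_pd_a = { .name = "foo-pd-a" };
static struct generic_pm_domain foo_pd_b = { .name = "foo-pd-b" };

static struct generic_pm_domain *foo_domains[] = { &foo_pd_a, &foo_pd_b };

static struct genpd_onecell_data foo_onecell_data = {
        .domains = foo_domains,
        .num_domains = ARRAY_SIZE(foo_domains),
};

static int foo_pd_probe(struct platform_device *pdev)
{
        pm_genpd_init(&foo_pd_a, NULL, false);
        pm_genpd_init(&foo_pd_b, NULL, false);

        return __of_genpd_add_provider(pdev->dev.of_node,
                                       __of_genpd_xlate_onecell,
                                       &foo_onecell_data);
}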
/**
 * __of_genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
                            void *data)
{
        struct of_genpd_provider *cp;

        cp = kzalloc(sizeof(*cp), GFP_KERNEL);
        if (!cp)
                return -ENOMEM;

        cp->node = of_node_get(np);
        cp->data = data;
        cp->xlate = xlate;

        mutex_lock(&of_genpd_mutex);
        list_add(&cp->link, &of_genpd_providers);
        mutex_unlock(&of_genpd_mutex);
        pr_debug("Added domain provider from %s\n", np->full_name);

        return 0;
}
EXPORT_SYMBOL_GPL(__of_genpd_add_provider);

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
        struct of_genpd_provider *cp;

        mutex_lock(&of_genpd_mutex);
        list_for_each_entry(cp, &of_genpd_providers, link) {
                if (cp->node == np) {
                        list_del(&cp->link);
                        of_node_put(cp->node);
                        kfree(cp);
                        break;
                }
        }
        mutex_unlock(&of_genpd_mutex);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * of_genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses the xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
struct generic_pm_domain *of_genpd_get_from_provider(
                                        struct of_phandle_args *genpdspec)
{
        struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
        struct of_genpd_provider *provider;

        mutex_lock(&of_genpd_mutex);

        /* Check if we have such a provider in our list. */
        list_for_each_entry(provider, &of_genpd_providers, link) {
                if (provider->node == genpdspec->np)
                        genpd = provider->xlate(genpdspec, provider->data);
                if (!IS_ERR(genpd))
                        break;
        }

        mutex_unlock(&of_genpd_mutex);

        return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
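/*
 * Editor's illustration (not part of the original file): resolving a
 * consumer's "power-domains" specifier by hand mirrors the first steps of
 * genpd_dev_pm_attach() below.  The helper name is invented.
 */
static struct generic_pm_domain *foo_resolve_pd(struct device_node *np)
{
        struct of_phandle_args pd_args;
        int ret;

        /* Decode the first "power-domains" phandle plus its argument cells. */
        ret = of_parse_phandle_with_args(np, "power-domains",
                                         "#power-domain-cells", 0, &pd_args);
        if (ret < 0)
                return ERR_PTR(ret);

        /* Map the specifier to a domain via the provider's xlate callback. */
        return of_genpd_get_from_provider(&pd_args);
}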
/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
        struct generic_pm_domain *pd;
        unsigned int i;
        int ret = 0;

        pd = pm_genpd_lookup_dev(dev);
        if (!pd)
                return;

        dev_dbg(dev, "removing from PM domain %s\n", pd->name);

        for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
                ret = pm_genpd_remove_device(pd, dev);
                if (ret != -EAGAIN)
                        break;

                mdelay(i);
                cond_resched();
        }

        if (ret < 0) {
                dev_err(dev, "failed to remove from PM domain %s: %d\n",
                        pd->name, ret);
                return;
        }

        /* Check if PM domain can be powered off after removing this device. */
        genpd_queue_power_off_work(pd);
}

static void genpd_dev_pm_sync(struct device *dev)
{
        struct generic_pm_domain *pd;

        pd = dev_to_genpd(dev);
        if (IS_ERR(pd))
                return;

        genpd_queue_power_off_work(pd);
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse the device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to the retrieved pm_domain ops.
 *
 * Both generic and legacy Samsung-specific DT bindings are supported to keep
 * backwards compatibility with existing DTBs.
 *
 * Returns 0 on successfully attached PM domain or a negative error code.
 */
int genpd_dev_pm_attach(struct device *dev)
{
        struct of_phandle_args pd_args;
        struct generic_pm_domain *pd;
        unsigned int i;
        int ret;

        if (!dev->of_node)
                return -ENODEV;

        if (dev->pm_domain)
                return -EEXIST;

        ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
                                        "#power-domain-cells", 0, &pd_args);
        if (ret < 0) {
                if (ret != -ENOENT)
                        return ret;

                /*
                 * Try legacy Samsung-specific bindings
                 * (for backwards compatibility of DT ABI)
                 */
                pd_args.args_count = 0;
                pd_args.np = of_parse_phandle(dev->of_node,
                                                "samsung,power-domain", 0);
                if (!pd_args.np)
                        return -ENOENT;
        }

        pd = of_genpd_get_from_provider(&pd_args);
        if (IS_ERR(pd)) {
                dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
                        __func__, PTR_ERR(pd));
                of_node_put(dev->of_node);
                return PTR_ERR(pd);
        }

        dev_dbg(dev, "adding to PM domain %s\n", pd->name);

        for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
                ret = pm_genpd_add_device(pd, dev);
                if (ret != -EAGAIN)
                        break;

                mdelay(i);
                cond_resched();
        }

        if (ret < 0) {
                dev_err(dev, "failed to add to PM domain %s: %d\n",
                        pd->name, ret);
                of_node_put(dev->of_node);
                return ret;
        }

        dev->pm_domain->detach = genpd_dev_pm_detach;
        dev->pm_domain->sync = genpd_dev_pm_sync;
        pm_genpd_poweron(pd);

        return 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
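/*
 * Editor's illustration (not part of the original file): bus code typically
 * calls genpd_dev_pm_attach() just before probing a device and treats "this
 * device belongs to no PM domain" as non-fatal.  On success the attach path
 * above powers the domain on and installs the detach/sync callbacks, so the
 * bus needs no extra cleanup here.  The bus name is invented.
 */
static int foo_bus_probe(struct device *dev)
{
        int ret;

        ret = genpd_dev_pm_attach(dev);
        if (ret)
                dev_dbg(dev, "no PM domain attached: %d\n", ret);

        return 0;       /* the driver's own probe would run here */
}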
/*** debugfs support ***/

#ifdef CONFIG_PM_ADVANCED_DEBUG
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *pm_genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
        static const char * const status_lookup[] = {
                [RPM_ACTIVE] = "active",
                [RPM_RESUMING] = "resuming",
                [RPM_SUSPENDED] = "suspended",
                [RPM_SUSPENDING] = "suspending"
        };
        const char *p = "";

        if (dev->power.runtime_error)
                p = "error";
        else if (dev->power.disable_depth)
                p = "unsupported";
        else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
                p = status_lookup[dev->power.runtime_status];
        else
                WARN_ON(1);

        seq_puts(s, p);
}

static int pm_genpd_summary_one(struct seq_file *s,
                                struct generic_pm_domain *genpd)
{
        static const char * const status_lookup[] = {
                [GPD_STATE_ACTIVE] = "on",
                [GPD_STATE_WAIT_MASTER] = "wait-master",
                [GPD_STATE_BUSY] = "busy",
                [GPD_STATE_REPEAT] = "off-in-progress",
                [GPD_STATE_POWER_OFF] = "off"
        };
        struct pm_domain_data *pm_data;
        const char *kobj_path;
        struct gpd_link *link;
        int ret;

        ret = mutex_lock_interruptible(&genpd->lock);
        if (ret)
                return -ERESTARTSYS;

        if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
                goto exit;
        seq_printf(s, "%-30s  %-15s ", genpd->name,
                   status_lookup[genpd->status]);

        /*
         * Modifications on the list require holding locks on both
         * master and slave, so we are safe.
         * Also genpd->name is immutable.
         */
        list_for_each_entry(link, &genpd->master_links, master_node) {
                seq_printf(s, "%s", link->slave->name);
                if (!list_is_last(&link->master_node, &genpd->master_links))
                        seq_puts(s, ", ");
        }

        list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
                kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
                if (kobj_path == NULL)
                        continue;

                seq_printf(s, "\n    %-50s ", kobj_path);
                rtpm_status_str(s, pm_data->dev);
                kfree(kobj_path);
        }

        seq_puts(s, "\n");
exit:
        mutex_unlock(&genpd->lock);

        return 0;
}

static int pm_genpd_summary_show(struct seq_file *s, void *data)
{
        struct generic_pm_domain *genpd;
        int ret = 0;

        seq_puts(s, "    domain                          status         slaves\n");
        seq_puts(s, "           /device                                      runtime status\n");
        seq_puts(s, "----------------------------------------------------------------------\n");

        ret = mutex_lock_interruptible(&gpd_list_lock);
        if (ret)
                return -ERESTARTSYS;

        list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
                ret = pm_genpd_summary_one(s, genpd);
                if (ret)
                        break;
        }
        mutex_unlock(&gpd_list_lock);

        return ret;
}

static int pm_genpd_summary_open(struct inode *inode, struct file *file)
{
        return single_open(file, pm_genpd_summary_show, NULL);
}

static const struct file_operations pm_genpd_summary_fops = {
        .open = pm_genpd_summary_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init pm_genpd_debug_init(void)
{
        struct dentry *d;

        pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

        if (!pm_genpd_debugfs_dir)
                return -ENOMEM;

        d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
                        pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
        if (!d)
                return -ENOMEM;

        return 0;
}
late_initcall(pm_genpd_debug_init);
static void __exit pm_genpd_debug_exit(void)
{
        debugfs_remove_recursive(pm_genpd_debugfs_dir);
}
__exitcall(pm_genpd_debug_exit);
#endif /* CONFIG_PM_ADVANCED_DEBUG */
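/*
 * Editor's note (not part of the original file): given the format strings
 * above, /sys/kernel/debug/pm_genpd/pm_genpd_summary produces output of
 * roughly the following shape; the domain and device names are invented.
 *
 *     domain                          status         slaves
 *            /device                                      runtime status
 * ----------------------------------------------------------------------
 * foo-pd                          on              foo-sub-pd
 *     /devices/platform/foo.0                            suspended
 */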