/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
	if (!__retval && __elapsed > __td->field) {				\
		__td->field = __elapsed;					\
		dev_dbg(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
		genpd->max_off_time_changed = true;				\
		__td->constraint_changed = true;				\
	}									\
	__retval;								\
})
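
/*
 * Illustrative sketch (not part of this file's logic): a platform would
 * populate the dev_ops callbacks consumed by the macros above when setting
 * up its domain.  "foo_dev_stop"/"foo_dev_start" are hypothetical.
 */
#if 0
static int foo_dev_stop(struct device *dev)
{
	/* Gate the device's clock; latency is measured by the caller. */
	return 0;
}

static int foo_dev_start(struct device *dev)
{
	/* Ungate the device's clock. */
	return 0;
}

static void foo_pd_setup(struct generic_pm_domain *genpd)
{
	genpd->dev_ops.stop = foo_dev_stop;	/* used by genpd_stop_dev() */
	genpd->dev_ops.start = foo_dev_start;	/* used by genpd_start_dev() */
}
#endif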

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(domain_name))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!strcmp(gpd->name, domain_name)) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);
	return genpd;
}

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
	s64 usecs64;

	if (!genpd->cpuidle_data)
		return;

	usecs64 = genpd->power_on_latency_ns;
	do_div(usecs64, NSEC_PER_USEC);
	usecs64 += genpd->cpuidle_data->saved_exit_latency;
	genpd->cpuidle_data->idle_state->exit_latency = usecs64;
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	if (genpd->cpuidle_data) {
		cpuidle_pause_and_lock();
		genpd->cpuidle_data->idle_state->disabled = true;
		cpuidle_resume_and_unlock();
		goto out;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for parent" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	if (genpd->power_on) {
		ktime_t time_start = ktime_get();
		s64 elapsed_ns;

		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_on_latency_ns) {
			genpd->power_on_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			genpd_recalc_cpu_exit_latency(genpd);
			if (genpd->name)
				pr_warning("%s: Power-on latency exceeded, new value %lld ns\n",
					   genpd->name, elapsed_ns);
		}
	}

 out:
	genpd_set_active(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
	struct generic_pm_domain *genpd;

	genpd = pm_genpd_lookup_name(domain_name);
	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}

#ifdef CONFIG_PM_RUNTIME

static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
				     struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

	mutex_lock(&gpd_data->lock);
	dev = gpd_data->base.dev;
	if (!dev) {
		mutex_unlock(&gpd_data->lock);
		return NOTIFY_DONE;
	}
	mutex_unlock(&gpd_data->lock);

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	if (gpd_data->need_restore > 0)
		return 0;

	/*
	 * If the value of the need_restore flag is still unknown at this
	 * point, we trust that pm_genpd_poweroff() has verified that the
	 * device is already runtime PM suspended.
	 */
	if (gpd_data->need_restore < 0) {
		gpd_data->need_restore = 1;
		return 0;
	}

	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = 1;

	return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int need_restore = gpd_data->need_restore;

	gpd_data->need_restore = 0;
	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);

	/*
	 * Call genpd_restore_dev() for recently added devices too
	 * (need_restore is negative then).
	 */
	if (need_restore)
		genpd_restore_dev(genpd, dev);

	mutex_lock(&genpd->lock);
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_WAIT_MASTER
		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;
	}

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->cpuidle_data) {
		/*
		 * If cpuidle_data is set, cpuidle should turn the domain off
		 * when the CPU in it is idle. In that case we don't decrement
		 * the subdomain counts of the master domains, so that power is
		 * not removed from the current domain prematurely as a result
		 * of cutting off the masters' power.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		cpuidle_pause_and_lock();
		genpd->cpuidle_data->idle_state->disabled = false;
		cpuidle_resume_and_unlock();
		goto out;
	}

	if (genpd->power_off) {
		ktime_t time_start;
		s64 elapsed_ns;

		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		time_start = ktime_get();

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it. In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_off_latency_ns) {
			genpd->power_off_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			if (genpd->name)
				pr_warning("%s: Power-off latency exceeded, new value %lld ns\n",
					   genpd->name, elapsed_ns);
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);

	/*
	 * If we have an unknown state of the need_restore flag, it means none
	 * of the runtime PM callbacks has been invoked yet. Let's update the
	 * flag to reflect that the current state is active.
	 */
	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->need_restore < 0)
		gpd_data->need_restore = 0;

	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}
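
/*
 * Illustrative sketch: a driver for a device in a generic PM domain never
 * calls the callbacks above directly; it goes through the runtime PM core,
 * which invokes them via dev->pm_domain->ops.  The "foo" driver below is
 * hypothetical.
 */
#if 0
static int foo_driver_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);

	/* Powers up the domain via pm_genpd_runtime_resume(). */
	pm_runtime_get_sync(&pdev->dev);

	/* ... hardware setup ... */

	/* May power the domain down via pm_genpd_runtime_suspend(). */
	pm_runtime_put(&pdev->dev);
	return 0;
}
#endif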

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		return genpd_start_dev_no_timing(genpd, dev);

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

	return 0;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}

static int __init genpd_poweroff_unused(void)
{
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(genpd_poweroff_unused);

#else

static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
					    unsigned long val, void *ptr)
{
	return NOTIFY_DONE;
}

static inline void
genpd_queue_power_off_work(struct generic_pm_domain *genpd) {}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend	NULL
#define pm_genpd_runtime_resume		NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(struct generic_pm_domain *genpd)
{
	struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status != GPD_STATE_POWER_OFF)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master);
		genpd_sd_counter_inc(link->master);
	}

	if (genpd->power_on)
		genpd->power_on(genpd);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system). In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
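
/*
 * Illustrative sketch: a platform that wants wakeup-enabled devices to keep
 * their domain powered during system sleep might provide an active_wakeup
 * callback like the hypothetical one below, consumed through
 * genpd_dev_active_wakeup() above.
 */
#if 0
static bool foo_active_wakeup(struct device *dev)
{
	/* Keep the domain on whenever the device may wake up the system. */
	return device_may_wakeup(dev);
}
#endif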

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Complete the freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			if (genpd->power_off)
				genpd->power_off(genpd);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_request_idle(dev);
	}
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to power the device's domain off (true) or on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd);
	} else {
		pm_genpd_sync_poweron(genpd);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
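
/*
 * Illustrative sketch: a timekeeping or clock event device that must run
 * across syscore suspend can bracket its register save/restore with the
 * helpers above.  The "foo" device and callbacks are hypothetical.
 */
#if 0
static struct device *foo_dev;

static void foo_clkevt_suspend(struct clock_event_device *ced)
{
	/* ... save hardware state ... */
	pm_genpd_syscore_poweroff(foo_dev);	/* allow the domain to go off */
}

static void foo_clkevt_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(foo_dev);	/* power the domain back up */
	/* ... restore hardware state ... */
}
#endif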

#else

#define pm_genpd_prepare	NULL
#define pm_genpd_suspend	NULL
#define pm_genpd_suspend_late	NULL
#define pm_genpd_suspend_noirq	NULL
#define pm_genpd_resume_early	NULL
#define pm_genpd_resume_noirq	NULL
#define pm_genpd_resume		NULL
#define pm_genpd_freeze		NULL
#define pm_genpd_freeze_late	NULL
#define pm_genpd_freeze_noirq	NULL
#define pm_genpd_thaw_early	NULL
#define pm_genpd_thaw_noirq	NULL
#define pm_genpd_thaw		NULL
#define pm_genpd_restore_noirq	NULL
#define pm_genpd_complete	NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data)
		return NULL;

	mutex_init(&gpd_data->lock);
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
	return gpd_data;
}

static void __pm_genpd_free_dev_data(struct device *dev,
				     struct generic_pm_domain_data *gpd_data)
{
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
	kfree(gpd_data);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
	if (!gpd_data_new)
		return -ENOMEM;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = &genpd->domain;
	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	} else {
		gpd_data = gpd_data_new;
		dev->power.subsys_data->domain_data = &gpd_data->base;
	}
	gpd_data->refcount++;
	if (td)
		gpd_data->td = *td;

	spin_unlock_irq(&dev->power.lock);

	if (genpd->attach_dev)
		genpd->attach_dev(genpd, dev);

	mutex_lock(&gpd_data->lock);
	gpd_data->base.dev = dev;
	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
	gpd_data->need_restore = -1;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	mutex_unlock(&gpd_data->lock);

 out:
	genpd_release_lock(genpd);

	if (gpd_data != gpd_data_new)
		__pm_genpd_free_dev_data(dev, gpd_data_new);

	return ret;
}

/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
			       struct gpd_timing_data *td)
{
	return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
}
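
/*
 * Illustrative sketch: adding a platform device to a domain with initial
 * latency figures.  "foo_pd" and the numbers are hypothetical; passing NULL
 * instead of &foo_td is also fine, in which case the latencies start at zero
 * and are learned at run time by the timed callbacks.
 */
#if 0
static struct generic_pm_domain foo_pd;

static struct gpd_timing_data foo_td = {
	.stop_latency_ns = 20000,
	.start_latency_ns = 50000,
};

static int foo_register_device(struct platform_device *pdev)
{
	return __pm_genpd_add_device(&foo_pd, &pdev->dev, &foo_td);
}
#endif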

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	bool remove = false;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
	    || IS_ERR_OR_NULL(dev->pm_domain)
	    || pd_to_genpd(dev->pm_domain) != genpd)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = NULL;
	pdd = dev->power.subsys_data->domain_data;
	list_del_init(&pdd->list_node);
	gpd_data = to_gpd_data(pdd);
	if (--gpd_data->refcount == 0) {
		dev->power.subsys_data->domain_data = NULL;
		remove = true;
	}

	spin_unlock_irq(&dev->power.lock);

	mutex_lock(&gpd_data->lock);
	pdd->dev = NULL;
	mutex_unlock(&gpd_data->lock);

	genpd_release_lock(genpd);

	dev_pm_put_subsys_data(dev);
	if (remove)
		__pm_genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
 * @dev: Device to set/unset the flag for.
 * @val: The new value of the device's "need restore" flag.
 */
void pm_genpd_dev_need_restore(struct device *dev, bool val)
{
	struct pm_subsys_data *psd;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	psd = dev_to_psd(dev);
	if (psd && psd->domain_data)
		to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (subdomain->status != GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	if (genpd->status == GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
 * @master_name: Name of the master PM domain to add the subdomain to.
 * @subdomain_name: Name of the subdomain to be added.
 */
int pm_genpd_add_subdomain_names(const char *master_name,
				 const char *subdomain_name)
{
	struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;

	if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!master && !strcmp(gpd->name, master_name))
			master = gpd;

		if (!subdomain && !strcmp(gpd->name, subdomain_name))
			subdomain = gpd;

		if (master && subdomain)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return pm_genpd_add_subdomain(master, subdomain);
}
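
/*
 * Illustrative sketch: building a two-level hierarchy in which "foo_a" is
 * powered by the "foo_top" master domain.  Both domains and their names are
 * hypothetical; pm_genpd_init() must have been called on both first.
 */
#if 0
static struct generic_pm_domain foo_top;
static struct generic_pm_domain foo_a;

static void foo_build_hierarchy(void)
{
	pm_genpd_add_subdomain(&foo_top, &foo_a);
	/* or, by name: pm_genpd_add_subdomain_names("foo_top", "foo_a"); */
}
#endif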

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
 * @genpd: PM domain to be connected with cpuidle.
 * @state: cpuidle state this domain can disable/enable.
 *
 * Make a PM domain behave as though it contained a CPU core, that is, instead
 * of calling its power down routine it will enable the given cpuidle state so
 * that the cpuidle subsystem can power it down (if possible and desirable).
 */
int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
	struct cpuidle_driver *cpuidle_drv;
	struct gpd_cpuidle_data *cpuidle_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || state < 0)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->cpuidle_data) {
		ret = -EEXIST;
		goto out;
	}
	cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
	if (!cpuidle_data) {
		ret = -ENOMEM;
		goto out;
	}
	cpuidle_drv = cpuidle_driver_ref();
	if (!cpuidle_drv) {
		ret = -ENODEV;
		goto err_drv;
	}
	if (cpuidle_drv->state_count <= state) {
		ret = -EINVAL;
		goto err;
	}
	idle_state = &cpuidle_drv->states[state];
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto err;
	}
	cpuidle_data->idle_state = idle_state;
	cpuidle_data->saved_exit_latency = idle_state->exit_latency;
	genpd->cpuidle_data = cpuidle_data;
	genpd_recalc_cpu_exit_latency(genpd);

 out:
	genpd_release_lock(genpd);
	return ret;

 err:
	cpuidle_driver_unref();

 err_drv:
	kfree(cpuidle_data);
	goto out;
}

/**
 * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
 * @name: Name of the domain to connect to cpuidle.
 * @state: cpuidle state this domain can manipulate.
 */
int pm_genpd_name_attach_cpuidle(const char *name, int state)
{
	return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
}
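
/*
 * Illustrative sketch: a CPU domain driver handing control of the domain to
 * cpuidle.  The domain name and state index are hypothetical; note that the
 * chosen cpuidle state must exist and start out disabled.
 */
#if 0
static int foo_cpu_pd_setup(void)
{
	/* Let cpuidle state 1 power the "foo_cpu" domain down when idle. */
	return pm_genpd_name_attach_cpuidle("foo_cpu", 1);
}
#endif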

/**
 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
 * @genpd: PM domain to remove the cpuidle connection from.
 *
 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
 * given PM domain.
 */
int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
	struct gpd_cpuidle_data *cpuidle_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	cpuidle_data = genpd->cpuidle_data;
	if (!cpuidle_data) {
		ret = -ENODEV;
		goto out;
	}
	idle_state = cpuidle_data->idle_state;
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto out;
	}
	idle_state->exit_latency = cpuidle_data->saved_exit_latency;
	cpuidle_driver_unref();
	genpd->cpuidle_data = NULL;
	kfree(cpuidle_data);

 out:
	genpd_release_lock(genpd);
	return ret;
}

/**
 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
 * @name: Name of the domain to disconnect cpuidle from.
 */
int pm_genpd_name_detach_cpuidle(const char *name)
{
	return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
}

/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default PM domains "restore device state".
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Whether the domain should be initialized as powered off.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
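
/*
 * Illustrative sketch: a SoC power domain driver wiring its power switch into
 * the framework.  The "foo" names, the empty callbacks, and the initial-off
 * assumption are hypothetical.
 */
#if 0
static int foo_pd_power_on(struct generic_pm_domain *domain)
{
	/* ... write to the power controller, wait for the rail ... */
	return 0;
}

static int foo_pd_power_off(struct generic_pm_domain *domain)
{
	/* ... gate the rail ... */
	return 0;
}

static struct generic_pm_domain foo_pd = {
	.name = "foo_pd",
	.power_on = foo_pd_power_on,
	.power_off = foo_pd_power_off,
};

static void foo_pd_register(void)
{
	/* No governor; the domain starts powered off. */
	pm_genpd_init(&foo_pd, NULL, true);
}
#endif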
/**
 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of the xlate function
 * needs to be a valid pointer to struct generic_pm_domain.
 */
struct generic_pm_domain *__of_genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	if (genpdspec->args_count != 0)
		return ERR_PTR(-EINVAL);
	return data;
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);

/**
 * __of_genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
struct generic_pm_domain *__of_genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx;

	/* Validate the specifier before reading any of its cells. */
	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	idx = genpdspec->args[0];
	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}
EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);

/**
 * __of_genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %s\n", np->full_name);

	return 0;
}
EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
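
/*
 * Illustrative sketch (example only): a driver whose single DT node provides
 * several domains could register a onecell provider as below.  foo_domains
 * and foo_probe are made up; of_genpd_add_provider_onecell() is assumed to be
 * the convenience wrapper from linux/pm_domain.h that passes
 * __of_genpd_xlate_onecell() to __of_genpd_add_provider().
 */
#if 0	/* example only */
static struct generic_pm_domain *foo_domains[2];

static struct genpd_onecell_data foo_onecell_data = {
	.domains = foo_domains,
	.num_domains = ARRAY_SIZE(foo_domains),
};

static int foo_probe(struct platform_device *pdev)
{
	/*
	 * Each entry of foo_domains[] is assumed to have been set up with
	 * pm_genpd_init() before the provider is registered.
	 */
	return of_genpd_add_provider_onecell(pdev->dev.of_node,
					     &foo_onecell_data);
}
#endif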
/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp;

	mutex_lock(&of_genpd_mutex);
	list_for_each_entry(cp, &of_genpd_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * of_genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and,
 * if one is found, uses the provider's xlate function to map the phandle args
 * to a PM domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *of_genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used.
 *
 * Try to locate the generic PM domain that the device was previously attached
 * to, and if one is found, detach the device from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd = NULL, *gpd;
	int ret = 0;

	if (!dev->pm_domain)
		return;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			pd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	if (!pd)
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	while (1) {
		ret = pm_genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);
}
/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse the device's OF node to find a PM domain specifier. If one is found,
 * attach the device to the retrieved pm_domain ops.
 *
 * Both generic and legacy Samsung-specific DT bindings are supported to keep
 * backwards compatibility with existing DTBs.
 *
 * Returns 0 when the device was successfully attached to its PM domain, or
 * a negative error code otherwise.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int ret;

	if (!dev->of_node)
		return -ENODEV;

	if (dev->pm_domain)
		return -EEXIST;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					"#power-domain-cells", 0, &pd_args);
	if (ret < 0) {
		if (ret != -ENOENT)
			return ret;

		/*
		 * Try legacy Samsung-specific bindings
		 * (for backwards compatibility of DT ABI)
		 */
		pd_args.args_count = 0;
		pd_args.np = of_parse_phandle(dev->of_node,
						"samsung,power-domain", 0);
		if (!pd_args.np)
			return -ENOENT;
	}

	pd = of_genpd_get_from_provider(&pd_args);
	/* Drop the reference taken on pd_args.np by the phandle parsing. */
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return PTR_ERR(pd);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	while (1) {
		ret = pm_genpd_add_device(pd, dev);
		if (ret != -EAGAIN)
			break;
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to add to PM domain %s: %d\n",
			pd->name, ret);
		return ret;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;

	return 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
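
/*
 * Illustrative sketch (example only): bus code is the expected caller of
 * genpd_dev_pm_attach(), binding a device to its PM domain right before
 * driver probe and using the ->detach() hook installed above to undo it on
 * failure.  foo_bus_probe() and foo_really_probe() are made up for the
 * example, as is the error-handling policy shown.
 */
#if 0	/* example only */
static int foo_bus_probe(struct device *dev)
{
	int ret;

	ret = genpd_dev_pm_attach(dev);
	if (ret < 0 && ret != -ENOENT && ret != -ENODEV)
		return ret;	/* assumed policy: "no domain" is not fatal */

	ret = foo_really_probe(dev);
	if (ret && dev->pm_domain && dev->pm_domain->detach)
		dev->pm_domain->detach(dev, true);

	return ret;
}
#endif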


/*** debugfs support ***/

#ifdef CONFIG_PM_ADVANCED_DEBUG
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *pm_genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, but the dependencies between PM_GENERIC_DOMAINS and
 * PM_RUNTIME are too loose to generalize it.
 */
#ifdef CONFIG_PM_RUNTIME
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}
#else
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	seq_puts(s, "active");
}
#endif

static int pm_genpd_summary_one(struct seq_file *s,
		struct generic_pm_domain *gpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_WAIT_MASTER] = "wait-master",
		[GPD_STATE_BUSY] = "busy",
		[GPD_STATE_REPEAT] = "off-in-progress",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	int ret;

	ret = mutex_lock_interruptible(&gpd->lock);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]);

	/*
	 * Modifications on the list require holding locks on both
	 * master and slave, so we are safe.
	 * Also gpd->name is immutable.
	 */
	list_for_each_entry(link, &gpd->master_links, master_node) {
		seq_printf(s, "%s", link->slave->name);
		if (!list_is_last(&link->master_node, &gpd->master_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	mutex_unlock(&gpd->lock);

	return 0;
}

static int pm_genpd_summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *gpd;
	int ret = 0;

	seq_puts(s, "    domain                      status         slaves\n");
	seq_puts(s, "           /device                                      runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		ret = pm_genpd_summary_one(s, gpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int pm_genpd_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, pm_genpd_summary_show, NULL);
}

static const struct file_operations pm_genpd_summary_fops = {
	.open = pm_genpd_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
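
/*
 * Example only: once pm_genpd_debug_init() below has run, the summary can be
 * read from debugfs, e.g.:
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
 *	    domain                      status         slaves
 *	           /device                                      runtime status
 *	----------------------------------------------------------------------
 *	foo-pd                         on
 *	    /devices/platform/foo.0                            suspended
 *
 * The domain and device shown are made up; the column layout follows the
 * format strings in pm_genpd_summary_show() and pm_genpd_summary_one().
 */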
static int __init pm_genpd_debug_init(void)
{
	struct dentry *d;

	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	if (!pm_genpd_debugfs_dir)
		return -ENOMEM;

	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
	if (!d)
		return -ENOMEM;

	return 0;
}
late_initcall(pm_genpd_debug_init);

static void __exit pm_genpd_debug_exit(void)
{
	debugfs_remove_recursive(pm_genpd_debugfs_dir);
}
__exitcall(pm_genpd_debug_exit);
#endif /* CONFIG_PM_ADVANCED_DEBUG */