1 /* 2 * drivers/base/power/domain.c - Common code related to device power domains. 3 * 4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. 5 * 6 * This file is released under the GPLv2. 7 */ 8 9 #include <linux/kernel.h> 10 #include <linux/io.h> 11 #include <linux/platform_device.h> 12 #include <linux/pm_runtime.h> 13 #include <linux/pm_domain.h> 14 #include <linux/pm_qos.h> 15 #include <linux/slab.h> 16 #include <linux/err.h> 17 #include <linux/sched.h> 18 #include <linux/suspend.h> 19 #include <linux/export.h> 20 21 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ 22 ({ \ 23 type (*__routine)(struct device *__d); \ 24 type __ret = (type)0; \ 25 \ 26 __routine = genpd->dev_ops.callback; \ 27 if (__routine) { \ 28 __ret = __routine(dev); \ 29 } \ 30 __ret; \ 31 }) 32 33 #define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \ 34 ({ \ 35 ktime_t __start = ktime_get(); \ 36 type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \ 37 s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \ 38 struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \ 39 if (!__retval && __elapsed > __td->field) { \ 40 __td->field = __elapsed; \ 41 dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \ 42 __elapsed); \ 43 genpd->max_off_time_changed = true; \ 44 __td->constraint_changed = true; \ 45 } \ 46 __retval; \ 47 }) 48 49 static LIST_HEAD(gpd_list); 50 static DEFINE_MUTEX(gpd_list_lock); 51 52 static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name) 53 { 54 struct generic_pm_domain *genpd = NULL, *gpd; 55 56 if (IS_ERR_OR_NULL(domain_name)) 57 return NULL; 58 59 mutex_lock(&gpd_list_lock); 60 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 61 if (!strcmp(gpd->name, domain_name)) { 62 genpd = gpd; 63 break; 64 } 65 } 66 mutex_unlock(&gpd_list_lock); 67 return genpd; 68 } 69 70 struct generic_pm_domain *dev_to_genpd(struct device *dev) 71 { 72 if (IS_ERR_OR_NULL(dev->pm_domain)) 73 return ERR_PTR(-EINVAL); 74 75 return pd_to_genpd(dev->pm_domain); 76 } 77 78 static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) 79 { 80 return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev, 81 stop_latency_ns, "stop"); 82 } 83 84 static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) 85 { 86 return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, 87 start_latency_ns, "start"); 88 } 89 90 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) 91 { 92 bool ret = false; 93 94 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0)) 95 ret = !!atomic_dec_and_test(&genpd->sd_count); 96 97 return ret; 98 } 99 100 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) 101 { 102 atomic_inc(&genpd->sd_count); 103 smp_mb__after_atomic(); 104 } 105 106 static void genpd_acquire_lock(struct generic_pm_domain *genpd) 107 { 108 DEFINE_WAIT(wait); 109 110 mutex_lock(&genpd->lock); 111 /* 112 * Wait for the domain to transition into either the active, 113 * or the power off state. 114 */ 115 for (;;) { 116 prepare_to_wait(&genpd->status_wait_queue, &wait, 117 TASK_UNINTERRUPTIBLE); 118 if (genpd->status == GPD_STATE_ACTIVE 119 || genpd->status == GPD_STATE_POWER_OFF) 120 break; 121 mutex_unlock(&genpd->lock); 122 123 schedule(); 124 125 mutex_lock(&genpd->lock); 126 } 127 finish_wait(&genpd->status_wait_queue, &wait); 128 } 129 130 static void genpd_release_lock(struct generic_pm_domain *genpd) 131 { 132 mutex_unlock(&genpd->lock); 133 } 134 135 static void genpd_set_active(struct generic_pm_domain *genpd) 136 { 137 if (genpd->resume_count == 0) 138 genpd->status = GPD_STATE_ACTIVE; 139 } 140 141 static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) 142 { 143 s64 usecs64; 144 145 if (!genpd->cpuidle_data) 146 return; 147 148 usecs64 = genpd->power_on_latency_ns; 149 do_div(usecs64, NSEC_PER_USEC); 150 usecs64 += genpd->cpuidle_data->saved_exit_latency; 151 genpd->cpuidle_data->idle_state->exit_latency = usecs64; 152 } 153 154 /** 155 * __pm_genpd_poweron - Restore power to a given PM domain and its masters. 156 * @genpd: PM domain to power up. 157 * 158 * Restore power to @genpd and all of its masters so that it is possible to 159 * resume a device belonging to it. 160 */ 161 static int __pm_genpd_poweron(struct generic_pm_domain *genpd) 162 __releases(&genpd->lock) __acquires(&genpd->lock) 163 { 164 struct gpd_link *link; 165 DEFINE_WAIT(wait); 166 int ret = 0; 167 168 /* If the domain's master is being waited for, we have to wait too. */ 169 for (;;) { 170 prepare_to_wait(&genpd->status_wait_queue, &wait, 171 TASK_UNINTERRUPTIBLE); 172 if (genpd->status != GPD_STATE_WAIT_MASTER) 173 break; 174 mutex_unlock(&genpd->lock); 175 176 schedule(); 177 178 mutex_lock(&genpd->lock); 179 } 180 finish_wait(&genpd->status_wait_queue, &wait); 181 182 if (genpd->status == GPD_STATE_ACTIVE 183 || (genpd->prepared_count > 0 && genpd->suspend_power_off)) 184 return 0; 185 186 if (genpd->status != GPD_STATE_POWER_OFF) { 187 genpd_set_active(genpd); 188 return 0; 189 } 190 191 if (genpd->cpuidle_data) { 192 cpuidle_pause_and_lock(); 193 genpd->cpuidle_data->idle_state->disabled = true; 194 cpuidle_resume_and_unlock(); 195 goto out; 196 } 197 198 /* 199 * The list is guaranteed not to change while the loop below is being 200 * executed, unless one of the masters' .power_on() callbacks fiddles 201 * with it. 202 */ 203 list_for_each_entry(link, &genpd->slave_links, slave_node) { 204 genpd_sd_counter_inc(link->master); 205 genpd->status = GPD_STATE_WAIT_MASTER; 206 207 mutex_unlock(&genpd->lock); 208 209 ret = pm_genpd_poweron(link->master); 210 211 mutex_lock(&genpd->lock); 212 213 /* 214 * The "wait for parent" status is guaranteed not to change 215 * while the master is powering on. 216 */ 217 genpd->status = GPD_STATE_POWER_OFF; 218 wake_up_all(&genpd->status_wait_queue); 219 if (ret) { 220 genpd_sd_counter_dec(link->master); 221 goto err; 222 } 223 } 224 225 if (genpd->power_on) { 226 ktime_t time_start = ktime_get(); 227 s64 elapsed_ns; 228 229 ret = genpd->power_on(genpd); 230 if (ret) 231 goto err; 232 233 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 234 if (elapsed_ns > genpd->power_on_latency_ns) { 235 genpd->power_on_latency_ns = elapsed_ns; 236 genpd->max_off_time_changed = true; 237 genpd_recalc_cpu_exit_latency(genpd); 238 if (genpd->name) 239 pr_warning("%s: Power-on latency exceeded, " 240 "new value %lld ns\n", genpd->name, 241 elapsed_ns); 242 } 243 } 244 245 out: 246 genpd_set_active(genpd); 247 248 return 0; 249 250 err: 251 list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node) 252 genpd_sd_counter_dec(link->master); 253 254 return ret; 255 } 256 257 /** 258 * pm_genpd_poweron - Restore power to a given PM domain and its masters. 259 * @genpd: PM domain to power up. 260 */ 261 int pm_genpd_poweron(struct generic_pm_domain *genpd) 262 { 263 int ret; 264 265 mutex_lock(&genpd->lock); 266 ret = __pm_genpd_poweron(genpd); 267 mutex_unlock(&genpd->lock); 268 return ret; 269 } 270 271 /** 272 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters. 273 * @domain_name: Name of the PM domain to power up. 274 */ 275 int pm_genpd_name_poweron(const char *domain_name) 276 { 277 struct generic_pm_domain *genpd; 278 279 genpd = pm_genpd_lookup_name(domain_name); 280 return genpd ? pm_genpd_poweron(genpd) : -EINVAL; 281 } 282 283 #ifdef CONFIG_PM_RUNTIME 284 285 static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, 286 struct device *dev) 287 { 288 return GENPD_DEV_CALLBACK(genpd, int, start, dev); 289 } 290 291 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) 292 { 293 return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, 294 save_state_latency_ns, "state save"); 295 } 296 297 static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) 298 { 299 return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, 300 restore_state_latency_ns, 301 "state restore"); 302 } 303 304 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, 305 unsigned long val, void *ptr) 306 { 307 struct generic_pm_domain_data *gpd_data; 308 struct device *dev; 309 310 gpd_data = container_of(nb, struct generic_pm_domain_data, nb); 311 312 mutex_lock(&gpd_data->lock); 313 dev = gpd_data->base.dev; 314 if (!dev) { 315 mutex_unlock(&gpd_data->lock); 316 return NOTIFY_DONE; 317 } 318 mutex_unlock(&gpd_data->lock); 319 320 for (;;) { 321 struct generic_pm_domain *genpd; 322 struct pm_domain_data *pdd; 323 324 spin_lock_irq(&dev->power.lock); 325 326 pdd = dev->power.subsys_data ? 327 dev->power.subsys_data->domain_data : NULL; 328 if (pdd && pdd->dev) { 329 to_gpd_data(pdd)->td.constraint_changed = true; 330 genpd = dev_to_genpd(dev); 331 } else { 332 genpd = ERR_PTR(-ENODATA); 333 } 334 335 spin_unlock_irq(&dev->power.lock); 336 337 if (!IS_ERR(genpd)) { 338 mutex_lock(&genpd->lock); 339 genpd->max_off_time_changed = true; 340 mutex_unlock(&genpd->lock); 341 } 342 343 dev = dev->parent; 344 if (!dev || dev->power.ignore_children) 345 break; 346 } 347 348 return NOTIFY_DONE; 349 } 350 351 /** 352 * __pm_genpd_save_device - Save the pre-suspend state of a device. 353 * @pdd: Domain data of the device to save the state of. 354 * @genpd: PM domain the device belongs to. 355 */ 356 static int __pm_genpd_save_device(struct pm_domain_data *pdd, 357 struct generic_pm_domain *genpd) 358 __releases(&genpd->lock) __acquires(&genpd->lock) 359 { 360 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); 361 struct device *dev = pdd->dev; 362 int ret = 0; 363 364 if (gpd_data->need_restore) 365 return 0; 366 367 mutex_unlock(&genpd->lock); 368 369 genpd_start_dev(genpd, dev); 370 ret = genpd_save_dev(genpd, dev); 371 genpd_stop_dev(genpd, dev); 372 373 mutex_lock(&genpd->lock); 374 375 if (!ret) 376 gpd_data->need_restore = true; 377 378 return ret; 379 } 380 381 /** 382 * __pm_genpd_restore_device - Restore the pre-suspend state of a device. 383 * @pdd: Domain data of the device to restore the state of. 384 * @genpd: PM domain the device belongs to. 385 */ 386 static void __pm_genpd_restore_device(struct pm_domain_data *pdd, 387 struct generic_pm_domain *genpd) 388 __releases(&genpd->lock) __acquires(&genpd->lock) 389 { 390 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); 391 struct device *dev = pdd->dev; 392 bool need_restore = gpd_data->need_restore; 393 394 gpd_data->need_restore = false; 395 mutex_unlock(&genpd->lock); 396 397 genpd_start_dev(genpd, dev); 398 if (need_restore) 399 genpd_restore_dev(genpd, dev); 400 401 mutex_lock(&genpd->lock); 402 } 403 404 /** 405 * genpd_abort_poweroff - Check if a PM domain power off should be aborted. 406 * @genpd: PM domain to check. 407 * 408 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during 409 * a "power off" operation, which means that a "power on" has occured in the 410 * meantime, or if its resume_count field is different from zero, which means 411 * that one of its devices has been resumed in the meantime. 412 */ 413 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) 414 { 415 return genpd->status == GPD_STATE_WAIT_MASTER 416 || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; 417 } 418 419 /** 420 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). 421 * @genpd: PM domait to power off. 422 * 423 * Queue up the execution of pm_genpd_poweroff() unless it's already been done 424 * before. 425 */ 426 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) 427 { 428 queue_work(pm_wq, &genpd->power_off_work); 429 } 430 431 /** 432 * pm_genpd_poweroff - Remove power from a given PM domain. 433 * @genpd: PM domain to power down. 434 * 435 * If all of the @genpd's devices have been suspended and all of its subdomains 436 * have been powered down, run the runtime suspend callbacks provided by all of 437 * the @genpd's devices' drivers and remove power from @genpd. 438 */ 439 static int pm_genpd_poweroff(struct generic_pm_domain *genpd) 440 __releases(&genpd->lock) __acquires(&genpd->lock) 441 { 442 struct pm_domain_data *pdd; 443 struct gpd_link *link; 444 unsigned int not_suspended; 445 int ret = 0; 446 447 start: 448 /* 449 * Do not try to power off the domain in the following situations: 450 * (1) The domain is already in the "power off" state. 451 * (2) The domain is waiting for its master to power up. 452 * (3) One of the domain's devices is being resumed right now. 453 * (4) System suspend is in progress. 454 */ 455 if (genpd->status == GPD_STATE_POWER_OFF 456 || genpd->status == GPD_STATE_WAIT_MASTER 457 || genpd->resume_count > 0 || genpd->prepared_count > 0) 458 return 0; 459 460 if (atomic_read(&genpd->sd_count) > 0) 461 return -EBUSY; 462 463 not_suspended = 0; 464 list_for_each_entry(pdd, &genpd->dev_list, list_node) { 465 enum pm_qos_flags_status stat; 466 467 stat = dev_pm_qos_flags(pdd->dev, 468 PM_QOS_FLAG_NO_POWER_OFF 469 | PM_QOS_FLAG_REMOTE_WAKEUP); 470 if (stat > PM_QOS_FLAGS_NONE) 471 return -EBUSY; 472 473 if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) 474 || pdd->dev->power.irq_safe)) 475 not_suspended++; 476 } 477 478 if (not_suspended > genpd->in_progress) 479 return -EBUSY; 480 481 if (genpd->poweroff_task) { 482 /* 483 * Another instance of pm_genpd_poweroff() is executing 484 * callbacks, so tell it to start over and return. 485 */ 486 genpd->status = GPD_STATE_REPEAT; 487 return 0; 488 } 489 490 if (genpd->gov && genpd->gov->power_down_ok) { 491 if (!genpd->gov->power_down_ok(&genpd->domain)) 492 return -EAGAIN; 493 } 494 495 genpd->status = GPD_STATE_BUSY; 496 genpd->poweroff_task = current; 497 498 list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { 499 ret = atomic_read(&genpd->sd_count) == 0 ? 500 __pm_genpd_save_device(pdd, genpd) : -EBUSY; 501 502 if (genpd_abort_poweroff(genpd)) 503 goto out; 504 505 if (ret) { 506 genpd_set_active(genpd); 507 goto out; 508 } 509 510 if (genpd->status == GPD_STATE_REPEAT) { 511 genpd->poweroff_task = NULL; 512 goto start; 513 } 514 } 515 516 if (genpd->cpuidle_data) { 517 /* 518 * If cpuidle_data is set, cpuidle should turn the domain off 519 * when the CPU in it is idle. In that case we don't decrement 520 * the subdomain counts of the master domains, so that power is 521 * not removed from the current domain prematurely as a result 522 * of cutting off the masters' power. 523 */ 524 genpd->status = GPD_STATE_POWER_OFF; 525 cpuidle_pause_and_lock(); 526 genpd->cpuidle_data->idle_state->disabled = false; 527 cpuidle_resume_and_unlock(); 528 goto out; 529 } 530 531 if (genpd->power_off) { 532 ktime_t time_start; 533 s64 elapsed_ns; 534 535 if (atomic_read(&genpd->sd_count) > 0) { 536 ret = -EBUSY; 537 goto out; 538 } 539 540 time_start = ktime_get(); 541 542 /* 543 * If sd_count > 0 at this point, one of the subdomains hasn't 544 * managed to call pm_genpd_poweron() for the master yet after 545 * incrementing it. In that case pm_genpd_poweron() will wait 546 * for us to drop the lock, so we can call .power_off() and let 547 * the pm_genpd_poweron() restore power for us (this shouldn't 548 * happen very often). 549 */ 550 ret = genpd->power_off(genpd); 551 if (ret == -EBUSY) { 552 genpd_set_active(genpd); 553 goto out; 554 } 555 556 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 557 if (elapsed_ns > genpd->power_off_latency_ns) { 558 genpd->power_off_latency_ns = elapsed_ns; 559 genpd->max_off_time_changed = true; 560 if (genpd->name) 561 pr_warning("%s: Power-off latency exceeded, " 562 "new value %lld ns\n", genpd->name, 563 elapsed_ns); 564 } 565 } 566 567 genpd->status = GPD_STATE_POWER_OFF; 568 569 list_for_each_entry(link, &genpd->slave_links, slave_node) { 570 genpd_sd_counter_dec(link->master); 571 genpd_queue_power_off_work(link->master); 572 } 573 574 out: 575 genpd->poweroff_task = NULL; 576 wake_up_all(&genpd->status_wait_queue); 577 return ret; 578 } 579 580 /** 581 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0. 582 * @work: Work structure used for scheduling the execution of this function. 583 */ 584 static void genpd_power_off_work_fn(struct work_struct *work) 585 { 586 struct generic_pm_domain *genpd; 587 588 genpd = container_of(work, struct generic_pm_domain, power_off_work); 589 590 genpd_acquire_lock(genpd); 591 pm_genpd_poweroff(genpd); 592 genpd_release_lock(genpd); 593 } 594 595 /** 596 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain. 597 * @dev: Device to suspend. 598 * 599 * Carry out a runtime suspend of a device under the assumption that its 600 * pm_domain field points to the domain member of an object of type 601 * struct generic_pm_domain representing a PM domain consisting of I/O devices. 602 */ 603 static int pm_genpd_runtime_suspend(struct device *dev) 604 { 605 struct generic_pm_domain *genpd; 606 bool (*stop_ok)(struct device *__dev); 607 int ret; 608 609 dev_dbg(dev, "%s()\n", __func__); 610 611 genpd = dev_to_genpd(dev); 612 if (IS_ERR(genpd)) 613 return -EINVAL; 614 615 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; 616 if (stop_ok && !stop_ok(dev)) 617 return -EBUSY; 618 619 ret = genpd_stop_dev(genpd, dev); 620 if (ret) 621 return ret; 622 623 /* 624 * If power.irq_safe is set, this routine will be run with interrupts 625 * off, so it can't use mutexes. 626 */ 627 if (dev->power.irq_safe) 628 return 0; 629 630 mutex_lock(&genpd->lock); 631 genpd->in_progress++; 632 pm_genpd_poweroff(genpd); 633 genpd->in_progress--; 634 mutex_unlock(&genpd->lock); 635 636 return 0; 637 } 638 639 /** 640 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. 641 * @dev: Device to resume. 642 * 643 * Carry out a runtime resume of a device under the assumption that its 644 * pm_domain field points to the domain member of an object of type 645 * struct generic_pm_domain representing a PM domain consisting of I/O devices. 646 */ 647 static int pm_genpd_runtime_resume(struct device *dev) 648 { 649 struct generic_pm_domain *genpd; 650 DEFINE_WAIT(wait); 651 int ret; 652 653 dev_dbg(dev, "%s()\n", __func__); 654 655 genpd = dev_to_genpd(dev); 656 if (IS_ERR(genpd)) 657 return -EINVAL; 658 659 /* If power.irq_safe, the PM domain is never powered off. */ 660 if (dev->power.irq_safe) 661 return genpd_start_dev_no_timing(genpd, dev); 662 663 mutex_lock(&genpd->lock); 664 ret = __pm_genpd_poweron(genpd); 665 if (ret) { 666 mutex_unlock(&genpd->lock); 667 return ret; 668 } 669 genpd->status = GPD_STATE_BUSY; 670 genpd->resume_count++; 671 for (;;) { 672 prepare_to_wait(&genpd->status_wait_queue, &wait, 673 TASK_UNINTERRUPTIBLE); 674 /* 675 * If current is the powering off task, we have been called 676 * reentrantly from one of the device callbacks, so we should 677 * not wait. 678 */ 679 if (!genpd->poweroff_task || genpd->poweroff_task == current) 680 break; 681 mutex_unlock(&genpd->lock); 682 683 schedule(); 684 685 mutex_lock(&genpd->lock); 686 } 687 finish_wait(&genpd->status_wait_queue, &wait); 688 __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); 689 genpd->resume_count--; 690 genpd_set_active(genpd); 691 wake_up_all(&genpd->status_wait_queue); 692 mutex_unlock(&genpd->lock); 693 694 return 0; 695 } 696 697 static bool pd_ignore_unused; 698 static int __init pd_ignore_unused_setup(char *__unused) 699 { 700 pd_ignore_unused = true; 701 return 1; 702 } 703 __setup("pd_ignore_unused", pd_ignore_unused_setup); 704 705 /** 706 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. 707 */ 708 void pm_genpd_poweroff_unused(void) 709 { 710 struct generic_pm_domain *genpd; 711 712 if (pd_ignore_unused) { 713 pr_warn("genpd: Not disabling unused power domains\n"); 714 return; 715 } 716 717 mutex_lock(&gpd_list_lock); 718 719 list_for_each_entry(genpd, &gpd_list, gpd_list_node) 720 genpd_queue_power_off_work(genpd); 721 722 mutex_unlock(&gpd_list_lock); 723 } 724 725 static int __init genpd_poweroff_unused(void) 726 { 727 pm_genpd_poweroff_unused(); 728 return 0; 729 } 730 late_initcall(genpd_poweroff_unused); 731 732 #else 733 734 static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb, 735 unsigned long val, void *ptr) 736 { 737 return NOTIFY_DONE; 738 } 739 740 static inline void 741 genpd_queue_power_off_work(struct generic_pm_domain *genpd) {} 742 743 static inline void genpd_power_off_work_fn(struct work_struct *work) {} 744 745 #define pm_genpd_runtime_suspend NULL 746 #define pm_genpd_runtime_resume NULL 747 748 #endif /* CONFIG_PM_RUNTIME */ 749 750 #ifdef CONFIG_PM_SLEEP 751 752 /** 753 * pm_genpd_present - Check if the given PM domain has been initialized. 754 * @genpd: PM domain to check. 755 */ 756 static bool pm_genpd_present(struct generic_pm_domain *genpd) 757 { 758 struct generic_pm_domain *gpd; 759 760 if (IS_ERR_OR_NULL(genpd)) 761 return false; 762 763 list_for_each_entry(gpd, &gpd_list, gpd_list_node) 764 if (gpd == genpd) 765 return true; 766 767 return false; 768 } 769 770 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, 771 struct device *dev) 772 { 773 return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev); 774 } 775 776 /** 777 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. 778 * @genpd: PM domain to power off, if possible. 779 * 780 * Check if the given PM domain can be powered off (during system suspend or 781 * hibernation) and do that if so. Also, in that case propagate to its masters. 782 * 783 * This function is only called in "noirq" and "syscore" stages of system power 784 * transitions, so it need not acquire locks (all of the "noirq" callbacks are 785 * executed sequentially, so it is guaranteed that it will never run twice in 786 * parallel). 787 */ 788 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) 789 { 790 struct gpd_link *link; 791 792 if (genpd->status == GPD_STATE_POWER_OFF) 793 return; 794 795 if (genpd->suspended_count != genpd->device_count 796 || atomic_read(&genpd->sd_count) > 0) 797 return; 798 799 if (genpd->power_off) 800 genpd->power_off(genpd); 801 802 genpd->status = GPD_STATE_POWER_OFF; 803 804 list_for_each_entry(link, &genpd->slave_links, slave_node) { 805 genpd_sd_counter_dec(link->master); 806 pm_genpd_sync_poweroff(link->master); 807 } 808 } 809 810 /** 811 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters. 812 * @genpd: PM domain to power on. 813 * 814 * This function is only called in "noirq" and "syscore" stages of system power 815 * transitions, so it need not acquire locks (all of the "noirq" callbacks are 816 * executed sequentially, so it is guaranteed that it will never run twice in 817 * parallel). 818 */ 819 static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd) 820 { 821 struct gpd_link *link; 822 823 if (genpd->status != GPD_STATE_POWER_OFF) 824 return; 825 826 list_for_each_entry(link, &genpd->slave_links, slave_node) { 827 pm_genpd_sync_poweron(link->master); 828 genpd_sd_counter_inc(link->master); 829 } 830 831 if (genpd->power_on) 832 genpd->power_on(genpd); 833 834 genpd->status = GPD_STATE_ACTIVE; 835 } 836 837 /** 838 * resume_needed - Check whether to resume a device before system suspend. 839 * @dev: Device to check. 840 * @genpd: PM domain the device belongs to. 841 * 842 * There are two cases in which a device that can wake up the system from sleep 843 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled 844 * to wake up the system and it has to remain active for this purpose while the 845 * system is in the sleep state and (2) if the device is not enabled to wake up 846 * the system from sleep states and it generally doesn't generate wakeup signals 847 * by itself (those signals are generated on its behalf by other parts of the 848 * system). In the latter case it may be necessary to reconfigure the device's 849 * wakeup settings during system suspend, because it may have been set up to 850 * signal remote wakeup from the system's working state as needed by runtime PM. 851 * Return 'true' in either of the above cases. 852 */ 853 static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd) 854 { 855 bool active_wakeup; 856 857 if (!device_can_wakeup(dev)) 858 return false; 859 860 active_wakeup = genpd_dev_active_wakeup(genpd, dev); 861 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; 862 } 863 864 /** 865 * pm_genpd_prepare - Start power transition of a device in a PM domain. 866 * @dev: Device to start the transition of. 867 * 868 * Start a power transition of a device (during a system-wide power transition) 869 * under the assumption that its pm_domain field points to the domain member of 870 * an object of type struct generic_pm_domain representing a PM domain 871 * consisting of I/O devices. 872 */ 873 static int pm_genpd_prepare(struct device *dev) 874 { 875 struct generic_pm_domain *genpd; 876 int ret; 877 878 dev_dbg(dev, "%s()\n", __func__); 879 880 genpd = dev_to_genpd(dev); 881 if (IS_ERR(genpd)) 882 return -EINVAL; 883 884 /* 885 * If a wakeup request is pending for the device, it should be woken up 886 * at this point and a system wakeup event should be reported if it's 887 * set up to wake up the system from sleep states. 888 */ 889 pm_runtime_get_noresume(dev); 890 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) 891 pm_wakeup_event(dev, 0); 892 893 if (pm_wakeup_pending()) { 894 pm_runtime_put(dev); 895 return -EBUSY; 896 } 897 898 if (resume_needed(dev, genpd)) 899 pm_runtime_resume(dev); 900 901 genpd_acquire_lock(genpd); 902 903 if (genpd->prepared_count++ == 0) { 904 genpd->suspended_count = 0; 905 genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; 906 } 907 908 genpd_release_lock(genpd); 909 910 if (genpd->suspend_power_off) { 911 pm_runtime_put_noidle(dev); 912 return 0; 913 } 914 915 /* 916 * The PM domain must be in the GPD_STATE_ACTIVE state at this point, 917 * so pm_genpd_poweron() will return immediately, but if the device 918 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need 919 * to make it operational. 920 */ 921 pm_runtime_resume(dev); 922 __pm_runtime_disable(dev, false); 923 924 ret = pm_generic_prepare(dev); 925 if (ret) { 926 mutex_lock(&genpd->lock); 927 928 if (--genpd->prepared_count == 0) 929 genpd->suspend_power_off = false; 930 931 mutex_unlock(&genpd->lock); 932 pm_runtime_enable(dev); 933 } 934 935 pm_runtime_put(dev); 936 return ret; 937 } 938 939 /** 940 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain. 941 * @dev: Device to suspend. 942 * 943 * Suspend a device under the assumption that its pm_domain field points to the 944 * domain member of an object of type struct generic_pm_domain representing 945 * a PM domain consisting of I/O devices. 946 */ 947 static int pm_genpd_suspend(struct device *dev) 948 { 949 struct generic_pm_domain *genpd; 950 951 dev_dbg(dev, "%s()\n", __func__); 952 953 genpd = dev_to_genpd(dev); 954 if (IS_ERR(genpd)) 955 return -EINVAL; 956 957 return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev); 958 } 959 960 /** 961 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain. 962 * @dev: Device to suspend. 963 * 964 * Carry out a late suspend of a device under the assumption that its 965 * pm_domain field points to the domain member of an object of type 966 * struct generic_pm_domain representing a PM domain consisting of I/O devices. 967 */ 968 static int pm_genpd_suspend_late(struct device *dev) 969 { 970 struct generic_pm_domain *genpd; 971 972 dev_dbg(dev, "%s()\n", __func__); 973 974 genpd = dev_to_genpd(dev); 975 if (IS_ERR(genpd)) 976 return -EINVAL; 977 978 return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev); 979 } 980 981 /** 982 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain. 983 * @dev: Device to suspend. 984 * 985 * Stop the device and remove power from the domain if all devices in it have 986 * been stopped. 987 */ 988 static int pm_genpd_suspend_noirq(struct device *dev) 989 { 990 struct generic_pm_domain *genpd; 991 992 dev_dbg(dev, "%s()\n", __func__); 993 994 genpd = dev_to_genpd(dev); 995 if (IS_ERR(genpd)) 996 return -EINVAL; 997 998 if (genpd->suspend_power_off 999 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) 1000 return 0; 1001 1002 genpd_stop_dev(genpd, dev); 1003 1004 /* 1005 * Since all of the "noirq" callbacks are executed sequentially, it is 1006 * guaranteed that this function will never run twice in parallel for 1007 * the same PM domain, so it is not necessary to use locking here. 1008 */ 1009 genpd->suspended_count++; 1010 pm_genpd_sync_poweroff(genpd); 1011 1012 return 0; 1013 } 1014 1015 /** 1016 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain. 1017 * @dev: Device to resume. 1018 * 1019 * Restore power to the device's PM domain, if necessary, and start the device. 1020 */ 1021 static int pm_genpd_resume_noirq(struct device *dev) 1022 { 1023 struct generic_pm_domain *genpd; 1024 1025 dev_dbg(dev, "%s()\n", __func__); 1026 1027 genpd = dev_to_genpd(dev); 1028 if (IS_ERR(genpd)) 1029 return -EINVAL; 1030 1031 if (genpd->suspend_power_off 1032 || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) 1033 return 0; 1034 1035 /* 1036 * Since all of the "noirq" callbacks are executed sequentially, it is 1037 * guaranteed that this function will never run twice in parallel for 1038 * the same PM domain, so it is not necessary to use locking here. 1039 */ 1040 pm_genpd_sync_poweron(genpd); 1041 genpd->suspended_count--; 1042 1043 return genpd_start_dev(genpd, dev); 1044 } 1045 1046 /** 1047 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain. 1048 * @dev: Device to resume. 1049 * 1050 * Carry out an early resume of a device under the assumption that its 1051 * pm_domain field points to the domain member of an object of type 1052 * struct generic_pm_domain representing a power domain consisting of I/O 1053 * devices. 1054 */ 1055 static int pm_genpd_resume_early(struct device *dev) 1056 { 1057 struct generic_pm_domain *genpd; 1058 1059 dev_dbg(dev, "%s()\n", __func__); 1060 1061 genpd = dev_to_genpd(dev); 1062 if (IS_ERR(genpd)) 1063 return -EINVAL; 1064 1065 return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev); 1066 } 1067 1068 /** 1069 * pm_genpd_resume - Resume of device in an I/O PM domain. 1070 * @dev: Device to resume. 1071 * 1072 * Resume a device under the assumption that its pm_domain field points to the 1073 * domain member of an object of type struct generic_pm_domain representing 1074 * a power domain consisting of I/O devices. 1075 */ 1076 static int pm_genpd_resume(struct device *dev) 1077 { 1078 struct generic_pm_domain *genpd; 1079 1080 dev_dbg(dev, "%s()\n", __func__); 1081 1082 genpd = dev_to_genpd(dev); 1083 if (IS_ERR(genpd)) 1084 return -EINVAL; 1085 1086 return genpd->suspend_power_off ? 0 : pm_generic_resume(dev); 1087 } 1088 1089 /** 1090 * pm_genpd_freeze - Freezing a device in an I/O PM domain. 1091 * @dev: Device to freeze. 1092 * 1093 * Freeze a device under the assumption that its pm_domain field points to the 1094 * domain member of an object of type struct generic_pm_domain representing 1095 * a power domain consisting of I/O devices. 1096 */ 1097 static int pm_genpd_freeze(struct device *dev) 1098 { 1099 struct generic_pm_domain *genpd; 1100 1101 dev_dbg(dev, "%s()\n", __func__); 1102 1103 genpd = dev_to_genpd(dev); 1104 if (IS_ERR(genpd)) 1105 return -EINVAL; 1106 1107 return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev); 1108 } 1109 1110 /** 1111 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain. 1112 * @dev: Device to freeze. 1113 * 1114 * Carry out a late freeze of a device under the assumption that its 1115 * pm_domain field points to the domain member of an object of type 1116 * struct generic_pm_domain representing a power domain consisting of I/O 1117 * devices. 1118 */ 1119 static int pm_genpd_freeze_late(struct device *dev) 1120 { 1121 struct generic_pm_domain *genpd; 1122 1123 dev_dbg(dev, "%s()\n", __func__); 1124 1125 genpd = dev_to_genpd(dev); 1126 if (IS_ERR(genpd)) 1127 return -EINVAL; 1128 1129 return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev); 1130 } 1131 1132 /** 1133 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain. 1134 * @dev: Device to freeze. 1135 * 1136 * Carry out a late freeze of a device under the assumption that its 1137 * pm_domain field points to the domain member of an object of type 1138 * struct generic_pm_domain representing a power domain consisting of I/O 1139 * devices. 1140 */ 1141 static int pm_genpd_freeze_noirq(struct device *dev) 1142 { 1143 struct generic_pm_domain *genpd; 1144 1145 dev_dbg(dev, "%s()\n", __func__); 1146 1147 genpd = dev_to_genpd(dev); 1148 if (IS_ERR(genpd)) 1149 return -EINVAL; 1150 1151 return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev); 1152 } 1153 1154 /** 1155 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain. 1156 * @dev: Device to thaw. 1157 * 1158 * Start the device, unless power has been removed from the domain already 1159 * before the system transition. 1160 */ 1161 static int pm_genpd_thaw_noirq(struct device *dev) 1162 { 1163 struct generic_pm_domain *genpd; 1164 1165 dev_dbg(dev, "%s()\n", __func__); 1166 1167 genpd = dev_to_genpd(dev); 1168 if (IS_ERR(genpd)) 1169 return -EINVAL; 1170 1171 return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev); 1172 } 1173 1174 /** 1175 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain. 1176 * @dev: Device to thaw. 1177 * 1178 * Carry out an early thaw of a device under the assumption that its 1179 * pm_domain field points to the domain member of an object of type 1180 * struct generic_pm_domain representing a power domain consisting of I/O 1181 * devices. 1182 */ 1183 static int pm_genpd_thaw_early(struct device *dev) 1184 { 1185 struct generic_pm_domain *genpd; 1186 1187 dev_dbg(dev, "%s()\n", __func__); 1188 1189 genpd = dev_to_genpd(dev); 1190 if (IS_ERR(genpd)) 1191 return -EINVAL; 1192 1193 return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev); 1194 } 1195 1196 /** 1197 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain. 1198 * @dev: Device to thaw. 1199 * 1200 * Thaw a device under the assumption that its pm_domain field points to the 1201 * domain member of an object of type struct generic_pm_domain representing 1202 * a power domain consisting of I/O devices. 1203 */ 1204 static int pm_genpd_thaw(struct device *dev) 1205 { 1206 struct generic_pm_domain *genpd; 1207 1208 dev_dbg(dev, "%s()\n", __func__); 1209 1210 genpd = dev_to_genpd(dev); 1211 if (IS_ERR(genpd)) 1212 return -EINVAL; 1213 1214 return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev); 1215 } 1216 1217 /** 1218 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain. 1219 * @dev: Device to resume. 1220 * 1221 * Make sure the domain will be in the same power state as before the 1222 * hibernation the system is resuming from and start the device if necessary. 1223 */ 1224 static int pm_genpd_restore_noirq(struct device *dev) 1225 { 1226 struct generic_pm_domain *genpd; 1227 1228 dev_dbg(dev, "%s()\n", __func__); 1229 1230 genpd = dev_to_genpd(dev); 1231 if (IS_ERR(genpd)) 1232 return -EINVAL; 1233 1234 /* 1235 * Since all of the "noirq" callbacks are executed sequentially, it is 1236 * guaranteed that this function will never run twice in parallel for 1237 * the same PM domain, so it is not necessary to use locking here. 1238 * 1239 * At this point suspended_count == 0 means we are being run for the 1240 * first time for the given domain in the present cycle. 1241 */ 1242 if (genpd->suspended_count++ == 0) { 1243 /* 1244 * The boot kernel might put the domain into arbitrary state, 1245 * so make it appear as powered off to pm_genpd_sync_poweron(), 1246 * so that it tries to power it on in case it was really off. 1247 */ 1248 genpd->status = GPD_STATE_POWER_OFF; 1249 if (genpd->suspend_power_off) { 1250 /* 1251 * If the domain was off before the hibernation, make 1252 * sure it will be off going forward. 1253 */ 1254 if (genpd->power_off) 1255 genpd->power_off(genpd); 1256 1257 return 0; 1258 } 1259 } 1260 1261 if (genpd->suspend_power_off) 1262 return 0; 1263 1264 pm_genpd_sync_poweron(genpd); 1265 1266 return genpd_start_dev(genpd, dev); 1267 } 1268 1269 /** 1270 * pm_genpd_complete - Complete power transition of a device in a power domain. 1271 * @dev: Device to complete the transition of. 1272 * 1273 * Complete a power transition of a device (during a system-wide power 1274 * transition) under the assumption that its pm_domain field points to the 1275 * domain member of an object of type struct generic_pm_domain representing 1276 * a power domain consisting of I/O devices. 1277 */ 1278 static void pm_genpd_complete(struct device *dev) 1279 { 1280 struct generic_pm_domain *genpd; 1281 bool run_complete; 1282 1283 dev_dbg(dev, "%s()\n", __func__); 1284 1285 genpd = dev_to_genpd(dev); 1286 if (IS_ERR(genpd)) 1287 return; 1288 1289 mutex_lock(&genpd->lock); 1290 1291 run_complete = !genpd->suspend_power_off; 1292 if (--genpd->prepared_count == 0) 1293 genpd->suspend_power_off = false; 1294 1295 mutex_unlock(&genpd->lock); 1296 1297 if (run_complete) { 1298 pm_generic_complete(dev); 1299 pm_runtime_set_active(dev); 1300 pm_runtime_enable(dev); 1301 pm_request_idle(dev); 1302 } 1303 } 1304 1305 /** 1306 * genpd_syscore_switch - Switch power during system core suspend or resume. 1307 * @dev: Device that normally is marked as "always on" to switch power for. 1308 * 1309 * This routine may only be called during the system core (syscore) suspend or 1310 * resume phase for devices whose "always on" flags are set. 1311 */ 1312 static void genpd_syscore_switch(struct device *dev, bool suspend) 1313 { 1314 struct generic_pm_domain *genpd; 1315 1316 genpd = dev_to_genpd(dev); 1317 if (!pm_genpd_present(genpd)) 1318 return; 1319 1320 if (suspend) { 1321 genpd->suspended_count++; 1322 pm_genpd_sync_poweroff(genpd); 1323 } else { 1324 pm_genpd_sync_poweron(genpd); 1325 genpd->suspended_count--; 1326 } 1327 } 1328 1329 void pm_genpd_syscore_poweroff(struct device *dev) 1330 { 1331 genpd_syscore_switch(dev, true); 1332 } 1333 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff); 1334 1335 void pm_genpd_syscore_poweron(struct device *dev) 1336 { 1337 genpd_syscore_switch(dev, false); 1338 } 1339 EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron); 1340 1341 #else 1342 1343 #define pm_genpd_prepare NULL 1344 #define pm_genpd_suspend NULL 1345 #define pm_genpd_suspend_late NULL 1346 #define pm_genpd_suspend_noirq NULL 1347 #define pm_genpd_resume_early NULL 1348 #define pm_genpd_resume_noirq NULL 1349 #define pm_genpd_resume NULL 1350 #define pm_genpd_freeze NULL 1351 #define pm_genpd_freeze_late NULL 1352 #define pm_genpd_freeze_noirq NULL 1353 #define pm_genpd_thaw_early NULL 1354 #define pm_genpd_thaw_noirq NULL 1355 #define pm_genpd_thaw NULL 1356 #define pm_genpd_restore_noirq NULL 1357 #define pm_genpd_complete NULL 1358 1359 #endif /* CONFIG_PM_SLEEP */ 1360 1361 static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev) 1362 { 1363 struct generic_pm_domain_data *gpd_data; 1364 1365 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); 1366 if (!gpd_data) 1367 return NULL; 1368 1369 mutex_init(&gpd_data->lock); 1370 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; 1371 dev_pm_qos_add_notifier(dev, &gpd_data->nb); 1372 return gpd_data; 1373 } 1374 1375 static void __pm_genpd_free_dev_data(struct device *dev, 1376 struct generic_pm_domain_data *gpd_data) 1377 { 1378 dev_pm_qos_remove_notifier(dev, &gpd_data->nb); 1379 kfree(gpd_data); 1380 } 1381 1382 /** 1383 * __pm_genpd_add_device - Add a device to an I/O PM domain. 1384 * @genpd: PM domain to add the device to. 1385 * @dev: Device to be added. 1386 * @td: Set of PM QoS timing parameters to attach to the device. 1387 */ 1388 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, 1389 struct gpd_timing_data *td) 1390 { 1391 struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL; 1392 struct pm_domain_data *pdd; 1393 int ret = 0; 1394 1395 dev_dbg(dev, "%s()\n", __func__); 1396 1397 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) 1398 return -EINVAL; 1399 1400 gpd_data_new = __pm_genpd_alloc_dev_data(dev); 1401 if (!gpd_data_new) 1402 return -ENOMEM; 1403 1404 genpd_acquire_lock(genpd); 1405 1406 if (genpd->prepared_count > 0) { 1407 ret = -EAGAIN; 1408 goto out; 1409 } 1410 1411 list_for_each_entry(pdd, &genpd->dev_list, list_node) 1412 if (pdd->dev == dev) { 1413 ret = -EINVAL; 1414 goto out; 1415 } 1416 1417 ret = dev_pm_get_subsys_data(dev); 1418 if (ret) 1419 goto out; 1420 1421 genpd->device_count++; 1422 genpd->max_off_time_changed = true; 1423 1424 spin_lock_irq(&dev->power.lock); 1425 1426 dev->pm_domain = &genpd->domain; 1427 if (dev->power.subsys_data->domain_data) { 1428 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 1429 } else { 1430 gpd_data = gpd_data_new; 1431 dev->power.subsys_data->domain_data = &gpd_data->base; 1432 } 1433 gpd_data->refcount++; 1434 if (td) 1435 gpd_data->td = *td; 1436 1437 spin_unlock_irq(&dev->power.lock); 1438 1439 if (genpd->attach_dev) 1440 genpd->attach_dev(dev); 1441 1442 mutex_lock(&gpd_data->lock); 1443 gpd_data->base.dev = dev; 1444 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1445 gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF; 1446 gpd_data->td.constraint_changed = true; 1447 gpd_data->td.effective_constraint_ns = -1; 1448 mutex_unlock(&gpd_data->lock); 1449 1450 out: 1451 genpd_release_lock(genpd); 1452 1453 if (gpd_data != gpd_data_new) 1454 __pm_genpd_free_dev_data(dev, gpd_data_new); 1455 1456 return ret; 1457 } 1458 1459 /** 1460 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it. 1461 * @domain_name: Name of the PM domain to add the device to. 1462 * @dev: Device to be added. 1463 * @td: Set of PM QoS timing parameters to attach to the device. 1464 */ 1465 int __pm_genpd_name_add_device(const char *domain_name, struct device *dev, 1466 struct gpd_timing_data *td) 1467 { 1468 return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td); 1469 } 1470 1471 /** 1472 * pm_genpd_remove_device - Remove a device from an I/O PM domain. 1473 * @genpd: PM domain to remove the device from. 1474 * @dev: Device to be removed. 1475 */ 1476 int pm_genpd_remove_device(struct generic_pm_domain *genpd, 1477 struct device *dev) 1478 { 1479 struct generic_pm_domain_data *gpd_data; 1480 struct pm_domain_data *pdd; 1481 bool remove = false; 1482 int ret = 0; 1483 1484 dev_dbg(dev, "%s()\n", __func__); 1485 1486 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev) 1487 || IS_ERR_OR_NULL(dev->pm_domain) 1488 || pd_to_genpd(dev->pm_domain) != genpd) 1489 return -EINVAL; 1490 1491 genpd_acquire_lock(genpd); 1492 1493 if (genpd->prepared_count > 0) { 1494 ret = -EAGAIN; 1495 goto out; 1496 } 1497 1498 genpd->device_count--; 1499 genpd->max_off_time_changed = true; 1500 1501 if (genpd->detach_dev) 1502 genpd->detach_dev(dev); 1503 1504 spin_lock_irq(&dev->power.lock); 1505 1506 dev->pm_domain = NULL; 1507 pdd = dev->power.subsys_data->domain_data; 1508 list_del_init(&pdd->list_node); 1509 gpd_data = to_gpd_data(pdd); 1510 if (--gpd_data->refcount == 0) { 1511 dev->power.subsys_data->domain_data = NULL; 1512 remove = true; 1513 } 1514 1515 spin_unlock_irq(&dev->power.lock); 1516 1517 mutex_lock(&gpd_data->lock); 1518 pdd->dev = NULL; 1519 mutex_unlock(&gpd_data->lock); 1520 1521 genpd_release_lock(genpd); 1522 1523 dev_pm_put_subsys_data(dev); 1524 if (remove) 1525 __pm_genpd_free_dev_data(dev, gpd_data); 1526 1527 return 0; 1528 1529 out: 1530 genpd_release_lock(genpd); 1531 1532 return ret; 1533 } 1534 1535 /** 1536 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag. 1537 * @dev: Device to set/unset the flag for. 1538 * @val: The new value of the device's "need restore" flag. 1539 */ 1540 void pm_genpd_dev_need_restore(struct device *dev, bool val) 1541 { 1542 struct pm_subsys_data *psd; 1543 unsigned long flags; 1544 1545 spin_lock_irqsave(&dev->power.lock, flags); 1546 1547 psd = dev_to_psd(dev); 1548 if (psd && psd->domain_data) 1549 to_gpd_data(psd->domain_data)->need_restore = val; 1550 1551 spin_unlock_irqrestore(&dev->power.lock, flags); 1552 } 1553 EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore); 1554 1555 /** 1556 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 1557 * @genpd: Master PM domain to add the subdomain to. 1558 * @subdomain: Subdomain to be added. 1559 */ 1560 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, 1561 struct generic_pm_domain *subdomain) 1562 { 1563 struct gpd_link *link; 1564 int ret = 0; 1565 1566 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) 1567 || genpd == subdomain) 1568 return -EINVAL; 1569 1570 start: 1571 genpd_acquire_lock(genpd); 1572 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); 1573 1574 if (subdomain->status != GPD_STATE_POWER_OFF 1575 && subdomain->status != GPD_STATE_ACTIVE) { 1576 mutex_unlock(&subdomain->lock); 1577 genpd_release_lock(genpd); 1578 goto start; 1579 } 1580 1581 if (genpd->status == GPD_STATE_POWER_OFF 1582 && subdomain->status != GPD_STATE_POWER_OFF) { 1583 ret = -EINVAL; 1584 goto out; 1585 } 1586 1587 list_for_each_entry(link, &genpd->master_links, master_node) { 1588 if (link->slave == subdomain && link->master == genpd) { 1589 ret = -EINVAL; 1590 goto out; 1591 } 1592 } 1593 1594 link = kzalloc(sizeof(*link), GFP_KERNEL); 1595 if (!link) { 1596 ret = -ENOMEM; 1597 goto out; 1598 } 1599 link->master = genpd; 1600 list_add_tail(&link->master_node, &genpd->master_links); 1601 link->slave = subdomain; 1602 list_add_tail(&link->slave_node, &subdomain->slave_links); 1603 if (subdomain->status != GPD_STATE_POWER_OFF) 1604 genpd_sd_counter_inc(genpd); 1605 1606 out: 1607 mutex_unlock(&subdomain->lock); 1608 genpd_release_lock(genpd); 1609 1610 return ret; 1611 } 1612 1613 /** 1614 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain. 1615 * @master_name: Name of the master PM domain to add the subdomain to. 1616 * @subdomain_name: Name of the subdomain to be added. 1617 */ 1618 int pm_genpd_add_subdomain_names(const char *master_name, 1619 const char *subdomain_name) 1620 { 1621 struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd; 1622 1623 if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name)) 1624 return -EINVAL; 1625 1626 mutex_lock(&gpd_list_lock); 1627 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 1628 if (!master && !strcmp(gpd->name, master_name)) 1629 master = gpd; 1630 1631 if (!subdomain && !strcmp(gpd->name, subdomain_name)) 1632 subdomain = gpd; 1633 1634 if (master && subdomain) 1635 break; 1636 } 1637 mutex_unlock(&gpd_list_lock); 1638 1639 return pm_genpd_add_subdomain(master, subdomain); 1640 } 1641 1642 /** 1643 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 1644 * @genpd: Master PM domain to remove the subdomain from. 1645 * @subdomain: Subdomain to be removed. 1646 */ 1647 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, 1648 struct generic_pm_domain *subdomain) 1649 { 1650 struct gpd_link *link; 1651 int ret = -EINVAL; 1652 1653 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) 1654 return -EINVAL; 1655 1656 start: 1657 genpd_acquire_lock(genpd); 1658 1659 list_for_each_entry(link, &genpd->master_links, master_node) { 1660 if (link->slave != subdomain) 1661 continue; 1662 1663 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); 1664 1665 if (subdomain->status != GPD_STATE_POWER_OFF 1666 && subdomain->status != GPD_STATE_ACTIVE) { 1667 mutex_unlock(&subdomain->lock); 1668 genpd_release_lock(genpd); 1669 goto start; 1670 } 1671 1672 list_del(&link->master_node); 1673 list_del(&link->slave_node); 1674 kfree(link); 1675 if (subdomain->status != GPD_STATE_POWER_OFF) 1676 genpd_sd_counter_dec(genpd); 1677 1678 mutex_unlock(&subdomain->lock); 1679 1680 ret = 0; 1681 break; 1682 } 1683 1684 genpd_release_lock(genpd); 1685 1686 return ret; 1687 } 1688 1689 /** 1690 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle. 1691 * @genpd: PM domain to be connected with cpuidle. 1692 * @state: cpuidle state this domain can disable/enable. 1693 * 1694 * Make a PM domain behave as though it contained a CPU core, that is, instead 1695 * of calling its power down routine it will enable the given cpuidle state so 1696 * that the cpuidle subsystem can power it down (if possible and desirable). 1697 */ 1698 int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) 1699 { 1700 struct cpuidle_driver *cpuidle_drv; 1701 struct gpd_cpuidle_data *cpuidle_data; 1702 struct cpuidle_state *idle_state; 1703 int ret = 0; 1704 1705 if (IS_ERR_OR_NULL(genpd) || state < 0) 1706 return -EINVAL; 1707 1708 genpd_acquire_lock(genpd); 1709 1710 if (genpd->cpuidle_data) { 1711 ret = -EEXIST; 1712 goto out; 1713 } 1714 cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL); 1715 if (!cpuidle_data) { 1716 ret = -ENOMEM; 1717 goto out; 1718 } 1719 cpuidle_drv = cpuidle_driver_ref(); 1720 if (!cpuidle_drv) { 1721 ret = -ENODEV; 1722 goto err_drv; 1723 } 1724 if (cpuidle_drv->state_count <= state) { 1725 ret = -EINVAL; 1726 goto err; 1727 } 1728 idle_state = &cpuidle_drv->states[state]; 1729 if (!idle_state->disabled) { 1730 ret = -EAGAIN; 1731 goto err; 1732 } 1733 cpuidle_data->idle_state = idle_state; 1734 cpuidle_data->saved_exit_latency = idle_state->exit_latency; 1735 genpd->cpuidle_data = cpuidle_data; 1736 genpd_recalc_cpu_exit_latency(genpd); 1737 1738 out: 1739 genpd_release_lock(genpd); 1740 return ret; 1741 1742 err: 1743 cpuidle_driver_unref(); 1744 1745 err_drv: 1746 kfree(cpuidle_data); 1747 goto out; 1748 } 1749 1750 /** 1751 * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it. 1752 * @name: Name of the domain to connect to cpuidle. 1753 * @state: cpuidle state this domain can manipulate. 1754 */ 1755 int pm_genpd_name_attach_cpuidle(const char *name, int state) 1756 { 1757 return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state); 1758 } 1759 1760 /** 1761 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain. 1762 * @genpd: PM domain to remove the cpuidle connection from. 1763 * 1764 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the 1765 * given PM domain. 1766 */ 1767 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) 1768 { 1769 struct gpd_cpuidle_data *cpuidle_data; 1770 struct cpuidle_state *idle_state; 1771 int ret = 0; 1772 1773 if (IS_ERR_OR_NULL(genpd)) 1774 return -EINVAL; 1775 1776 genpd_acquire_lock(genpd); 1777 1778 cpuidle_data = genpd->cpuidle_data; 1779 if (!cpuidle_data) { 1780 ret = -ENODEV; 1781 goto out; 1782 } 1783 idle_state = cpuidle_data->idle_state; 1784 if (!idle_state->disabled) { 1785 ret = -EAGAIN; 1786 goto out; 1787 } 1788 idle_state->exit_latency = cpuidle_data->saved_exit_latency; 1789 cpuidle_driver_unref(); 1790 genpd->cpuidle_data = NULL; 1791 kfree(cpuidle_data); 1792 1793 out: 1794 genpd_release_lock(genpd); 1795 return ret; 1796 } 1797 1798 /** 1799 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it. 1800 * @name: Name of the domain to disconnect cpuidle from. 1801 */ 1802 int pm_genpd_name_detach_cpuidle(const char *name) 1803 { 1804 return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name)); 1805 } 1806 1807 /* Default device callbacks for generic PM domains. */ 1808 1809 /** 1810 * pm_genpd_default_save_state - Default "save device state" for PM domains. 1811 * @dev: Device to handle. 1812 */ 1813 static int pm_genpd_default_save_state(struct device *dev) 1814 { 1815 int (*cb)(struct device *__dev); 1816 1817 if (dev->type && dev->type->pm) 1818 cb = dev->type->pm->runtime_suspend; 1819 else if (dev->class && dev->class->pm) 1820 cb = dev->class->pm->runtime_suspend; 1821 else if (dev->bus && dev->bus->pm) 1822 cb = dev->bus->pm->runtime_suspend; 1823 else 1824 cb = NULL; 1825 1826 if (!cb && dev->driver && dev->driver->pm) 1827 cb = dev->driver->pm->runtime_suspend; 1828 1829 return cb ? cb(dev) : 0; 1830 } 1831 1832 /** 1833 * pm_genpd_default_restore_state - Default PM domains "restore device state". 1834 * @dev: Device to handle. 1835 */ 1836 static int pm_genpd_default_restore_state(struct device *dev) 1837 { 1838 int (*cb)(struct device *__dev); 1839 1840 if (dev->type && dev->type->pm) 1841 cb = dev->type->pm->runtime_resume; 1842 else if (dev->class && dev->class->pm) 1843 cb = dev->class->pm->runtime_resume; 1844 else if (dev->bus && dev->bus->pm) 1845 cb = dev->bus->pm->runtime_resume; 1846 else 1847 cb = NULL; 1848 1849 if (!cb && dev->driver && dev->driver->pm) 1850 cb = dev->driver->pm->runtime_resume; 1851 1852 return cb ? cb(dev) : 0; 1853 } 1854 1855 /** 1856 * pm_genpd_init - Initialize a generic I/O PM domain object. 1857 * @genpd: PM domain object to initialize. 1858 * @gov: PM domain governor to associate with the domain (may be NULL). 1859 * @is_off: Initial value of the domain's power_is_off field. 1860 */ 1861 void pm_genpd_init(struct generic_pm_domain *genpd, 1862 struct dev_power_governor *gov, bool is_off) 1863 { 1864 if (IS_ERR_OR_NULL(genpd)) 1865 return; 1866 1867 INIT_LIST_HEAD(&genpd->master_links); 1868 INIT_LIST_HEAD(&genpd->slave_links); 1869 INIT_LIST_HEAD(&genpd->dev_list); 1870 mutex_init(&genpd->lock); 1871 genpd->gov = gov; 1872 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); 1873 genpd->in_progress = 0; 1874 atomic_set(&genpd->sd_count, 0); 1875 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; 1876 init_waitqueue_head(&genpd->status_wait_queue); 1877 genpd->poweroff_task = NULL; 1878 genpd->resume_count = 0; 1879 genpd->device_count = 0; 1880 genpd->max_off_time_ns = -1; 1881 genpd->max_off_time_changed = true; 1882 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; 1883 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; 1884 genpd->domain.ops.prepare = pm_genpd_prepare; 1885 genpd->domain.ops.suspend = pm_genpd_suspend; 1886 genpd->domain.ops.suspend_late = pm_genpd_suspend_late; 1887 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; 1888 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; 1889 genpd->domain.ops.resume_early = pm_genpd_resume_early; 1890 genpd->domain.ops.resume = pm_genpd_resume; 1891 genpd->domain.ops.freeze = pm_genpd_freeze; 1892 genpd->domain.ops.freeze_late = pm_genpd_freeze_late; 1893 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; 1894 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; 1895 genpd->domain.ops.thaw_early = pm_genpd_thaw_early; 1896 genpd->domain.ops.thaw = pm_genpd_thaw; 1897 genpd->domain.ops.poweroff = pm_genpd_suspend; 1898 genpd->domain.ops.poweroff_late = pm_genpd_suspend_late; 1899 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; 1900 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; 1901 genpd->domain.ops.restore_early = pm_genpd_resume_early; 1902 genpd->domain.ops.restore = pm_genpd_resume; 1903 genpd->domain.ops.complete = pm_genpd_complete; 1904 genpd->dev_ops.save_state = pm_genpd_default_save_state; 1905 genpd->dev_ops.restore_state = pm_genpd_default_restore_state; 1906 mutex_lock(&gpd_list_lock); 1907 list_add(&genpd->gpd_list_node, &gpd_list); 1908 mutex_unlock(&gpd_list_lock); 1909 } 1910 1911 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF 1912 /* 1913 * Device Tree based PM domain providers. 1914 * 1915 * The code below implements generic device tree based PM domain providers that 1916 * bind device tree nodes with generic PM domains registered in the system. 1917 * 1918 * Any driver that registers generic PM domains and needs to support binding of 1919 * devices to these domains is supposed to register a PM domain provider, which 1920 * maps a PM domain specifier retrieved from the device tree to a PM domain. 1921 * 1922 * Two simple mapping functions have been provided for convenience: 1923 * - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping. 1924 * - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by 1925 * index. 1926 */ 1927 1928 /** 1929 * struct of_genpd_provider - PM domain provider registration structure 1930 * @link: Entry in global list of PM domain providers 1931 * @node: Pointer to device tree node of PM domain provider 1932 * @xlate: Provider-specific xlate callback mapping a set of specifier cells 1933 * into a PM domain. 1934 * @data: context pointer to be passed into @xlate callback 1935 */ 1936 struct of_genpd_provider { 1937 struct list_head link; 1938 struct device_node *node; 1939 genpd_xlate_t xlate; 1940 void *data; 1941 }; 1942 1943 /* List of registered PM domain providers. */ 1944 static LIST_HEAD(of_genpd_providers); 1945 /* Mutex to protect the list above. */ 1946 static DEFINE_MUTEX(of_genpd_mutex); 1947 1948 /** 1949 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping 1950 * @genpdspec: OF phandle args to map into a PM domain 1951 * @data: xlate function private data - pointer to struct generic_pm_domain 1952 * 1953 * This is a generic xlate function that can be used to model PM domains that 1954 * have their own device tree nodes. The private data of xlate function needs 1955 * to be a valid pointer to struct generic_pm_domain. 1956 */ 1957 struct generic_pm_domain *__of_genpd_xlate_simple( 1958 struct of_phandle_args *genpdspec, 1959 void *data) 1960 { 1961 if (genpdspec->args_count != 0) 1962 return ERR_PTR(-EINVAL); 1963 return data; 1964 } 1965 EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple); 1966 1967 /** 1968 * __of_genpd_xlate_onecell() - Xlate function using a single index. 1969 * @genpdspec: OF phandle args to map into a PM domain 1970 * @data: xlate function private data - pointer to struct genpd_onecell_data 1971 * 1972 * This is a generic xlate function that can be used to model simple PM domain 1973 * controllers that have one device tree node and provide multiple PM domains. 1974 * A single cell is used as an index into an array of PM domains specified in 1975 * the genpd_onecell_data struct when registering the provider. 1976 */ 1977 struct generic_pm_domain *__of_genpd_xlate_onecell( 1978 struct of_phandle_args *genpdspec, 1979 void *data) 1980 { 1981 struct genpd_onecell_data *genpd_data = data; 1982 unsigned int idx = genpdspec->args[0]; 1983 1984 if (genpdspec->args_count != 1) 1985 return ERR_PTR(-EINVAL); 1986 1987 if (idx >= genpd_data->num_domains) { 1988 pr_err("%s: invalid domain index %u\n", __func__, idx); 1989 return ERR_PTR(-EINVAL); 1990 } 1991 1992 if (!genpd_data->domains[idx]) 1993 return ERR_PTR(-ENOENT); 1994 1995 return genpd_data->domains[idx]; 1996 } 1997 EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell); 1998 1999 /** 2000 * __of_genpd_add_provider() - Register a PM domain provider for a node 2001 * @np: Device node pointer associated with the PM domain provider. 2002 * @xlate: Callback for decoding PM domain from phandle arguments. 2003 * @data: Context pointer for @xlate callback. 2004 */ 2005 int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, 2006 void *data) 2007 { 2008 struct of_genpd_provider *cp; 2009 2010 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 2011 if (!cp) 2012 return -ENOMEM; 2013 2014 cp->node = of_node_get(np); 2015 cp->data = data; 2016 cp->xlate = xlate; 2017 2018 mutex_lock(&of_genpd_mutex); 2019 list_add(&cp->link, &of_genpd_providers); 2020 mutex_unlock(&of_genpd_mutex); 2021 pr_debug("Added domain provider from %s\n", np->full_name); 2022 2023 return 0; 2024 } 2025 EXPORT_SYMBOL_GPL(__of_genpd_add_provider); 2026 2027 /** 2028 * of_genpd_del_provider() - Remove a previously registered PM domain provider 2029 * @np: Device node pointer associated with the PM domain provider 2030 */ 2031 void of_genpd_del_provider(struct device_node *np) 2032 { 2033 struct of_genpd_provider *cp; 2034 2035 mutex_lock(&of_genpd_mutex); 2036 list_for_each_entry(cp, &of_genpd_providers, link) { 2037 if (cp->node == np) { 2038 list_del(&cp->link); 2039 of_node_put(cp->node); 2040 kfree(cp); 2041 break; 2042 } 2043 } 2044 mutex_unlock(&of_genpd_mutex); 2045 } 2046 EXPORT_SYMBOL_GPL(of_genpd_del_provider); 2047 2048 /** 2049 * of_genpd_get_from_provider() - Look-up PM domain 2050 * @genpdspec: OF phandle args to use for look-up 2051 * 2052 * Looks for a PM domain provider under the node specified by @genpdspec and if 2053 * found, uses xlate function of the provider to map phandle args to a PM 2054 * domain. 2055 * 2056 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR() 2057 * on failure. 2058 */ 2059 static struct generic_pm_domain *of_genpd_get_from_provider( 2060 struct of_phandle_args *genpdspec) 2061 { 2062 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); 2063 struct of_genpd_provider *provider; 2064 2065 mutex_lock(&of_genpd_mutex); 2066 2067 /* Check if we have such a provider in our array */ 2068 list_for_each_entry(provider, &of_genpd_providers, link) { 2069 if (provider->node == genpdspec->np) 2070 genpd = provider->xlate(genpdspec, provider->data); 2071 if (!IS_ERR(genpd)) 2072 break; 2073 } 2074 2075 mutex_unlock(&of_genpd_mutex); 2076 2077 return genpd; 2078 } 2079 2080 /** 2081 * genpd_dev_pm_detach - Detach a device from its PM domain. 2082 * @dev: Device to attach. 2083 * @power_off: Currently not used 2084 * 2085 * Try to locate a corresponding generic PM domain, which the device was 2086 * attached to previously. If such is found, the device is detached from it. 2087 */ 2088 static void genpd_dev_pm_detach(struct device *dev, bool power_off) 2089 { 2090 struct generic_pm_domain *pd = NULL, *gpd; 2091 int ret = 0; 2092 2093 if (!dev->pm_domain) 2094 return; 2095 2096 mutex_lock(&gpd_list_lock); 2097 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 2098 if (&gpd->domain == dev->pm_domain) { 2099 pd = gpd; 2100 break; 2101 } 2102 } 2103 mutex_unlock(&gpd_list_lock); 2104 2105 if (!pd) 2106 return; 2107 2108 dev_dbg(dev, "removing from PM domain %s\n", pd->name); 2109 2110 while (1) { 2111 ret = pm_genpd_remove_device(pd, dev); 2112 if (ret != -EAGAIN) 2113 break; 2114 cond_resched(); 2115 } 2116 2117 if (ret < 0) { 2118 dev_err(dev, "failed to remove from PM domain %s: %d", 2119 pd->name, ret); 2120 return; 2121 } 2122 2123 /* Check if PM domain can be powered off after removing this device. */ 2124 genpd_queue_power_off_work(pd); 2125 } 2126 2127 /** 2128 * genpd_dev_pm_attach - Attach a device to its PM domain using DT. 2129 * @dev: Device to attach. 2130 * 2131 * Parse device's OF node to find a PM domain specifier. If such is found, 2132 * attaches the device to retrieved pm_domain ops. 2133 * 2134 * Both generic and legacy Samsung-specific DT bindings are supported to keep 2135 * backwards compatibility with existing DTBs. 2136 * 2137 * Returns 0 on successfully attached PM domain or negative error code. 2138 */ 2139 int genpd_dev_pm_attach(struct device *dev) 2140 { 2141 struct of_phandle_args pd_args; 2142 struct generic_pm_domain *pd; 2143 int ret; 2144 2145 if (!dev->of_node) 2146 return -ENODEV; 2147 2148 if (dev->pm_domain) 2149 return -EEXIST; 2150 2151 ret = of_parse_phandle_with_args(dev->of_node, "power-domains", 2152 "#power-domain-cells", 0, &pd_args); 2153 if (ret < 0) { 2154 if (ret != -ENOENT) 2155 return ret; 2156 2157 /* 2158 * Try legacy Samsung-specific bindings 2159 * (for backwards compatibility of DT ABI) 2160 */ 2161 pd_args.args_count = 0; 2162 pd_args.np = of_parse_phandle(dev->of_node, 2163 "samsung,power-domain", 0); 2164 if (!pd_args.np) 2165 return -ENOENT; 2166 } 2167 2168 pd = of_genpd_get_from_provider(&pd_args); 2169 if (IS_ERR(pd)) { 2170 dev_dbg(dev, "%s() failed to find PM domain: %ld\n", 2171 __func__, PTR_ERR(pd)); 2172 of_node_put(dev->of_node); 2173 return PTR_ERR(pd); 2174 } 2175 2176 dev_dbg(dev, "adding to PM domain %s\n", pd->name); 2177 2178 while (1) { 2179 ret = pm_genpd_add_device(pd, dev); 2180 if (ret != -EAGAIN) 2181 break; 2182 cond_resched(); 2183 } 2184 2185 if (ret < 0) { 2186 dev_err(dev, "failed to add to PM domain %s: %d", 2187 pd->name, ret); 2188 of_node_put(dev->of_node); 2189 return ret; 2190 } 2191 2192 dev->pm_domain->detach = genpd_dev_pm_detach; 2193 2194 return 0; 2195 } 2196 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); 2197 #endif 2198 2199 2200 /*** debugfs support ***/ 2201 2202 #ifdef CONFIG_PM_ADVANCED_DEBUG 2203 #include <linux/pm.h> 2204 #include <linux/device.h> 2205 #include <linux/debugfs.h> 2206 #include <linux/seq_file.h> 2207 #include <linux/init.h> 2208 #include <linux/kobject.h> 2209 static struct dentry *pm_genpd_debugfs_dir; 2210 2211 /* 2212 * TODO: This function is a slightly modified version of rtpm_status_show 2213 * from sysfs.c, but dependencies between PM_GENERIC_DOMAINS and PM_RUNTIME 2214 * are too loose to generalize it. 2215 */ 2216 #ifdef CONFIG_PM_RUNTIME 2217 static void rtpm_status_str(struct seq_file *s, struct device *dev) 2218 { 2219 static const char * const status_lookup[] = { 2220 [RPM_ACTIVE] = "active", 2221 [RPM_RESUMING] = "resuming", 2222 [RPM_SUSPENDED] = "suspended", 2223 [RPM_SUSPENDING] = "suspending" 2224 }; 2225 const char *p = ""; 2226 2227 if (dev->power.runtime_error) 2228 p = "error"; 2229 else if (dev->power.disable_depth) 2230 p = "unsupported"; 2231 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup)) 2232 p = status_lookup[dev->power.runtime_status]; 2233 else 2234 WARN_ON(1); 2235 2236 seq_puts(s, p); 2237 } 2238 #else 2239 static void rtpm_status_str(struct seq_file *s, struct device *dev) 2240 { 2241 seq_puts(s, "active"); 2242 } 2243 #endif 2244 2245 static int pm_genpd_summary_one(struct seq_file *s, 2246 struct generic_pm_domain *gpd) 2247 { 2248 static const char * const status_lookup[] = { 2249 [GPD_STATE_ACTIVE] = "on", 2250 [GPD_STATE_WAIT_MASTER] = "wait-master", 2251 [GPD_STATE_BUSY] = "busy", 2252 [GPD_STATE_REPEAT] = "off-in-progress", 2253 [GPD_STATE_POWER_OFF] = "off" 2254 }; 2255 struct pm_domain_data *pm_data; 2256 const char *kobj_path; 2257 struct gpd_link *link; 2258 int ret; 2259 2260 ret = mutex_lock_interruptible(&gpd->lock); 2261 if (ret) 2262 return -ERESTARTSYS; 2263 2264 if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup))) 2265 goto exit; 2266 seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]); 2267 2268 /* 2269 * Modifications on the list require holding locks on both 2270 * master and slave, so we are safe. 2271 * Also gpd->name is immutable. 2272 */ 2273 list_for_each_entry(link, &gpd->master_links, master_node) { 2274 seq_printf(s, "%s", link->slave->name); 2275 if (!list_is_last(&link->master_node, &gpd->master_links)) 2276 seq_puts(s, ", "); 2277 } 2278 2279 list_for_each_entry(pm_data, &gpd->dev_list, list_node) { 2280 kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL); 2281 if (kobj_path == NULL) 2282 continue; 2283 2284 seq_printf(s, "\n %-50s ", kobj_path); 2285 rtpm_status_str(s, pm_data->dev); 2286 kfree(kobj_path); 2287 } 2288 2289 seq_puts(s, "\n"); 2290 exit: 2291 mutex_unlock(&gpd->lock); 2292 2293 return 0; 2294 } 2295 2296 static int pm_genpd_summary_show(struct seq_file *s, void *data) 2297 { 2298 struct generic_pm_domain *gpd; 2299 int ret = 0; 2300 2301 seq_puts(s, " domain status slaves\n"); 2302 seq_puts(s, " /device runtime status\n"); 2303 seq_puts(s, "----------------------------------------------------------------------\n"); 2304 2305 ret = mutex_lock_interruptible(&gpd_list_lock); 2306 if (ret) 2307 return -ERESTARTSYS; 2308 2309 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 2310 ret = pm_genpd_summary_one(s, gpd); 2311 if (ret) 2312 break; 2313 } 2314 mutex_unlock(&gpd_list_lock); 2315 2316 return ret; 2317 } 2318 2319 static int pm_genpd_summary_open(struct inode *inode, struct file *file) 2320 { 2321 return single_open(file, pm_genpd_summary_show, NULL); 2322 } 2323 2324 static const struct file_operations pm_genpd_summary_fops = { 2325 .open = pm_genpd_summary_open, 2326 .read = seq_read, 2327 .llseek = seq_lseek, 2328 .release = single_release, 2329 }; 2330 2331 static int __init pm_genpd_debug_init(void) 2332 { 2333 struct dentry *d; 2334 2335 pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); 2336 2337 if (!pm_genpd_debugfs_dir) 2338 return -ENOMEM; 2339 2340 d = debugfs_create_file("pm_genpd_summary", S_IRUGO, 2341 pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops); 2342 if (!d) 2343 return -ENOMEM; 2344 2345 return 0; 2346 } 2347 late_initcall(pm_genpd_debug_init); 2348 2349 static void __exit pm_genpd_debug_exit(void) 2350 { 2351 debugfs_remove_recursive(pm_genpd_debugfs_dir); 2352 } 2353 __exitcall(pm_genpd_debug_exit); 2354 #endif /* CONFIG_PM_ADVANCED_DEBUG */ 2355