/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	} else {						\
		__routine = dev_gpd_data(dev)->ops.callback;	\
		if (__routine)					\
			__ret = __routine(dev);			\
	}							\
	__ret;							\
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
	if (!__retval && __elapsed > __td->field) {				\
		__td->field = __elapsed;					\
		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
		genpd->max_off_time_changed = true;				\
		__td->constraint_changed = true;				\
	}									\
	__retval;								\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(domain_name))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!strcmp(gpd->name, domain_name)) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);
	return genpd;
}

#ifdef CONFIG_PM

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic_inc();
}
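
/*
 * For reference, a minimal sketch (hand-expanded, not part of the build) of
 * what GENPD_DEV_TIMED_CALLBACK() amounts to for the "stop" case: the
 * callback is looked up first in the domain-wide dev_ops and then in the
 * per-device ops, it is timed with ktime_get(), and a new worst-case latency
 * is recorded so the governor can re-evaluate its constraints.  The function
 * name below is hypothetical.
 *
 *	int example_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
 *	{
 *		ktime_t start = ktime_get();
 *		struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
 *		int (*cb)(struct device *) = genpd->dev_ops.stop ?:
 *						dev_gpd_data(dev)->ops.stop;
 *		int ret = cb ? cb(dev) : 0;
 *		s64 elapsed = ktime_to_ns(ktime_sub(ktime_get(), start));
 *
 *		if (!ret && elapsed > td->stop_latency_ns) {
 *			td->stop_latency_ns = elapsed;
 *			genpd->max_off_time_changed = true;
 *			td->constraint_changed = true;
 *		}
 *		return ret;
 *	}
 */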

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
	s64 usecs64;

	if (!genpd->cpu_data)
		return;

	usecs64 = genpd->power_on_latency_ns;
	do_div(usecs64, NSEC_PER_USEC);
	usecs64 += genpd->cpu_data->saved_exit_latency;
	genpd->cpu_data->idle_state->exit_latency = usecs64;
}
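
/*
 * Worked example for the conversion above (illustrative numbers only): with
 * power_on_latency_ns == 2500000 (2.5 ms) and a saved cpuidle exit latency of
 * 100 us, do_div() turns 2500000 ns into 2500 us, so the idle state is
 * reported with exit_latency = 2500 + 100 = 2600 us.  In other words,
 * powering the domain back up is accounted for as extra wakeup latency of the
 * cpuidle state attached to it.
 */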

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	if (genpd->cpu_data) {
		cpuidle_pause_and_lock();
		genpd->cpu_data->idle_state->disabled = true;
		cpuidle_resume_and_unlock();
		goto out;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for parent" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	if (genpd->power_on) {
		ktime_t time_start = ktime_get();
		s64 elapsed_ns;

		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_on_latency_ns) {
			genpd->power_on_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			genpd_recalc_cpu_exit_latency(genpd);
			if (genpd->name)
				pr_warning("%s: Power-on latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

 out:
	genpd_set_active(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
	struct generic_pm_domain *genpd;

	genpd = pm_genpd_lookup_name(domain_name);
	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}

#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
				     struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

	mutex_lock(&gpd_data->lock);
	dev = gpd_data->base.dev;
	if (!dev) {
		mutex_unlock(&gpd_data->lock);
		return NOTIFY_DONE;
	}
	mutex_unlock(&gpd_data->lock);

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	if (gpd_data->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = true;

	return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	bool need_restore = gpd_data->need_restore;

	gpd_data->need_restore = false;
	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	if (need_restore)
		genpd_restore_dev(genpd, dev);

	mutex_lock(&genpd->lock);
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_WAIT_MASTER
		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	if (!work_pending(&genpd->power_off_work))
		queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;
	}

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->cpu_data) {
		/*
		 * If cpu_data is set, cpuidle should turn the domain off when
		 * the CPU in it is idle.  In that case we don't decrement the
		 * subdomain counts of the master domains, so that power is not
		 * removed from the current domain prematurely as a result of
		 * cutting off the masters' power.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		cpuidle_pause_and_lock();
		genpd->cpu_data->idle_state->disabled = false;
		cpuidle_resume_and_unlock();
		goto out;
	}

	if (genpd->power_off) {
		ktime_t time_start;
		s64 elapsed_ns;

		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		time_start = ktime_get();

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_off_latency_ns) {
			genpd->power_off_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			if (genpd->name)
				pr_warning("%s: Power-off latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		return genpd_start_dev_no_timing(genpd, dev);

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

	return 0;
}
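
/*
 * A minimal sketch (not part of this file) of how the two callbacks above are
 * reached in practice: a driver in a generic PM domain just uses the regular
 * runtime PM API and the PM core routes the calls here.  The driver name and
 * function below are hypothetical.
 *
 *	static int foo_do_transfer(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = pm_runtime_get_sync(dev);	// may call pm_genpd_runtime_resume()
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return ret;
 *		}
 *
 *		// ... access the hardware while the domain is powered ...
 *
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);	// may end in pm_genpd_poweroff()
 *		return 0;
 *	}
 */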

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}

#else

static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
					    unsigned long val, void *ptr)
{
	return NOTIFY_DONE;
}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend	NULL
#define pm_genpd_runtime_resume		NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(struct generic_pm_domain *genpd)
{
	struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
}

static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
}

static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
}

static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
}

static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
}

static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
}

static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
}

static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status != GPD_STATE_POWER_OFF)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master);
		genpd_sd_counter_inc(link->master);
	}

	if (genpd->power_on)
		genpd->power_on(genpd);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
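
/*
 * For clarity, the decision above as a truth table (derived directly from the
 * code; devices that cannot wake up the system never need this resume):
 *
 *	device_may_wakeup(dev)	active_wakeup	resume_needed()
 *	----------------------	-------------	---------------
 *	true			true		true	(case 1)
 *	true			false		false
 *	false			true		false
 *	false			false		true	(case 2)
 */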

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put_sync(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			if (genpd->power_off)
				genpd->power_off(genpd);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_runtime_idle(dev);
	}
}

/**
 * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to switch power off (true) or back on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
void pm_genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd);
	} else {
		pm_genpd_sync_poweron(genpd);
		genpd->suspended_count--;
	}
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
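
/*
 * A minimal sketch (hypothetical device and names, not part of this file) of
 * syscore hooks using the helper above for a timekeeping device that must
 * stay up until the very end of the suspend sequence:
 *
 *	static int foo_timer_syscore_suspend(void)
 *	{
 *		pm_genpd_syscore_switch(&foo_timer_pdev->dev, true);
 *		return 0;
 *	}
 *
 *	static void foo_timer_syscore_resume(void)
 *	{
 *		pm_genpd_syscore_switch(&foo_timer_pdev->dev, false);
 *	}
 */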

#else

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_late		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_early		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_late		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_early		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data)
		return NULL;

	mutex_init(&gpd_data->lock);
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
	return gpd_data;
}

static void __pm_genpd_free_dev_data(struct device *dev,
				     struct generic_pm_domain_data *gpd_data)
{
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
	kfree(gpd_data);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
	if (!gpd_data_new)
		return -ENOMEM;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = &genpd->domain;
	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	} else {
		gpd_data = gpd_data_new;
		dev->power.subsys_data->domain_data = &gpd_data->base;
	}
	gpd_data->refcount++;
	if (td)
		gpd_data->td = *td;

	spin_unlock_irq(&dev->power.lock);

	mutex_lock(&gpd_data->lock);
	gpd_data->base.dev = dev;
	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	mutex_unlock(&gpd_data->lock);

 out:
	genpd_release_lock(genpd);

	if (gpd_data != gpd_data_new)
		__pm_genpd_free_dev_data(dev, gpd_data_new);

	return ret;
}

/**
 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
 * @genpd_node: Device tree node pointer representing the PM domain to which
 *	the device is added.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
			     struct gpd_timing_data *td)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (gpd->of_node == genpd_node) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	if (!genpd)
		return -EINVAL;

	return __pm_genpd_add_device(genpd, dev, td);
}

/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
			       struct gpd_timing_data *td)
{
	return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
}
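
/*
 * A minimal usage sketch (hypothetical device and numbers): registering a
 * device with a domain, optionally seeding the timing data used by the
 * governor and the timed callbacks above.  Callers with no measured latencies
 * can pass NULL for @td and let the worst-case values be learned at run time.
 *
 *	static struct gpd_timing_data foo_td = {
 *		.stop_latency_ns	= 20000,
 *		.start_latency_ns	= 50000,
 *		.save_state_latency_ns	= 35000,
 *		.restore_state_latency_ns = 35000,
 *	};
 *
 *	ret = __pm_genpd_add_device(&foo_domain, &foo_pdev->dev, &foo_td);
 *	if (ret)
 *		dev_err(&foo_pdev->dev, "failed to join PM domain: %d\n", ret);
 */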

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	bool remove = false;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
	    || IS_ERR_OR_NULL(dev->pm_domain)
	    || pd_to_genpd(dev->pm_domain) != genpd)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = NULL;
	pdd = dev->power.subsys_data->domain_data;
	list_del_init(&pdd->list_node);
	gpd_data = to_gpd_data(pdd);
	if (--gpd_data->refcount == 0) {
		dev->power.subsys_data->domain_data = NULL;
		remove = true;
	}

	spin_unlock_irq(&dev->power.lock);

	mutex_lock(&gpd_data->lock);
	pdd->dev = NULL;
	mutex_unlock(&gpd_data->lock);

	genpd_release_lock(genpd);

	dev_pm_put_subsys_data(dev);
	if (remove)
		__pm_genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
 * @dev: Device to set/unset the flag for.
 * @val: The new value of the device's "need restore" flag.
 */
void pm_genpd_dev_need_restore(struct device *dev, bool val)
{
	struct pm_subsys_data *psd;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	psd = dev_to_psd(dev);
	if (psd && psd->domain_data)
		to_gpd_data(psd->domain_data)->need_restore = val;

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (subdomain->status != GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	if (genpd->status == GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
 * @master_name: Name of the master PM domain to add the subdomain to.
 * @subdomain_name: Name of the subdomain to be added.
 */
int pm_genpd_add_subdomain_names(const char *master_name,
				 const char *subdomain_name)
{
	struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;

	if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!master && !strcmp(gpd->name, master_name))
			master = gpd;

		if (!subdomain && !strcmp(gpd->name, subdomain_name))
			subdomain = gpd;

		if (master && subdomain)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return pm_genpd_add_subdomain(master, subdomain);
}
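
/*
 * A minimal sketch (hypothetical domain names) of building a two-level
 * hierarchy with the helpers above: a "bus" domain becomes the master of two
 * leaf domains, so it is kept powered while either leaf is in use and is only
 * powered down after both leaves are off.
 *
 *	pm_genpd_init(&bus_domain, NULL, false);
 *	pm_genpd_init(&gfx_domain, NULL, true);
 *	pm_genpd_init(&vpu_domain, NULL, true);
 *
 *	ret = pm_genpd_add_subdomain(&bus_domain, &gfx_domain);
 *	if (!ret)
 *		ret = pm_genpd_add_subdomain(&bus_domain, &vpu_domain);
 *
 *	// or, equivalently, by name once the domains are registered:
 *	ret = pm_genpd_add_subdomain_names("bus", "gfx");
 */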

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
 * @dev: Device to add the callbacks to.
 * @ops: Set of callbacks to add.
 * @td: Timing data to add to the device along with the callbacks (optional).
 *
 * Every call to this routine should be balanced with a call to
 * __pm_genpd_remove_callbacks() and they must not be nested.
 */
int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
			   struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
	int ret = 0;

	if (!(dev && ops))
		return -EINVAL;

	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
	if (!gpd_data_new)
		return -ENOMEM;

	pm_runtime_disable(dev);
	device_pm_lock();

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		goto out;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	} else {
		gpd_data = gpd_data_new;
		dev->power.subsys_data->domain_data = &gpd_data->base;
	}
	gpd_data->refcount++;
	gpd_data->ops = *ops;
	if (td)
		gpd_data->td = *td;

	spin_unlock_irq(&dev->power.lock);

 out:
	device_pm_unlock();
	pm_runtime_enable(dev);

	if (gpd_data != gpd_data_new)
		__pm_genpd_free_dev_data(dev, gpd_data_new);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
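
/*
 * A minimal sketch (hypothetical callbacks) of attaching per-device ops.
 * The default domain callbacks installed by pm_genpd_init() (see
 * pm_genpd_default_save_state() below) look these per-device ops up first
 * and only then fall back to the type/class/bus runtime PM callbacks.
 *
 *	static int foo_save_state(struct device *dev)
 *	{
 *		// stash device registers before the domain is powered off
 *		return 0;
 *	}
 *
 *	static struct gpd_dev_ops foo_dev_ops = {
 *		.save_state = foo_save_state,
 *	};
 *
 *	ret = pm_genpd_add_callbacks(&foo_pdev->dev, &foo_dev_ops, NULL);
 */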

/**
 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
 * @dev: Device to remove the callbacks from.
 * @clear_td: If set, clear the device's timing data too.
 *
 * This routine can only be called after pm_genpd_add_callbacks().
 */
int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
{
	struct generic_pm_domain_data *gpd_data = NULL;
	bool remove = false;
	int ret = 0;

	if (!(dev && dev->power.subsys_data))
		return -EINVAL;

	pm_runtime_disable(dev);
	device_pm_lock();

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
		gpd_data->ops = (struct gpd_dev_ops){ NULL };
		if (clear_td)
			gpd_data->td = (struct gpd_timing_data){ 0 };

		if (--gpd_data->refcount == 0) {
			dev->power.subsys_data->domain_data = NULL;
			remove = true;
		}
	} else {
		ret = -EINVAL;
	}

	spin_unlock_irq(&dev->power.lock);

	device_pm_unlock();
	pm_runtime_enable(dev);

	if (ret)
		return ret;

	dev_pm_put_subsys_data(dev);
	if (remove)
		__pm_genpd_free_dev_data(dev, gpd_data);

	return 0;
}
EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);

/**
 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
 * @genpd: PM domain to be connected with cpuidle.
 * @state: cpuidle state this domain can disable/enable.
 *
 * Make a PM domain behave as though it contained a CPU core, that is, instead
 * of calling its power down routine it will enable the given cpuidle state so
 * that the cpuidle subsystem can power it down (if possible and desirable).
 */
int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
	struct cpuidle_driver *cpuidle_drv;
	struct gpd_cpu_data *cpu_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || state < 0)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->cpu_data) {
		ret = -EEXIST;
		goto out;
	}
	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
	if (!cpu_data) {
		ret = -ENOMEM;
		goto out;
	}
	cpuidle_drv = cpuidle_driver_ref();
	if (!cpuidle_drv) {
		ret = -ENODEV;
		goto err_drv;
	}
	if (cpuidle_drv->state_count <= state) {
		ret = -EINVAL;
		goto err;
	}
	idle_state = &cpuidle_drv->states[state];
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto err;
	}
	cpu_data->idle_state = idle_state;
	cpu_data->saved_exit_latency = idle_state->exit_latency;
	genpd->cpu_data = cpu_data;
	genpd_recalc_cpu_exit_latency(genpd);

 out:
	genpd_release_lock(genpd);
	return ret;

 err:
	cpuidle_driver_unref();

 err_drv:
	kfree(cpu_data);
	goto out;
}

/**
 * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
 * @name: Name of the domain to connect to cpuidle.
 * @state: cpuidle state this domain can manipulate.
 */
int pm_genpd_name_attach_cpuidle(const char *name, int state)
{
	return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
}
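
/*
 * A minimal usage sketch (hypothetical domain name and state index): a
 * platform that powers its CPU core domain down from cpuidle state 1 would
 * attach the domain during setup.  Note that the state must start out
 * disabled; attaching it lets this code enable it only while the domain is
 * powered off.
 *
 *	ret = pm_genpd_name_attach_cpuidle("cpu-core-domain", 1);
 *	if (ret)
 *		pr_err("cpu-core-domain: cpuidle attach failed: %d\n", ret);
 */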

/**
 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
 * @genpd: PM domain to remove the cpuidle connection from.
 *
 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
 * given PM domain.
 */
int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
	struct gpd_cpu_data *cpu_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	cpu_data = genpd->cpu_data;
	if (!cpu_data) {
		ret = -ENODEV;
		goto out;
	}
	idle_state = cpu_data->idle_state;
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto out;
	}
	idle_state->exit_latency = cpu_data->saved_exit_latency;
	cpuidle_driver_unref();
	genpd->cpu_data = NULL;
	kfree(cpu_data);

 out:
	genpd_release_lock(genpd);
	return ret;
}

/**
 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
 * @name: Name of the domain to disconnect cpuidle from.
 */
int pm_genpd_name_detach_cpuidle(const char *name)
{
	return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
}

/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	cb = dev_gpd_data(dev)->ops.save_state;
	if (cb)
		return cb(dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	cb = dev_gpd_data(dev)->ops.restore_state;
	if (cb)
		return cb(dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_default_suspend - Default "device suspend" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;

	return cb ? cb(dev) : pm_generic_suspend(dev);
}

/**
 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_suspend_late(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;

	return cb ? cb(dev) : pm_generic_suspend_late(dev);
}
#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_default_suspend - Default "device suspend" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;

	return cb ? cb(dev) : pm_generic_suspend(dev);
}

/**
 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_suspend_late(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;

	return cb ? cb(dev) : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_resume_early(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;

	return cb ? cb(dev) : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_default_resume - Default "device resume" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_resume(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;

	return cb ? cb(dev) : pm_generic_resume(dev);
}

/**
 * pm_genpd_default_freeze - Default "device freeze" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_freeze(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;

	return cb ? cb(dev) : pm_generic_freeze(dev);
}

/**
 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_freeze_late(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;

	return cb ? cb(dev) : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_thaw_early(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;

	return cb ? cb(dev) : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_default_thaw - Default "device thaw" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_thaw(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;

	return cb ? cb(dev) : pm_generic_thaw(dev);
}

#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_default_suspend	NULL
#define pm_genpd_default_suspend_late	NULL
#define pm_genpd_default_resume_early	NULL
#define pm_genpd_default_resume		NULL
#define pm_genpd_default_freeze		NULL
#define pm_genpd_default_freeze_late	NULL
#define pm_genpd_default_thaw_early	NULL
#define pm_genpd_default_thaw		NULL

#endif /* !CONFIG_PM_SLEEP */
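
/*
 * Illustrative sketch (hypothetical bar_* names): the system-sleep defaults
 * above can likewise be overridden per device, with the pm_generic_*
 * helpers still usable as the fallback inside a custom callback:
 *
 *	static int bar_suspend(struct device *dev)
 *	{
 *		bar_save_wakeup_config(dev);
 *		return pm_generic_suspend(dev);
 *	}
 *
 *	static struct gpd_dev_ops bar_gpd_ops = {
 *		.suspend = bar_suspend,
 *	};
 */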
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial state of the domain (true if it is powered off).
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
	genpd->dev_ops.suspend = pm_genpd_default_suspend;
	genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
	genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
	genpd->dev_ops.resume = pm_genpd_default_resume;
	genpd->dev_ops.freeze = pm_genpd_default_freeze;
	genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
	genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
	genpd->dev_ops.thaw = pm_genpd_default_thaw;
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
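
/*
 * Illustrative usage sketch (hypothetical foo_pd and foo_pd_power_* names):
 * platform code typically fills in a generic_pm_domain, at minimum its name
 * and power_on/power_off handlers, and then registers it, here as initially
 * powered off:
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	pm_genpd_init(&foo_pd, NULL, true);
 */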