// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/power/main.c - PM subsystem core functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/pm-trace.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>

#include "power.h"

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to
 * temporarily change gfp_allowed_mask in order to avoid using I/O during
 * memory allocations while devices are suspended. To avoid races with the
 * suspend/hibernate code, they should always be called with
 * system_transition_mutex held (gfp_allowed_mask also should only be
 * modified with system_transition_mutex held, unless the suspend/hibernate
 * code is guaranteed not to run in parallel with that modification).
 */
static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

unsigned int lock_system_sleep(void)
{
	unsigned int flags = current->flags;
	current->flags |= PF_NOFREEZE;
	mutex_lock(&system_transition_mutex);
	return flags;
}
EXPORT_SYMBOL_GPL(lock_system_sleep);

void unlock_system_sleep(unsigned int flags)
{
	/*
	 * Don't use freezer_count() because we don't want the call to
	 * try_to_freeze() here.
	 *
	 * Reason:
	 * Fundamentally, we just don't need it, because freezing condition
	 * doesn't come into effect until we release the
	 * system_transition_mutex lock, since the freezer always works with
	 * system_transition_mutex held.
	 *
	 * More importantly, in the case of hibernation,
	 * unlock_system_sleep() gets called in snapshot_read() and
	 * snapshot_write() when the freezing condition is still in effect.
	 * Which means, if we use try_to_freeze() here, it would make them
	 * enter the refrigerator, thus causing hibernation to lock up.
	 */
	if (!(flags & PF_NOFREEZE))
		current->flags &= ~PF_NOFREEZE;
	mutex_unlock(&system_transition_mutex);
}
EXPORT_SYMBOL_GPL(unlock_system_sleep);
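
/*
 * Typical usage sketch (illustrative only): code that must not run
 * concurrently with a system-wide transition brackets its critical section
 * with the helpers above and passes the returned flags back on unlock:
 *
 *	unsigned int flags = lock_system_sleep();
 *	...  work that must not race with suspend/hibernate  ...
 *	unlock_system_sleep(flags);
 */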

void ksys_sync_helper(void)
{
	ktime_t start;
	long elapsed_msecs;

	start = ktime_get();
	ksys_sync();
	elapsed_msecs = ktime_to_ms(ktime_sub(ktime_get(), start));
	pr_info("Filesystems sync: %ld.%03ld seconds\n",
		elapsed_msecs / MSEC_PER_SEC, elapsed_msecs % MSEC_PER_SEC);
}
EXPORT_SYMBOL_GPL(ksys_sync_helper);

/* Routines for PM-transition notifications */

static BLOCKING_NOTIFIER_HEAD(pm_chain_head);

int register_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_pm_notifier);

int unregister_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);

void pm_report_hw_sleep_time(u64 t)
{
	suspend_stats.last_hw_sleep = t;
	suspend_stats.total_hw_sleep += t;
}
EXPORT_SYMBOL_GPL(pm_report_hw_sleep_time);

void pm_report_max_hw_sleep(u64 t)
{
	suspend_stats.max_hw_sleep = t;
}
EXPORT_SYMBOL_GPL(pm_report_max_hw_sleep);

int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down)
{
	int ret;

	ret = blocking_notifier_call_chain_robust(&pm_chain_head, val_up, val_down, NULL);

	return notifier_to_errno(ret);
}

int pm_notifier_call_chain(unsigned long val)
{
	return blocking_notifier_call_chain(&pm_chain_head, val, NULL);
}

/* If set, devices may be suspended and resumed asynchronously. */
int pm_async_enabled = 1;

static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", pm_async_enabled);
}

static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
			      const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	pm_async_enabled = val;
	return n;
}

power_attr(pm_async);

#ifdef CONFIG_SUSPEND
static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,
			      char *buf)
{
	char *s = buf;
	suspend_state_t i;

	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) {
		if (i >= PM_SUSPEND_MEM && cxl_mem_active())
			continue;
		if (mem_sleep_states[i]) {
			const char *label = mem_sleep_states[i];

			if (mem_sleep_current == i)
				s += sprintf(s, "[%s] ", label);
			else
				s += sprintf(s, "%s ", label);
		}
	}

	/* Convert the last space to a newline if needed. */
	if (s != buf)
		*(s-1) = '\n';

	return (s - buf);
}
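
/*
 * Illustrative example (paths and labels assumed typical): reading
 * /sys/power/mem_sleep lists the available variants with the current one in
 * brackets, e.g. "s2idle [deep]", and writing one of the listed labels (see
 * mem_sleep_store() below) selects the variant used for subsequent writes of
 * "mem" to /sys/power/state.
 */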

static suspend_state_t decode_suspend_state(const char *buf, size_t n)
{
	suspend_state_t state;
	char *p;
	int len;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
		const char *label = mem_sleep_states[state];

		if (label && len == strlen(label) && !strncmp(buf, label, len))
			return state;
	}

	return PM_SUSPEND_ON;
}

static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	suspend_state_t state;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	state = decode_suspend_state(buf, n);
	if (state < PM_SUSPEND_MAX && state > PM_SUSPEND_ON)
		mem_sleep_current = state;
	else
		error = -EINVAL;

 out:
	pm_autosleep_unlock();
	return error ? error : n;
}

power_attr(mem_sleep);

/*
 * sync_on_suspend: invoke ksys_sync_helper() before suspend.
 *
 * show() returns whether ksys_sync_helper() is invoked before suspend.
 * store() accepts 0 or 1.  0 disables ksys_sync_helper() and 1 enables it.
 */
bool sync_on_suspend_enabled = !IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC);

static ssize_t sync_on_suspend_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", sync_on_suspend_enabled);
}

static ssize_t sync_on_suspend_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	sync_on_suspend_enabled = !!val;
	return n;
}

power_attr(sync_on_suspend);
#endif /* CONFIG_SUSPEND */

#ifdef CONFIG_PM_SLEEP_DEBUG
int pm_test_level = TEST_NONE;

static const char * const pm_tests[__TEST_AFTER_LAST] = {
	[TEST_NONE] = "none",
	[TEST_CORE] = "core",
	[TEST_CPUS] = "processors",
	[TEST_PLATFORM] = "platform",
	[TEST_DEVICES] = "devices",
	[TEST_FREEZER] = "freezer",
};

static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	char *s = buf;
	int level;

	for (level = TEST_FIRST; level <= TEST_MAX; level++)
		if (pm_tests[level]) {
			if (level == pm_test_level)
				s += sprintf(s, "[%s] ", pm_tests[level]);
			else
				s += sprintf(s, "%s ", pm_tests[level]);
		}

	if (s != buf)
		/* convert the last space to a newline */
		*(s-1) = '\n';

	return (s - buf);
}

static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t n)
{
	unsigned int sleep_flags;
	const char * const *s;
	int error = -EINVAL;
	int level;
	char *p;
	int len;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	sleep_flags = lock_system_sleep();

	level = TEST_FIRST;
	for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
		if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
			pm_test_level = level;
			error = 0;
			break;
		}

	unlock_system_sleep(sleep_flags);

	return error ? error : n;
}

power_attr(pm_test);
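
/*
 * Illustrative test sequence for the pm_test levels above (assuming a shell
 * and the usual sysfs paths):
 *
 *	# echo devices > /sys/power/pm_test
 *	# echo mem > /sys/power/state
 *
 * This runs the suspend flow only up to the selected level, waits briefly
 * and then resumes, without actually entering the sleep state.
 */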
#endif /* CONFIG_PM_SLEEP_DEBUG */

static char *suspend_step_name(enum suspend_stat_step step)
{
	switch (step) {
	case SUSPEND_FREEZE:
		return "freeze";
	case SUSPEND_PREPARE:
		return "prepare";
	case SUSPEND_SUSPEND:
		return "suspend";
	case SUSPEND_SUSPEND_NOIRQ:
		return "suspend_noirq";
	case SUSPEND_RESUME_NOIRQ:
		return "resume_noirq";
	case SUSPEND_RESUME:
		return "resume";
	default:
		return "";
	}
}

#define suspend_attr(_name, format_str)				\
static ssize_t _name##_show(struct kobject *kobj,		\
		struct kobj_attribute *attr, char *buf)		\
{								\
	return sprintf(buf, format_str, suspend_stats._name);	\
}								\
static struct kobj_attribute _name = __ATTR_RO(_name)

suspend_attr(success, "%d\n");
suspend_attr(fail, "%d\n");
suspend_attr(failed_freeze, "%d\n");
suspend_attr(failed_prepare, "%d\n");
suspend_attr(failed_suspend, "%d\n");
suspend_attr(failed_suspend_late, "%d\n");
suspend_attr(failed_suspend_noirq, "%d\n");
suspend_attr(failed_resume, "%d\n");
suspend_attr(failed_resume_early, "%d\n");
suspend_attr(failed_resume_noirq, "%d\n");
suspend_attr(last_hw_sleep, "%llu\n");
suspend_attr(total_hw_sleep, "%llu\n");
suspend_attr(max_hw_sleep, "%llu\n");

static ssize_t last_failed_dev_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int index;
	char *last_failed_dev = NULL;

	index = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
	index %= REC_FAILED_NUM;
	last_failed_dev = suspend_stats.failed_devs[index];

	return sprintf(buf, "%s\n", last_failed_dev);
}
static struct kobj_attribute last_failed_dev = __ATTR_RO(last_failed_dev);

static ssize_t last_failed_errno_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int index;
	int last_failed_errno;

	index = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
	index %= REC_FAILED_NUM;
	last_failed_errno = suspend_stats.errno[index];

	return sprintf(buf, "%d\n", last_failed_errno);
}
static struct kobj_attribute last_failed_errno = __ATTR_RO(last_failed_errno);

static ssize_t last_failed_step_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int index;
	enum suspend_stat_step step;
	char *last_failed_step = NULL;

	index = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
	index %= REC_FAILED_NUM;
	step = suspend_stats.failed_steps[index];
	last_failed_step = suspend_step_name(step);

	return sprintf(buf, "%s\n", last_failed_step);
}
static struct kobj_attribute last_failed_step = __ATTR_RO(last_failed_step);

static struct attribute *suspend_attrs[] = {
	&success.attr,
	&fail.attr,
	&failed_freeze.attr,
	&failed_prepare.attr,
	&failed_suspend.attr,
	&failed_suspend_late.attr,
	&failed_suspend_noirq.attr,
	&failed_resume.attr,
	&failed_resume_early.attr,
	&failed_resume_noirq.attr,
	&last_failed_dev.attr,
	&last_failed_errno.attr,
	&last_failed_step.attr,
	&last_hw_sleep.attr,
	&total_hw_sleep.attr,
	&max_hw_sleep.attr,
	NULL,
};
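
/*
 * The *_hw_sleep attributes above are only useful when the firmware can
 * report hardware sleep residency; the visibility callback below ties their
 * exposure to ACPI low-power S0 idle support.
 */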

static umode_t suspend_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	if (attr != &last_hw_sleep.attr &&
	    attr != &total_hw_sleep.attr &&
	    attr != &max_hw_sleep.attr)
		return 0444;

#ifdef CONFIG_ACPI
	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
		return 0444;
#endif
	return 0;
}

static const struct attribute_group suspend_attr_group = {
	.name = "suspend_stats",
	.attrs = suspend_attrs,
	.is_visible = suspend_attr_is_visible,
};

#ifdef CONFIG_DEBUG_FS
static int suspend_stats_show(struct seq_file *s, void *unused)
{
	int i, index, last_dev, last_errno, last_step;

	last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
	last_dev %= REC_FAILED_NUM;
	last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
	last_errno %= REC_FAILED_NUM;
	last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
	last_step %= REC_FAILED_NUM;
	seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
		   "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
		   "success", suspend_stats.success,
		   "fail", suspend_stats.fail,
		   "failed_freeze", suspend_stats.failed_freeze,
		   "failed_prepare", suspend_stats.failed_prepare,
		   "failed_suspend", suspend_stats.failed_suspend,
		   "failed_suspend_late",
		   suspend_stats.failed_suspend_late,
		   "failed_suspend_noirq",
		   suspend_stats.failed_suspend_noirq,
		   "failed_resume", suspend_stats.failed_resume,
		   "failed_resume_early",
		   suspend_stats.failed_resume_early,
		   "failed_resume_noirq",
		   suspend_stats.failed_resume_noirq);
	seq_printf(s, "failures:\n last_failed_dev:\t%-s\n",
		   suspend_stats.failed_devs[last_dev]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_dev + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			   suspend_stats.failed_devs[index]);
	}
	seq_printf(s, " last_failed_errno:\t%-d\n",
		   suspend_stats.errno[last_errno]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_errno + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-d\n",
			   suspend_stats.errno[index]);
	}
	seq_printf(s, " last_failed_step:\t%-s\n",
		   suspend_step_name(
			   suspend_stats.failed_steps[last_step]));
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_step + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			   suspend_step_name(
				   suspend_stats.failed_steps[index]));
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(suspend_stats);
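
/*
 * The aggregate statistics above are typically inspected either through the
 * per-counter files in the "suspend_stats" sysfs group
 * (/sys/power/suspend_stats/) or, with CONFIG_DEBUG_FS, through the single
 * debugfs file created below (usually /sys/kernel/debug/suspend_stats).
 */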

static int __init pm_debugfs_init(void)
{
	debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
			NULL, NULL, &suspend_stats_fops);
	return 0;
}

late_initcall(pm_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_SLEEP_DEBUG
/*
 * pm_print_times: print time taken by devices to suspend and resume.
 *
 * show() returns whether printing of suspend and resume times is enabled.
 * store() accepts 0 or 1.  0 disables printing and 1 enables it.
 */
bool pm_print_times_enabled;

static ssize_t pm_print_times_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pm_print_times_enabled);
}

static ssize_t pm_print_times_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	pm_print_times_enabled = !!val;
	return n;
}

power_attr(pm_print_times);

static inline void pm_print_times_init(void)
{
	pm_print_times_enabled = !!initcall_debug;
}

static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	if (!pm_wakeup_irq())
		return -ENODATA;

	return sprintf(buf, "%u\n", pm_wakeup_irq());
}

power_attr_ro(pm_wakeup_irq);

bool pm_debug_messages_on __read_mostly;

static ssize_t pm_debug_messages_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pm_debug_messages_on);
}

static ssize_t pm_debug_messages_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	pm_debug_messages_on = !!val;
	return n;
}

power_attr(pm_debug_messages);

static int __init pm_debug_messages_setup(char *str)
{
	pm_debug_messages_on = true;
	return 1;
}
__setup("pm_debug_messages", pm_debug_messages_setup);

#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
#endif /* CONFIG_PM_SLEEP_DEBUG */

struct kobject *power_kobj;

/*
 * state - control system sleep states.
 *
 * show() returns available sleep state labels, which may be "mem", "standby",
 * "freeze" and "disk" (hibernation).
 * See Documentation/admin-guide/pm/sleep-states.rst for a description of
 * what they mean.
 *
 * store() accepts one of those strings, translates it into the proper
 * enumerated value, and initiates a suspend transition.
 */
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	char *s = buf;
#ifdef CONFIG_SUSPEND
	suspend_state_t i;

	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
		if (pm_states[i])
			s += sprintf(s,"%s ", pm_states[i]);

#endif
	if (hibernation_available())
		s += sprintf(s, "disk ");
	if (s != buf)
		/* convert the last space to a newline */
		*(s-1) = '\n';
	return (s - buf);
}
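
/*
 * Illustrative example of the interface above on a typical system:
 *
 *	# cat /sys/power/state
 *	freeze mem disk
 *	# echo mem > /sys/power/state		(start a suspend-to-RAM cycle)
 *
 * The exact set of labels depends on the kernel configuration and on what
 * the platform supports.
 */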

static suspend_state_t decode_state(const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
	suspend_state_t state;
#endif
	char *p;
	int len;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	/* Check hibernation first. */
	if (len == 4 && str_has_prefix(buf, "disk"))
		return PM_SUSPEND_MAX;

#ifdef CONFIG_SUSPEND
	for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
		const char *label = pm_states[state];

		if (label && len == strlen(label) && !strncmp(buf, label, len))
			return state;
	}
#endif

	return PM_SUSPEND_ON;
}

static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t n)
{
	suspend_state_t state;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	state = decode_state(buf, n);
	if (state < PM_SUSPEND_MAX) {
		if (state == PM_SUSPEND_MEM)
			state = mem_sleep_current;

		error = pm_suspend(state);
	} else if (state == PM_SUSPEND_MAX) {
		error = hibernate();
	} else {
		error = -EINVAL;
	}

 out:
	pm_autosleep_unlock();
	return error ? error : n;
}

power_attr(state);

#ifdef CONFIG_PM_SLEEP
/*
 * The 'wakeup_count' attribute, along with the functions defined in
 * drivers/base/power/wakeup.c, provides a means by which wakeup events can be
 * handled in a non-racy way.
 *
 * If a wakeup event occurs when the system is in a sleep state, it simply is
 * woken up.  In turn, if an event that would wake the system up from a sleep
 * state occurs when it is undergoing a transition to that sleep state, the
 * transition should be aborted.  Moreover, if such an event occurs when the
 * system is in the working state, an attempt to start a transition to the
 * given sleep state should fail during a certain period after the detection
 * of the event.  Using the 'state' attribute alone is not sufficient to
 * satisfy these requirements, because a wakeup event may occur exactly when
 * 'state' is being written to and may be delivered to user space right before
 * it is frozen, so the event will remain only partially processed until the
 * system is woken up by another event.  In particular, it won't cause the
 * transition to a sleep state to be aborted.
 *
 * This difficulty may be overcome if user space uses 'wakeup_count' before
 * writing to 'state'.  It first should read from 'wakeup_count' and store
 * the read value.  Then, after carrying out its own preparations for the
 * system transition to a sleep state, it should write the stored value to
 * 'wakeup_count'.  If that fails, at least one wakeup event has occurred
 * since 'wakeup_count' was read and 'state' should not be written to.
 * Otherwise, it is allowed to write to 'state', but the transition will be
 * aborted if there are any wakeup events detected after 'wakeup_count' was
 * written to.
 */
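
/*
 * Illustrative sketch of that user-space sequence (pseudo code, assuming the
 * usual sysfs paths):
 *
 *	read /sys/power/wakeup_count into 'count';
 *	carry out user-space suspend preparations;
 *	if (writing 'count' back to /sys/power/wakeup_count fails)
 *		abort - at least one wakeup event arrived in the meantime;
 *	else
 *		write "mem" to /sys/power/state;
 */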

static ssize_t wakeup_count_show(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 char *buf)
{
	unsigned int val;

	return pm_get_wakeup_count(&val, true) ?
		sprintf(buf, "%u\n", val) : -EINTR;
}

static ssize_t wakeup_count_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t n)
{
	unsigned int val;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	error = -EINVAL;
	if (sscanf(buf, "%u", &val) == 1) {
		if (pm_save_wakeup_count(val))
			error = n;
		else
			pm_print_active_wakeup_sources();
	}

 out:
	pm_autosleep_unlock();
	return error;
}

power_attr(wakeup_count);

#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t autosleep_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	suspend_state_t state = pm_autosleep_state();

	if (state == PM_SUSPEND_ON)
		return sprintf(buf, "off\n");

#ifdef CONFIG_SUSPEND
	if (state < PM_SUSPEND_MAX)
		return sprintf(buf, "%s\n", pm_states[state] ?
					pm_states[state] : "error");
#endif
#ifdef CONFIG_HIBERNATION
	return sprintf(buf, "disk\n");
#else
	return sprintf(buf, "error");
#endif
}

static ssize_t autosleep_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	suspend_state_t state = decode_state(buf, n);
	int error;

	if (state == PM_SUSPEND_ON
	    && strcmp(buf, "off") && strcmp(buf, "off\n"))
		return -EINVAL;

	if (state == PM_SUSPEND_MEM)
		state = mem_sleep_current;

	error = pm_autosleep_set_state(state);
	return error ? error : n;
}

power_attr(autosleep);
#endif /* CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_PM_WAKELOCKS
static ssize_t wake_lock_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	return pm_show_wakelocks(buf, true);
}

static ssize_t wake_lock_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	int error = pm_wake_lock(buf);
	return error ? error : n;
}

power_attr(wake_lock);

static ssize_t wake_unlock_show(struct kobject *kobj,
				struct kobj_attribute *attr,
				char *buf)
{
	return pm_show_wakelocks(buf, false);
}

static ssize_t wake_unlock_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t n)
{
	int error = pm_wake_unlock(buf);
	return error ? error : n;
}

power_attr(wake_unlock);
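
/*
 * Illustrative example for the wakelock interface above (the optional second
 * argument is a timeout, which the wakelock code interprets in nanoseconds):
 *
 *	# echo mylock 5000000000 > /sys/power/wake_lock
 *	# echo mylock > /sys/power/wake_unlock
 */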

#endif /* CONFIG_PM_WAKELOCKS */
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_TRACE
int pm_trace_enabled;

static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", pm_trace_enabled);
}

static ssize_t
pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t n)
{
	int val;

	if (sscanf(buf, "%d", &val) == 1) {
		pm_trace_enabled = !!val;
		if (pm_trace_enabled) {
			pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n"
				"PM: Correct system time has to be restored manually after resume.\n");
		}
		return n;
	}
	return -EINVAL;
}

power_attr(pm_trace);

static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       char *buf)
{
	return show_trace_dev_match(buf, PAGE_SIZE);
}

power_attr_ro(pm_trace_dev_match);

#endif /* CONFIG_PM_TRACE */

#ifdef CONFIG_FREEZER
static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", freeze_timeout_msecs);
}

static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	freeze_timeout_msecs = val;
	return n;
}

power_attr(pm_freeze_timeout);

#endif /* CONFIG_FREEZER */

static struct attribute * g[] = {
	&state_attr.attr,
#ifdef CONFIG_PM_TRACE
	&pm_trace_attr.attr,
	&pm_trace_dev_match_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP
	&pm_async_attr.attr,
	&wakeup_count_attr.attr,
#ifdef CONFIG_SUSPEND
	&mem_sleep_attr.attr,
	&sync_on_suspend_attr.attr,
#endif
#ifdef CONFIG_PM_AUTOSLEEP
	&autosleep_attr.attr,
#endif
#ifdef CONFIG_PM_WAKELOCKS
	&wake_lock_attr.attr,
	&wake_unlock_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP_DEBUG
	&pm_test_attr.attr,
	&pm_print_times_attr.attr,
	&pm_wakeup_irq_attr.attr,
	&pm_debug_messages_attr.attr,
#endif
#endif
#ifdef CONFIG_FREEZER
	&pm_freeze_timeout_attr.attr,
#endif
	NULL,
};

static const struct attribute_group attr_group = {
	.attrs = g,
};

static const struct attribute_group *attr_groups[] = {
	&attr_group,
#ifdef CONFIG_PM_SLEEP
	&suspend_attr_group,
#endif
	NULL,
};

struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);

static int __init pm_start_workqueue(void)
{
	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);

	return pm_wq ? 0 : -ENOMEM;
}

static int __init pm_init(void)
{
	int error = pm_start_workqueue();
	if (error)
		return error;
	hibernate_image_size_init();
	hibernate_reserved_size_init();
	pm_states_init();
	power_kobj = kobject_create_and_add("power", NULL);
	if (!power_kobj)
		return -ENOMEM;
	error = sysfs_create_groups(power_kobj, attr_groups);
	if (error)
		return error;
	pm_print_times_init();
	return pm_autosleep_init();
}

core_initcall(pm_init);