/*
 * sleep.c - ACPI sleep support.
 *
 * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (c) 2000-2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <asm/io.h>
#include <trace/events/power.h>

#include "internal.h"
#include "sleep.h"

/*
 * Some HW-full platforms do not have _S5, so they may need
 * to leverage efi power off for a shutdown.
 */
bool acpi_no_s5;
static u8 sleep_states[ACPI_S_STATE_COUNT];

static void acpi_sleep_tts_switch(u32 acpi_state)
{
        acpi_status status;

        status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
                /*
                 * OS can't evaluate the _TTS object correctly. Some warning
                 * message will be printed. But it won't break anything.
                 */
                printk(KERN_NOTICE "Failure in evaluating _TTS object\n");
        }
}

static int tts_notify_reboot(struct notifier_block *this,
                             unsigned long code, void *x)
{
        acpi_sleep_tts_switch(ACPI_STATE_S5);
        return NOTIFY_DONE;
}

static struct notifier_block tts_notifier = {
        .notifier_call = tts_notify_reboot,
        .next = NULL,
        .priority = 0,
};

static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
        /* do we have a wakeup address for S2 and S3? */
        if (acpi_state == ACPI_STATE_S3) {
                if (!acpi_wakeup_address)
                        return -EFAULT;
                acpi_set_waking_vector(acpi_wakeup_address);

        }
        ACPI_FLUSH_CPU_CACHE();
#endif
        printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n",
               acpi_state);
        acpi_enable_wakeup_devices(acpi_state);
        acpi_enter_sleep_state_prep(acpi_state);
        return 0;
}

static bool acpi_sleep_state_supported(u8 sleep_state)
{
        acpi_status status;
        u8 type_a, type_b;

        status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
        return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
                || (acpi_gbl_FADT.sleep_control.address
                        && acpi_gbl_FADT.sleep_status.address));
}

#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;

u32 acpi_target_system_state(void)
{
        return acpi_target_sleep_state;
}
EXPORT_SYMBOL_GPL(acpi_target_system_state);

static bool pwr_btn_event_pending;

/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * and to restore them during the subsequent resume.  Windows does that also for
 * suspend to RAM.  However, it is known that this mechanism does not work on
 * all machines, so we allow the user to disable it with the help of the
 * 'acpi_sleep=nonvs' kernel command line option.
 */
static bool nvs_nosave;

void __init acpi_nvs_nosave(void)
{
        nvs_nosave = true;
}

/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * but says nothing about saving NVS during S3.  Not all versions of Windows
 * save NVS on S3 suspend either, and it is clear that not all systems need
 * NVS to be saved at S3 time.
 * To improve suspend/resume time, allow the
 * user to disable saving NVS on S3 if their system does not require it, but
 * continue to save/restore NVS for S4 as specified.
 */
static bool nvs_nosave_s3;

void __init acpi_nvs_nosave_s3(void)
{
        nvs_nosave_s3 = true;
}

static int __init init_nvs_save_s3(const struct dmi_system_id *d)
{
        nvs_nosave_s3 = false;
        return 0;
}

/*
 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
 * user to request that behavior by using the 'acpi_old_suspend_ordering'
 * kernel command line option that causes the following variable to be set.
 */
static bool old_suspend_ordering;

void __init acpi_old_suspend_ordering(void)
{
        old_suspend_ordering = true;
}

static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
{
        acpi_old_suspend_ordering();
        return 0;
}

static int __init init_nvs_nosave(const struct dmi_system_id *d)
{
        acpi_nvs_nosave();
        return 0;
}

static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
        {
                .callback = init_old_suspend_ordering,
                .ident = "Abit KN9 (nForce4 variant)",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
                        DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
                },
        },
        {
                .callback = init_old_suspend_ordering,
                .ident = "HP xw4600 Workstation",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
                },
        },
        {
                .callback = init_old_suspend_ordering,
                .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
                        DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
                },
        },
        {
                .callback = init_old_suspend_ordering,
                .ident = "Panasonic CF51-2L",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR,
                                  "Matsushita Electric Industrial Co.,Ltd."),
                        DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VGN-FW41E_H",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VGN-FW21E",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VGN-FW21M",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VPCEB17FX",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VGN-SR11M",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Everex StepNote Series",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VPCEB1Z1E",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VGN-NW130D",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VPCCW29FX",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Averatec AV1020-ED2",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
                },
        },
        {
                .callback = init_old_suspend_ordering,
                .ident = "Asus A8N-SLI DELUXE",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
                        DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
                },
        },
        {
                .callback = init_old_suspend_ordering,
                .ident = "Asus A8N-SLI Premium",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
                        DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VGN-SR26GN_P",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VPCEB1S1E",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Sony Vaio VGN-FW520F",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Asus K54C",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
                },
        },
        {
                .callback = init_nvs_nosave,
                .ident = "Asus K54HR",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
                },
        },
        /*
         * https://bugzilla.kernel.org/show_bug.cgi?id=189431
         * Lenovo G50-45 is a platform later than 2012, but needs NVS memory
         * saving during S3.
         */
        {
                .callback = init_nvs_save_s3,
                .ident = "Lenovo G50-45",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
                },
        },
        {},
};

static void __init acpi_sleep_dmi_check(void)
{
        int year;

        if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2012)
                acpi_nvs_nosave_s3();

        dmi_check_system(acpisleep_dmi_table);
}

/**
 * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
 */
static int acpi_pm_freeze(void)
{
        acpi_disable_all_gpes();
        acpi_os_wait_events_complete();
        acpi_ec_block_transactions();
        return 0;
}

/**
 * acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
 */
static int acpi_pm_pre_suspend(void)
{
        acpi_pm_freeze();
        return suspend_nvs_save();
}

/**
 * __acpi_pm_prepare - Prepare the platform to enter the target state.
 *
 * If necessary, set the firmware waking vector and do arch-specific
 * nastiness to get the wakeup code to the waking vector.
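 *
 * If preparation fails, the target sleep state is reset to ACPI_STATE_S0 so
 * that a subsequent acpi_pm_finish() will not try to leave a sleep state
 * that was never entered.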
 */
static int __acpi_pm_prepare(void)
{
        int error = acpi_sleep_prepare(acpi_target_sleep_state);
        if (error)
                acpi_target_sleep_state = ACPI_STATE_S0;

        return error;
}

/**
 * acpi_pm_prepare - Prepare the platform to enter the target sleep
 *      state and disable the GPEs.
 */
static int acpi_pm_prepare(void)
{
        int error = __acpi_pm_prepare();
        if (!error)
                error = acpi_pm_pre_suspend();

        return error;
}

static int find_powerf_dev(struct device *dev, void *data)
{
        struct acpi_device *device = to_acpi_device(dev);
        const char *hid = acpi_device_hid(device);

        return !strcmp(hid, ACPI_BUTTON_HID_POWERF);
}

/**
 * acpi_pm_finish - Instruct the platform to leave a sleep state.
 *
 * This is called after we wake back up (or if entering the sleep state
 * failed).
 */
static void acpi_pm_finish(void)
{
        struct device *pwr_btn_dev;
        u32 acpi_state = acpi_target_sleep_state;

        acpi_ec_unblock_transactions();
        suspend_nvs_free();

        if (acpi_state == ACPI_STATE_S0)
                return;

        printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n",
               acpi_state);
        acpi_disable_wakeup_devices(acpi_state);
        acpi_leave_sleep_state(acpi_state);

        /* reset firmware waking vector */
        acpi_set_waking_vector(0);

        acpi_target_sleep_state = ACPI_STATE_S0;

        acpi_resume_power_resources();

        /*
         * If we were woken with the fixed power button, provide a small
         * hint to userspace in the form of a wakeup event on the fixed power
         * button device (if it can be found).
         *
         * We delay the event generation until now, as the PM layer requires
         * timekeeping to be running before we generate events.
         */
        if (!pwr_btn_event_pending)
                return;

        pwr_btn_event_pending = false;
        pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL,
                                      find_powerf_dev);
        if (pwr_btn_dev) {
                pm_wakeup_event(pwr_btn_dev, 0);
                put_device(pwr_btn_dev);
        }
}

/**
 * acpi_pm_start - Start system PM transition.
 */
static void acpi_pm_start(u32 acpi_state)
{
        acpi_target_sleep_state = acpi_state;
        acpi_sleep_tts_switch(acpi_target_sleep_state);
        acpi_scan_lock_acquire();
}

/**
 * acpi_pm_end - Finish up system PM transition.
 */
static void acpi_pm_end(void)
{
        acpi_turn_off_unused_power_resources();
        acpi_scan_lock_release();
        /*
         * This is necessary in case acpi_pm_finish() is not called during a
         * failing transition to a sleep state.
         */
        acpi_target_sleep_state = ACPI_STATE_S0;
        acpi_sleep_tts_switch(acpi_target_sleep_state);
}
#else /* !CONFIG_ACPI_SLEEP */
#define acpi_target_sleep_state ACPI_STATE_S0
static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */

#ifdef CONFIG_SUSPEND
static u32 acpi_suspend_states[] = {
        [PM_SUSPEND_ON] = ACPI_STATE_S0,
        [PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
        [PM_SUSPEND_MEM] = ACPI_STATE_S3,
        [PM_SUSPEND_MAX] = ACPI_STATE_S5
};

/**
 * acpi_suspend_begin - Set the target system sleep state to the state
 *      associated with given @pm_state, if supported.
 */
static int acpi_suspend_begin(suspend_state_t pm_state)
{
        u32 acpi_state = acpi_suspend_states[pm_state];
        int error;

        error = (nvs_nosave || nvs_nosave_s3) ?
                0 : suspend_nvs_alloc();
        if (error)
                return error;

        if (!sleep_states[acpi_state]) {
                pr_err("ACPI does not support sleep state S%u\n", acpi_state);
                return -ENOSYS;
        }
        if (acpi_state > ACPI_STATE_S1)
                pm_set_suspend_via_firmware();

        acpi_pm_start(acpi_state);
        return 0;
}

/**
 * acpi_suspend_enter - Actually enter a sleep state.
 * @pm_state: ignored
 *
 * Flush caches and go to sleep.  For STR we have to call arch-specific
 * assembly, which in turn calls acpi_enter_sleep_state().
 * It's unfortunate, but it works.  Please fix if you're feeling frisky.
 */
static int acpi_suspend_enter(suspend_state_t pm_state)
{
        acpi_status status = AE_OK;
        u32 acpi_state = acpi_target_sleep_state;
        int error;

        ACPI_FLUSH_CPU_CACHE();

        trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
        switch (acpi_state) {
        case ACPI_STATE_S1:
                barrier();
                status = acpi_enter_sleep_state(acpi_state);
                break;

        case ACPI_STATE_S3:
                if (!acpi_suspend_lowlevel)
                        return -ENOSYS;
                error = acpi_suspend_lowlevel();
                if (error)
                        return error;
                pr_info(PREFIX "Low-level resume complete\n");
                pm_set_resume_via_firmware();
                break;
        }
        trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false);

        /* This violates the spec but is required for bug compatibility. */
        acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);

        /* Reprogram control registers */
        acpi_leave_sleep_state_prep(acpi_state);

        /*
         * The ACPI 3.0 spec (p. 62) says that it is the responsibility of the
         * OSPM to clear the status bit [implying that the POWER_BUTTON event
         * should not reach userspace].
         *
         * However, we do generate a small hint for userspace in the form of
         * a wakeup event.  We flag this condition for now and generate the
         * event later, as we're currently too early in resume to be able to
         * generate wakeup events.
         */
        if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
                acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;

                acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);

                if (pwr_btn_status & ACPI_EVENT_FLAG_STATUS_SET) {
                        acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
                        /* Flag for later */
                        pwr_btn_event_pending = true;
                }
        }

        /*
         * Disable and clear GPE status before interrupts are enabled.  Some
         * GPEs (like wakeup GPEs) have no handler, so this avoids them
         * misfiring.  acpi_leave_sleep_state() will re-enable specific GPEs
         * later.
         */
        acpi_disable_all_gpes();
        /* Allow EC transactions to happen. */
        acpi_ec_unblock_transactions();

        suspend_nvs_restore();

        return ACPI_SUCCESS(status) ?
                0 : -EFAULT;
}

static int acpi_suspend_state_valid(suspend_state_t pm_state)
{
        u32 acpi_state;

        switch (pm_state) {
        case PM_SUSPEND_ON:
        case PM_SUSPEND_STANDBY:
        case PM_SUSPEND_MEM:
                acpi_state = acpi_suspend_states[pm_state];

                return sleep_states[acpi_state];
        default:
                return 0;
        }
}

static const struct platform_suspend_ops acpi_suspend_ops = {
        .valid = acpi_suspend_state_valid,
        .begin = acpi_suspend_begin,
        .prepare_late = acpi_pm_prepare,
        .enter = acpi_suspend_enter,
        .wake = acpi_pm_finish,
        .end = acpi_pm_end,
};

/**
 * acpi_suspend_begin_old - Set the target system sleep state to the
 *      state associated with given @pm_state, if supported, and
 *      execute the _PTS control method.  This function is used if the
 *      pre-ACPI 2.0 suspend ordering has been requested.
 */
static int acpi_suspend_begin_old(suspend_state_t pm_state)
{
        int error = acpi_suspend_begin(pm_state);
        if (!error)
                error = __acpi_pm_prepare();

        return error;
}

/*
 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 * been requested.
 */
static const struct platform_suspend_ops acpi_suspend_ops_old = {
        .valid = acpi_suspend_state_valid,
        .begin = acpi_suspend_begin_old,
        .prepare_late = acpi_pm_pre_suspend,
        .enter = acpi_suspend_enter,
        .wake = acpi_pm_finish,
        .end = acpi_pm_end,
        .recover = acpi_pm_finish,
};

static bool s2idle_in_progress;
static bool s2idle_wakeup;

/*
 * On platforms supporting the Low Power S0 Idle interface there is an ACPI
 * device object with the PNP0D80 compatible device ID (System Power Management
 * Controller) and a specific _DSM method under it.  That method, if present,
 * can be used to indicate to the platform that the OS is transitioning into a
 * low-power state in which certain types of activity are not desirable or that
 * it is leaving such a state, which allows the platform to adjust its operation
 * mode accordingly.
 */
static const struct acpi_device_id lps0_device_ids[] = {
        {"PNP0D80", },
        {"", },
};

#define ACPI_LPS0_DSM_UUID      "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"

#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS        1
#define ACPI_LPS0_SCREEN_OFF    3
#define ACPI_LPS0_SCREEN_ON     4
#define ACPI_LPS0_ENTRY         5
#define ACPI_LPS0_EXIT          6

#define ACPI_S2IDLE_FUNC_MASK   ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))

static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
static char lps0_dsm_func_mask;

/* Device constraint entry structure */
struct lpi_device_info {
        char *name;
        int enabled;
        union acpi_object *package;
};

/* Constraint package structure */
struct lpi_device_constraint {
        int uid;
        int min_dstate;
        int function_states;
};

struct lpi_constraints {
        acpi_handle handle;
        int min_dstate;
};

static struct lpi_constraints *lpi_constraints_table;
static int lpi_constraints_table_size;

static void lpi_device_get_constraints(void)
{
        union acpi_object *out_obj;
        int i;

        out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
                                          1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
                                          NULL, ACPI_TYPE_PACKAGE);

        acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
                          out_obj ? "successful" : "failed");

        if (!out_obj)
                return;

        lpi_constraints_table = kcalloc(out_obj->package.count,
                                        sizeof(*lpi_constraints_table),
                                        GFP_KERNEL);
        if (!lpi_constraints_table)
                goto free_acpi_buffer;

        acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n");

        for (i = 0; i < out_obj->package.count; i++) {
                struct lpi_constraints *constraint;
                acpi_status status;
                union acpi_object *package = &out_obj->package.elements[i];
                struct lpi_device_info info = { };
                int package_count = 0, j;

                if (!package)
                        continue;

                for (j = 0; j < package->package.count; ++j) {
                        union acpi_object *element =
                                        &(package->package.elements[j]);

                        switch (element->type) {
                        case ACPI_TYPE_INTEGER:
                                info.enabled = element->integer.value;
                                break;
                        case ACPI_TYPE_STRING:
                                info.name = element->string.pointer;
                                break;
                        case ACPI_TYPE_PACKAGE:
                                package_count = element->package.count;
                                info.package = element->package.elements;
                                break;
                        }
                }

                if (!info.enabled || !info.package || !info.name)
                        continue;

                constraint = &lpi_constraints_table[lpi_constraints_table_size];

                status = acpi_get_handle(NULL, info.name, &constraint->handle);
                if (ACPI_FAILURE(status))
                        continue;

                acpi_handle_debug(lps0_device_handle,
                                  "index:%d Name:%s\n", i, info.name);

                constraint->min_dstate = -1;

                for (j = 0; j < package_count; ++j) {
                        union acpi_object *info_obj = &info.package[j];
                        union acpi_object *cnstr_pkg;
                        union acpi_object *obj;
                        struct lpi_device_constraint dev_info;

                        switch (info_obj->type) {
                        case ACPI_TYPE_INTEGER:
                                /* version */
                                break;
                        case ACPI_TYPE_PACKAGE:
                                if (info_obj->package.count < 2)
                                        break;

                                cnstr_pkg = info_obj->package.elements;
                                obj = &cnstr_pkg[0];
                                dev_info.uid = obj->integer.value;
                                obj = &cnstr_pkg[1];
                                dev_info.min_dstate = obj->integer.value;

                                acpi_handle_debug(lps0_device_handle,
                                        "uid:%d min_dstate:%s\n",
                                        dev_info.uid,
                                        acpi_power_state_string(dev_info.min_dstate));

                                constraint->min_dstate = dev_info.min_dstate;
                                break;
                        }
                }

                if (constraint->min_dstate < 0) {
                        acpi_handle_debug(lps0_device_handle,
                                          "Incomplete constraint defined\n");
                        continue;
                }

                lpi_constraints_table_size++;
        }

        acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");

free_acpi_buffer:
        ACPI_FREE(out_obj);
}

static void lpi_check_constraints(void)
{
        int i;

        for (i = 0; i < lpi_constraints_table_size; ++i) {
                struct acpi_device *adev;

                if (acpi_bus_get_device(lpi_constraints_table[i].handle, &adev))
                        continue;

                acpi_handle_debug(adev->handle,
                        "LPI: required min power state:%s current power state:%s\n",
                        acpi_power_state_string(lpi_constraints_table[i].min_dstate),
                        acpi_power_state_string(adev->power.state));

                if (!adev->flags.power_manageable) {
                        acpi_handle_info(adev->handle, "LPI: Device not power manageable\n");
                        continue;
                }

                if (adev->power.state < lpi_constraints_table[i].min_dstate)
                        acpi_handle_info(adev->handle,
                                "LPI: Constraint not met; min power state:%s current power state:%s\n",
                                acpi_power_state_string(lpi_constraints_table[i].min_dstate),
                                acpi_power_state_string(adev->power.state));
        }
}

static void acpi_sleep_run_lps0_dsm(unsigned int func)
{
        union acpi_object *out_obj;

        /* Only evaluate functions advertised by _DSM function 0 (see lps0_device_attach()). */
        if (!(lps0_dsm_func_mask & (1 << func)))
                return;

        out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, 1, func, NULL);

        acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
                          func, out_obj ? "successful" : "failed");

        ACPI_FREE(out_obj);
}

static int lps0_device_attach(struct acpi_device *adev,
                              const struct acpi_device_id *not_used)
{
        union acpi_object *out_obj;

        if (lps0_device_handle)
                return 0;

        if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
                return 0;

        guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
        /* Check if the _DSM is present and as expected. */
        out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
        if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
                char bitmask = *(char *)out_obj->buffer.pointer;

                if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) {
                        lps0_dsm_func_mask = bitmask;
                        lps0_device_handle = adev->handle;
                        /*
                         * Use suspend-to-idle by default if the default
                         * suspend mode was not set from the command line.
                         */
                        if (mem_sleep_default > PM_SUSPEND_MEM)
                                mem_sleep_current = PM_SUSPEND_TO_IDLE;
                }

                acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
                                  bitmask);
        } else {
                acpi_handle_debug(adev->handle,
                                  "_DSM function 0 evaluation failed\n");
        }
        ACPI_FREE(out_obj);

        lpi_device_get_constraints();

        return 0;
}

static struct acpi_scan_handler lps0_handler = {
        .ids = lps0_device_ids,
        .attach = lps0_device_attach,
};

static int acpi_s2idle_begin(void)
{
        acpi_scan_lock_acquire();
        s2idle_in_progress = true;
        return 0;
}

static int acpi_s2idle_prepare(void)
{
        if (lps0_device_handle) {
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
        } else {
                /*
                 * The configuration of GPEs is changed here to avoid spurious
                 * wakeups, but that should not be necessary if this is a
                 * "low-power S0" platform and the low-power S0 _DSM is present.
                 */
                acpi_enable_all_wakeup_gpes();
                acpi_os_wait_events_complete();
        }
        if (acpi_sci_irq_valid())
                enable_irq_wake(acpi_sci_irq);

        return 0;
}

static void acpi_s2idle_wake(void)
{
        if (pm_debug_messages_on)
                lpi_check_constraints();

        /*
         * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
         * that the SCI has triggered while suspended, so cancel the wakeup in
         * case it has not been a wakeup event (the GPEs will be checked later).
         */
        if (acpi_sci_irq_valid() &&
            !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
                pm_system_cancel_wakeup();
                s2idle_wakeup = true;
        }
}

static void acpi_s2idle_sync(void)
{
        /*
         * Process all pending events in case there are any wakeup ones.
         *
         * The EC driver uses the system workqueue and an additional special
         * one, so those need to be flushed too.
         */
        acpi_ec_flush_work();
        acpi_os_wait_events_complete();
        s2idle_wakeup = false;
}

static void acpi_s2idle_restore(void)
{
        if (acpi_sci_irq_valid())
                disable_irq_wake(acpi_sci_irq);

        if (lps0_device_handle) {
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
                acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
        } else {
                acpi_enable_all_runtime_gpes();
        }
}

static void acpi_s2idle_end(void)
{
        s2idle_in_progress = false;
        acpi_scan_lock_release();
}

static const struct platform_s2idle_ops acpi_s2idle_ops = {
        .begin = acpi_s2idle_begin,
        .prepare = acpi_s2idle_prepare,
        .wake = acpi_s2idle_wake,
        .sync = acpi_s2idle_sync,
        .restore = acpi_s2idle_restore,
        .end = acpi_s2idle_end,
};

static void acpi_sleep_suspend_setup(void)
{
        int i;

        for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
                if (acpi_sleep_state_supported(i))
                        sleep_states[i] = 1;

        suspend_set_ops(old_suspend_ordering ?
                        &acpi_suspend_ops_old : &acpi_suspend_ops);

        acpi_scan_add_handler(&lps0_handler);
        s2idle_set_ops(&acpi_s2idle_ops);
}

#else /* !CONFIG_SUSPEND */
#define s2idle_in_progress      (false)
#define s2idle_wakeup           (false)
#define lps0_device_handle      (NULL)
static inline void acpi_sleep_suspend_setup(void) {}
#endif /* !CONFIG_SUSPEND */

bool acpi_s2idle_wakeup(void)
{
        return s2idle_wakeup;
}

bool acpi_sleep_no_ec_events(void)
{
        return !s2idle_in_progress || !lps0_device_handle;
}

#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;

static int acpi_save_bm_rld(void)
{
        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
        return 0;
}

static void acpi_restore_bm_rld(void)
{
        u32 resumed_bm_rld = 0;

        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
        if (resumed_bm_rld == saved_bm_rld)
                return;

        acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

static struct syscore_ops acpi_sleep_syscore_ops = {
        .suspend = acpi_save_bm_rld,
        .resume = acpi_restore_bm_rld,
};

static void acpi_sleep_syscore_init(void)
{
        register_syscore_ops(&acpi_sleep_syscore_ops);
}
#else
static inline void acpi_sleep_syscore_init(void) {}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HIBERNATION
static unsigned long s4_hardware_signature;
static struct acpi_table_facs *facs;
static bool nosigcheck;

void __init acpi_no_s4_hw_signature(void)
{
        nosigcheck = true;
}

static int acpi_hibernation_begin(void)
{
        int error;

        error = nvs_nosave ? 0 : suspend_nvs_alloc();
        if (!error)
                acpi_pm_start(ACPI_STATE_S4);

        return error;
}

static int acpi_hibernation_enter(void)
{
        acpi_status status = AE_OK;

        ACPI_FLUSH_CPU_CACHE();

        /* This shouldn't return.  If it returns, we have a problem */
        status = acpi_enter_sleep_state(ACPI_STATE_S4);
        /* Reprogram control registers */
        acpi_leave_sleep_state_prep(ACPI_STATE_S4);

        return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}

static void acpi_hibernation_leave(void)
{
        pm_set_resume_via_firmware();
        /*
         * If ACPI is not enabled by the BIOS and the boot kernel, we need to
         * enable it here.
         */
        acpi_enable();
        /* Reprogram control registers */
        acpi_leave_sleep_state_prep(ACPI_STATE_S4);
        /* Check the hardware signature */
        if (facs && s4_hardware_signature != facs->hardware_signature)
                pr_crit("ACPI: Hardware changed while hibernated, success doubtful!\n");
        /* Restore the NVS memory area */
        suspend_nvs_restore();
        /* Allow EC transactions to happen. */
        acpi_ec_unblock_transactions();
}

static void acpi_pm_thaw(void)
{
        acpi_ec_unblock_transactions();
        acpi_enable_all_runtime_gpes();
}

static const struct platform_hibernation_ops acpi_hibernation_ops = {
        .begin = acpi_hibernation_begin,
        .end = acpi_pm_end,
        .pre_snapshot = acpi_pm_prepare,
        .finish = acpi_pm_finish,
        .prepare = acpi_pm_prepare,
        .enter = acpi_hibernation_enter,
        .leave = acpi_hibernation_leave,
        .pre_restore = acpi_pm_freeze,
        .restore_cleanup = acpi_pm_thaw,
};

/**
 * acpi_hibernation_begin_old - Set the target system sleep state to
 *      ACPI_STATE_S4 and execute the _PTS control method.  This
 *      function is used if the pre-ACPI 2.0 suspend ordering has been
 *      requested.
 */
static int acpi_hibernation_begin_old(void)
{
        int error;
        /*
         * The _TTS object should always be evaluated before the _PTS object.
         * When old_suspend_ordering is true, the _PTS object is evaluated
         * in acpi_sleep_prepare().
         */
        acpi_sleep_tts_switch(ACPI_STATE_S4);

        error = acpi_sleep_prepare(ACPI_STATE_S4);

        if (!error) {
                if (!nvs_nosave)
                        error = suspend_nvs_alloc();
                if (!error) {
                        acpi_target_sleep_state = ACPI_STATE_S4;
                        acpi_scan_lock_acquire();
                }
        }
        return error;
}

/*
 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 * been requested.
 */
static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
        .begin = acpi_hibernation_begin_old,
        .end = acpi_pm_end,
        .pre_snapshot = acpi_pm_pre_suspend,
        .prepare = acpi_pm_freeze,
        .finish = acpi_pm_finish,
        .enter = acpi_hibernation_enter,
        .leave = acpi_hibernation_leave,
        .pre_restore = acpi_pm_freeze,
        .restore_cleanup = acpi_pm_thaw,
        .recover = acpi_pm_finish,
};

static void acpi_sleep_hibernate_setup(void)
{
        if (!acpi_sleep_state_supported(ACPI_STATE_S4))
                return;

        hibernation_set_ops(old_suspend_ordering ?
                            &acpi_hibernation_ops_old : &acpi_hibernation_ops);
        sleep_states[ACPI_STATE_S4] = 1;
        if (nosigcheck)
                return;

        acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
        if (facs)
                s4_hardware_signature = facs->hardware_signature;
}
#else /* !CONFIG_HIBERNATION */
static inline void acpi_sleep_hibernate_setup(void) {}
#endif /* !CONFIG_HIBERNATION */

static void acpi_power_off_prepare(void)
{
        /* Prepare to power off the system */
        acpi_sleep_prepare(ACPI_STATE_S5);
        acpi_disable_all_gpes();
        acpi_os_wait_events_complete();
}

static void acpi_power_off(void)
{
        /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
        printk(KERN_DEBUG "%s called\n", __func__);
        local_irq_disable();
        acpi_enter_sleep_state(ACPI_STATE_S5);
}

int __init acpi_sleep_init(void)
{
        char supported[ACPI_S_STATE_COUNT * 3 + 1];
        char *pos = supported;
        int i;

        acpi_sleep_dmi_check();

        sleep_states[ACPI_STATE_S0] = 1;

        acpi_sleep_syscore_init();
        acpi_sleep_suspend_setup();
        acpi_sleep_hibernate_setup();

        if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
                sleep_states[ACPI_STATE_S5] = 1;
                pm_power_off_prepare = acpi_power_off_prepare;
                pm_power_off = acpi_power_off;
        } else {
                acpi_no_s5 = true;
        }

        supported[0] = 0;
        for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
                if (sleep_states[i])
                        pos += sprintf(pos, " S%d", i);
        }
        pr_info(PREFIX "(supports%s)\n", supported);

        /*
         * Register the tts_notifier to reboot notifier list so that the _TTS
         * object can also be evaluated when the system enters S5.
         */
        register_reboot_notifier(&tts_notifier);
        return 0;
}