xref: /openbmc/linux/drivers/acpi/sleep.c (revision 35e6bcd1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * sleep.c - ACPI sleep support.
4  *
5  * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
6  * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com>
7  * Copyright (c) 2000-2003 Patrick Mochel
8  * Copyright (c) 2003 Open Source Development Lab
9  */
10 
11 #define pr_fmt(fmt) "ACPI: PM: " fmt
12 
13 #include <linux/delay.h>
14 #include <linux/irq.h>
15 #include <linux/dmi.h>
16 #include <linux/device.h>
17 #include <linux/interrupt.h>
18 #include <linux/suspend.h>
19 #include <linux/reboot.h>
20 #include <linux/acpi.h>
21 #include <linux/module.h>
22 #include <linux/syscore_ops.h>
23 #include <asm/io.h>
24 #include <trace/events/power.h>
25 
26 #include "internal.h"
27 #include "sleep.h"
28 
29 /*
30  * Some HW-full platforms do not have _S5, so they may need
31  * to leverage efi power off for a shutdown.
32  */
33 bool acpi_no_s5;
34 static u8 sleep_states[ACPI_S_STATE_COUNT];
35 
36 static void acpi_sleep_tts_switch(u32 acpi_state)
37 {
38 	acpi_status status;
39 
40 	status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
41 	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
42 		/*
43 		 * OS can't evaluate the _TTS object correctly. Some warning
44 		 * message will be printed. But it won't break anything.
45 		 */
46 		pr_notice("Failure in evaluating _TTS object\n");
47 	}
48 }
49 
50 static int tts_notify_reboot(struct notifier_block *this,
51 			unsigned long code, void *x)
52 {
53 	acpi_sleep_tts_switch(ACPI_STATE_S5);
54 	return NOTIFY_DONE;
55 }
56 
/* Registered on the reboot notifier list by acpi_sleep_init(). */
static struct notifier_block tts_notifier = {
	.notifier_call	= tts_notify_reboot,
	.next		= NULL,
	.priority	= 0,
};
62 
63 #ifndef acpi_skip_set_wakeup_address
64 #define acpi_skip_set_wakeup_address() false
65 #endif
66 
/*
 * acpi_sleep_prepare - Carry out firmware-side preparation for a sleep state.
 * @acpi_state: The ACPI sleep state about to be entered.
 *
 * For S3, publish the firmware waking vector so the platform resumes the
 * kernel at the right address; fail with -EFAULT if no wakeup address is
 * available.  Then enable the wakeup devices for @acpi_state and run the
 * firmware preparation (_PTS/_SST) via acpi_enter_sleep_state_prep().
 *
 * Returns 0 on success or a negative error code.
 */
static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
	unsigned long acpi_wakeup_address;

	/* do we have a wakeup address for S2 and S3? */
	if (acpi_state == ACPI_STATE_S3 && !acpi_skip_set_wakeup_address()) {
		acpi_wakeup_address = acpi_get_wakeup_address();
		if (!acpi_wakeup_address)
			return -EFAULT;
		acpi_set_waking_vector(acpi_wakeup_address);

	}
#endif
	pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
	acpi_enable_wakeup_devices(acpi_state);
	acpi_enter_sleep_state_prep(acpi_state);
	return 0;
}
86 
87 bool acpi_sleep_state_supported(u8 sleep_state)
88 {
89 	acpi_status status;
90 	u8 type_a, type_b;
91 
92 	status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
93 	return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
94 		|| (acpi_gbl_FADT.sleep_control.address
95 			&& acpi_gbl_FADT.sleep_status.address));
96 }
97 
98 #ifdef CONFIG_ACPI_SLEEP
99 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
100 
/*
 * acpi_target_system_state - Return the sleep state the system is entering.
 *
 * ACPI_STATE_S0 outside of a sleep transition.
 */
u32 acpi_target_system_state(void)
{
	return acpi_target_sleep_state;
}
EXPORT_SYMBOL_GPL(acpi_target_system_state);
106 
107 static bool pwr_btn_event_pending;
108 
109 /*
110  * The ACPI specification wants us to save NVS memory regions during hibernation
111  * and to restore them during the subsequent resume.  Windows does that also for
112  * suspend to RAM.  However, it is known that this mechanism does not work on
113  * all machines, so we allow the user to disable it with the help of the
114  * 'acpi_sleep=nonvs' kernel command line option.
115  */
116 static bool nvs_nosave;
117 
/* Disable NVS save/restore entirely ('acpi_sleep=nonvs'). */
void __init acpi_nvs_nosave(void)
{
	nvs_nosave = true;
}
122 
123 /*
124  * The ACPI specification wants us to save NVS memory regions during hibernation
125  * but says nothing about saving NVS during S3.  Not all versions of Windows
126  * save NVS on S3 suspend either, and it is clear that not all systems need
127  * NVS to be saved at S3 time.  To improve suspend/resume time, allow the
128  * user to disable saving NVS on S3 if their system does not require it, but
129  * continue to save/restore NVS for S4 as specified.
130  */
131 static bool nvs_nosave_s3;
132 
/* Skip NVS save/restore for S3 only (S4 still saves NVS as per the spec). */
void __init acpi_nvs_nosave_s3(void)
{
	nvs_nosave_s3 = true;
}
137 
/* DMI quirk callback: force NVS saving on S3 for this machine. */
static int __init init_nvs_save_s3(const struct dmi_system_id *d)
{
	nvs_nosave_s3 = false;
	return 0;
}
143 
144 /*
145  * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
146  * user to request that behavior by using the 'acpi_old_suspend_ordering'
147  * kernel command line option that causes the following variable to be set.
148  */
149 static bool old_suspend_ordering;
150 
/* Select the pre-ACPI 2.0 suspend ordering ('acpi_old_suspend_ordering'). */
void __init acpi_old_suspend_ordering(void)
{
	old_suspend_ordering = true;
}
155 
/* DMI quirk callback: this machine needs the pre-ACPI 2.0 suspend ordering. */
static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
{
	acpi_old_suspend_ordering();
	return 0;
}
161 
/* DMI quirk callback: NVS saving is broken on this machine; disable it. */
static int __init init_nvs_nosave(const struct dmi_system_id *d)
{
	acpi_nvs_nosave();
	return 0;
}
167 
168 bool acpi_sleep_default_s3;
169 
/* DMI quirk callback: prefer S3 over suspend-to-idle on this machine. */
static int __init init_default_s3(const struct dmi_system_id *d)
{
	acpi_sleep_default_s3 = true;
	return 0;
}
175 
/*
 * Per-machine sleep quirks keyed on DMI data; each entry's .callback tweaks
 * a module-level flag (old suspend ordering, NVS save policy, default S3).
 * Matched by acpi_sleep_dmi_check() at init time.
 */
static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
	{
	.callback = init_old_suspend_ordering,
	.ident = "Abit KN9 (nForce4 variant)",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
		DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "HP xw4600 Workstation",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
		DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
		DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Panasonic CF51-2L",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR,
				"Matsushita Electric Industrial Co.,Ltd."),
		DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW41E_H",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW21E",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW21M",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCEB17FX",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-SR11M",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Everex StepNote Series",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCEB1Z1E",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-NW130D",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCCW29FX",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Averatec AV1020-ED2",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
		DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Asus A8N-SLI DELUXE",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
		DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Asus A8N-SLI Premium",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
		DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-SR26GN_P",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCEB1S1E",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW520F",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Asus K54C",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Asus K54HR",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
		},
	},
	{
	.callback = init_nvs_save_s3,
	.ident = "Asus 1025C",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
		DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
		},
	},
	/*
	 * https://bugzilla.kernel.org/show_bug.cgi?id=189431
	 * Lenovo G50-45 is a platform later than 2012, but needs nvs memory
	 * saving during S3.
	 */
	{
	.callback = init_nvs_save_s3,
	.ident = "Lenovo G50-45",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
		DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
		},
	},
	{
	.callback = init_nvs_save_s3,
	.ident = "Lenovo G40-45",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
		DMI_MATCH(DMI_PRODUCT_NAME, "80E1"),
		},
	},
	/*
	 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
	 * the Low Power S0 Idle firmware interface (see
	 * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
	 */
	{
	.callback = init_default_s3,
	.ident = "ThinkPad X1 Tablet(2016)",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
		DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
		},
	},
	/*
	 * ASUS B1400CEAE hangs on resume from suspend (see
	 * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
	 */
	{
	.callback = init_default_s3,
	.ident = "ASUS B1400CEAE",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
		DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
		},
	},
	{},
};
402 
403 static bool ignore_blacklist;
404 
/* Skip the DMI quirk table entirely (set by arch/boot code). */
void __init acpi_sleep_no_blacklist(void)
{
	ignore_blacklist = true;
}
409 
/*
 * Apply sleep-related DMI quirks unless blacklisting has been disabled.
 * Machines with a 2012-or-later BIOS skip NVS saving on S3 by default;
 * individual table entries may override that again.
 */
static void __init acpi_sleep_dmi_check(void)
{
	if (ignore_blacklist)
		return;

	if (dmi_get_bios_year() >= 2012)
		acpi_nvs_nosave_s3();

	dmi_check_system(acpisleep_dmi_table);
}
420 
421 /**
422  * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
423  */
424 static int acpi_pm_freeze(void)
425 {
426 	acpi_disable_all_gpes();
427 	acpi_os_wait_events_complete();
428 	acpi_ec_block_transactions();
429 	return 0;
430 }
431 
432 /**
433  * acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
434  */
435 static int acpi_pm_pre_suspend(void)
436 {
437 	acpi_pm_freeze();
438 	return suspend_nvs_save();
439 }
440 
441 /**
442  *	__acpi_pm_prepare - Prepare the platform to enter the target state.
443  *
444  *	If necessary, set the firmware waking vector and do arch-specific
445  *	nastiness to get the wakeup code to the waking vector.
446  */
447 static int __acpi_pm_prepare(void)
448 {
449 	int error = acpi_sleep_prepare(acpi_target_sleep_state);
450 	if (error)
451 		acpi_target_sleep_state = ACPI_STATE_S0;
452 
453 	return error;
454 }
455 
456 /**
457  *	acpi_pm_prepare - Prepare the platform to enter the target sleep
458  *		state and disable the GPEs.
459  */
460 static int acpi_pm_prepare(void)
461 {
462 	int error = __acpi_pm_prepare();
463 	if (!error)
464 		error = acpi_pm_pre_suspend();
465 
466 	return error;
467 }
468 
469 /**
470  *	acpi_pm_finish - Instruct the platform to leave a sleep state.
471  *
472  *	This is called after we wake back up (or if entering the sleep state
473  *	failed).
474  */
475 static void acpi_pm_finish(void)
476 {
477 	struct acpi_device *pwr_btn_adev;
478 	u32 acpi_state = acpi_target_sleep_state;
479 
480 	acpi_ec_unblock_transactions();
481 	suspend_nvs_free();
482 
483 	if (acpi_state == ACPI_STATE_S0)
484 		return;
485 
486 	pr_info("Waking up from system sleep state S%d\n", acpi_state);
487 	acpi_disable_wakeup_devices(acpi_state);
488 	acpi_leave_sleep_state(acpi_state);
489 
490 	/* reset firmware waking vector */
491 	acpi_set_waking_vector(0);
492 
493 	acpi_target_sleep_state = ACPI_STATE_S0;
494 
495 	acpi_resume_power_resources();
496 
497 	/* If we were woken with the fixed power button, provide a small
498 	 * hint to userspace in the form of a wakeup event on the fixed power
499 	 * button device (if it can be found).
500 	 *
501 	 * We delay the event generation til now, as the PM layer requires
502 	 * timekeeping to be running before we generate events. */
503 	if (!pwr_btn_event_pending)
504 		return;
505 
506 	pwr_btn_event_pending = false;
507 	pwr_btn_adev = acpi_dev_get_first_match_dev(ACPI_BUTTON_HID_POWERF,
508 						    NULL, -1);
509 	if (pwr_btn_adev) {
510 		pm_wakeup_event(&pwr_btn_adev->dev, 0);
511 		acpi_dev_put(pwr_btn_adev);
512 	}
513 }
514 
515 /**
516  * acpi_pm_start - Start system PM transition.
517  */
518 static void acpi_pm_start(u32 acpi_state)
519 {
520 	acpi_target_sleep_state = acpi_state;
521 	acpi_sleep_tts_switch(acpi_target_sleep_state);
522 	acpi_scan_lock_acquire();
523 }
524 
525 /**
526  * acpi_pm_end - Finish up system PM transition.
527  */
528 static void acpi_pm_end(void)
529 {
530 	acpi_turn_off_unused_power_resources();
531 	acpi_scan_lock_release();
532 	/*
533 	 * This is necessary in case acpi_pm_finish() is not called during a
534 	 * failing transition to a sleep state.
535 	 */
536 	acpi_target_sleep_state = ACPI_STATE_S0;
537 	acpi_sleep_tts_switch(acpi_target_sleep_state);
538 }
539 #else /* !CONFIG_ACPI_SLEEP */
540 #define sleep_no_lps0	(1)
541 #define acpi_target_sleep_state	ACPI_STATE_S0
542 #define acpi_sleep_default_s3	(1)
543 static inline void acpi_sleep_dmi_check(void) {}
544 #endif /* CONFIG_ACPI_SLEEP */
545 
546 #ifdef CONFIG_SUSPEND
/* Map pm_suspend states (suspend_state_t) to the ACPI sleep states used. */
static u32 acpi_suspend_states[] = {
	[PM_SUSPEND_ON] = ACPI_STATE_S0,
	[PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
	[PM_SUSPEND_MEM] = ACPI_STATE_S3,
	[PM_SUSPEND_MAX] = ACPI_STATE_S5
};
553 
554 /**
555  *	acpi_suspend_begin - Set the target system sleep state to the state
556  *		associated with given @pm_state, if supported.
557  */
558 static int acpi_suspend_begin(suspend_state_t pm_state)
559 {
560 	u32 acpi_state = acpi_suspend_states[pm_state];
561 	int error;
562 
563 	error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
564 	if (error)
565 		return error;
566 
567 	if (!sleep_states[acpi_state]) {
568 		pr_err("ACPI does not support sleep state S%u\n", acpi_state);
569 		return -ENOSYS;
570 	}
571 	if (acpi_state > ACPI_STATE_S1)
572 		pm_set_suspend_via_firmware();
573 
574 	acpi_pm_start(acpi_state);
575 	return 0;
576 }
577 
578 /**
579  *	acpi_suspend_enter - Actually enter a sleep state.
580  *	@pm_state: ignored
581  *
582  *	Flush caches and go to sleep. For STR we have to call arch-specific
583  *	assembly, which in turn call acpi_enter_sleep_state().
584  *	It's unfortunate, but it works. Please fix if you're feeling frisky.
585  */
586 static int acpi_suspend_enter(suspend_state_t pm_state)
587 {
588 	acpi_status status = AE_OK;
589 	u32 acpi_state = acpi_target_sleep_state;
590 	int error;
591 
592 	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
593 	switch (acpi_state) {
594 	case ACPI_STATE_S1:
595 		barrier();
596 		status = acpi_enter_sleep_state(acpi_state);
597 		break;
598 
599 	case ACPI_STATE_S3:
600 		if (!acpi_suspend_lowlevel)
601 			return -ENOSYS;
602 		error = acpi_suspend_lowlevel();
603 		if (error)
604 			return error;
605 		pr_info("Low-level resume complete\n");
606 		pm_set_resume_via_firmware();
607 		break;
608 	}
609 	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false);
610 
611 	/* This violates the spec but is required for bug compatibility. */
612 	acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
613 
614 	/* Reprogram control registers */
615 	acpi_leave_sleep_state_prep(acpi_state);
616 
617 	/* ACPI 3.0 specs (P62) says that it's the responsibility
618 	 * of the OSPM to clear the status bit [ implying that the
619 	 * POWER_BUTTON event should not reach userspace ]
620 	 *
621 	 * However, we do generate a small hint for userspace in the form of
622 	 * a wakeup event. We flag this condition for now and generate the
623 	 * event later, as we're currently too early in resume to be able to
624 	 * generate wakeup events.
625 	 */
626 	if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
627 		acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;
628 
629 		acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
630 
631 		if (pwr_btn_status & ACPI_EVENT_FLAG_STATUS_SET) {
632 			acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
633 			/* Flag for later */
634 			pwr_btn_event_pending = true;
635 		}
636 	}
637 
638 	/*
639 	 * Disable and clear GPE status before interrupt is enabled. Some GPEs
640 	 * (like wakeup GPE) haven't handler, this can avoid such GPE misfire.
641 	 * acpi_leave_sleep_state will reenable specific GPEs later
642 	 */
643 	acpi_disable_all_gpes();
644 	/* Allow EC transactions to happen. */
645 	acpi_ec_unblock_transactions();
646 
647 	suspend_nvs_restore();
648 
649 	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
650 }
651 
652 static int acpi_suspend_state_valid(suspend_state_t pm_state)
653 {
654 	u32 acpi_state;
655 
656 	switch (pm_state) {
657 	case PM_SUSPEND_ON:
658 	case PM_SUSPEND_STANDBY:
659 	case PM_SUSPEND_MEM:
660 		acpi_state = acpi_suspend_states[pm_state];
661 
662 		return sleep_states[acpi_state];
663 	default:
664 		return 0;
665 	}
666 }
667 
/* Default (ACPI 2.0+ ordering) suspend callbacks registered with the core. */
static const struct platform_suspend_ops acpi_suspend_ops = {
	.valid = acpi_suspend_state_valid,
	.begin = acpi_suspend_begin,
	.prepare_late = acpi_pm_prepare,
	.enter = acpi_suspend_enter,
	.wake = acpi_pm_finish,
	.end = acpi_pm_end,
};
676 
677 /**
678  *	acpi_suspend_begin_old - Set the target system sleep state to the
679  *		state associated with given @pm_state, if supported, and
680  *		execute the _PTS control method.  This function is used if the
681  *		pre-ACPI 2.0 suspend ordering has been requested.
682  */
683 static int acpi_suspend_begin_old(suspend_state_t pm_state)
684 {
685 	int error = acpi_suspend_begin(pm_state);
686 	if (!error)
687 		error = __acpi_pm_prepare();
688 
689 	return error;
690 }
691 
692 /*
693  * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
694  * been requested.
695  */
696 static const struct platform_suspend_ops acpi_suspend_ops_old = {
697 	.valid = acpi_suspend_state_valid,
698 	.begin = acpi_suspend_begin_old,
699 	.prepare_late = acpi_pm_pre_suspend,
700 	.enter = acpi_suspend_enter,
701 	.wake = acpi_pm_finish,
702 	.end = acpi_pm_end,
703 	.recover = acpi_pm_finish,
704 };
705 
706 static bool s2idle_wakeup;
707 
/* Start a suspend-to-idle cycle: hold the scan lock until acpi_s2idle_end(). */
int acpi_s2idle_begin(void)
{
	acpi_scan_lock_acquire();
	return 0;
}
713 
/*
 * Prepare for suspend-to-idle: arm the SCI for wakeup, widen the EC GPE wake
 * mask, enable wakeup devices for S0 and switch GPEs to wakeup-only mode.
 * Always returns 0.
 */
int acpi_s2idle_prepare(void)
{
	if (acpi_sci_irq_valid()) {
		int error;

		error = enable_irq_wake(acpi_sci_irq);
		if (error)
			pr_warn("Warning: Failed to enable wakeup from IRQ %d: %d\n",
				acpi_sci_irq, error);

		acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
	}

	acpi_enable_wakeup_devices(ACPI_STATE_S0);

	/* Change the configuration of GPEs to avoid spurious wakeup. */
	acpi_enable_all_wakeup_gpes();
	acpi_os_wait_events_complete();

	s2idle_wakeup = true;
	return 0;
}
736 
/*
 * acpi_s2idle_wake - Decide whether a suspend-to-idle wakeup is genuine.
 *
 * Returns true if the pending wakeup should cause resume; false if it was
 * spurious (e.g. an EC event with no real wakeup behind it), in which case
 * the SCI is rearmed and the system stays suspended.
 */
bool acpi_s2idle_wake(void)
{
	/* Without a valid SCI, defer entirely to the generic wakeup check. */
	if (!acpi_sci_irq_valid())
		return pm_wakeup_pending();

	while (pm_wakeup_pending()) {
		/*
		 * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
		 * SCI has not triggered while suspended, so bail out (the
		 * wakeup is pending anyway and the SCI is not the source of
		 * it).
		 */
		if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
			pm_pr_dbg("Wakeup unrelated to ACPI SCI\n");
			return true;
		}

		/*
		 * If the status bit of any enabled fixed event is set, the
		 * wakeup is regarded as valid.
		 */
		if (acpi_any_fixed_event_status_set()) {
			pm_pr_dbg("ACPI fixed event wakeup\n");
			return true;
		}

		/* Check wakeups from drivers sharing the SCI. */
		if (acpi_check_wakeup_handlers()) {
			pm_pr_dbg("ACPI custom handler wakeup\n");
			return true;
		}

		/*
		 * Check non-EC GPE wakeups and if there are none, cancel the
		 * SCI-related wakeup and dispatch the EC GPE.
		 */
		if (acpi_ec_dispatch_gpe()) {
			pm_pr_dbg("ACPI non-EC GPE wakeup\n");
			return true;
		}

		acpi_os_wait_events_complete();

		/*
		 * The SCI is in the "suspended" state now and it cannot produce
		 * new wakeup events till the rearming below, so if any of them
		 * are pending here, they must be resulting from the processing
		 * of EC events above or coming from somewhere else.
		 */
		if (pm_wakeup_pending()) {
			pm_pr_dbg("Wakeup after ACPI Notify sync\n");
			return true;
		}

		pm_pr_dbg("Rearming ACPI SCI for wakeup\n");

		pm_wakeup_clear(acpi_sci_irq);
		rearm_wake_irq(acpi_sci_irq);
	}

	return false;
}
799 
/*
 * Undo acpi_s2idle_prepare(): drain pending events, restore the runtime GPE
 * configuration, disable S0 wakeup devices and disarm the SCI.
 */
void acpi_s2idle_restore(void)
{
	/*
	 * Drain pending events before restoring the working-state configuration
	 * of GPEs.
	 */
	acpi_os_wait_events_complete(); /* synchronize GPE processing */
	acpi_ec_flush_work(); /* flush the EC driver's workqueues */
	acpi_os_wait_events_complete(); /* synchronize Notify handling */

	s2idle_wakeup = false;

	acpi_enable_all_runtime_gpes();

	acpi_disable_wakeup_devices(ACPI_STATE_S0);

	if (acpi_sci_irq_valid()) {
		acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE);
		disable_irq_wake(acpi_sci_irq);
	}
}
821 
/* End of the suspend-to-idle cycle: release the lock taken in _begin(). */
void acpi_s2idle_end(void)
{
	acpi_scan_lock_release();
}
826 
/* Suspend-to-idle callbacks registered via s2idle_set_ops(). */
static const struct platform_s2idle_ops acpi_s2idle_ops = {
	.begin = acpi_s2idle_begin,
	.prepare = acpi_s2idle_prepare,
	.wake = acpi_s2idle_wake,
	.restore = acpi_s2idle_restore,
	.end = acpi_s2idle_end,
};
834 
/*
 * Register the s2idle callbacks; weak so an architecture can override it
 * (e.g. to install LPS0-aware callbacks instead).
 */
void __weak acpi_s2idle_setup(void)
{
	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
		pr_info("Efficient low-power S0 idle declared\n");

	s2idle_set_ops(&acpi_s2idle_ops);
}
842 
843 static void acpi_sleep_suspend_setup(void)
844 {
845 	bool suspend_ops_needed = false;
846 	int i;
847 
848 	for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
849 		if (acpi_sleep_state_supported(i)) {
850 			sleep_states[i] = 1;
851 			suspend_ops_needed = true;
852 		}
853 
854 	if (suspend_ops_needed)
855 		suspend_set_ops(old_suspend_ordering ?
856 				&acpi_suspend_ops_old : &acpi_suspend_ops);
857 
858 	acpi_s2idle_setup();
859 }
860 
861 #else /* !CONFIG_SUSPEND */
862 #define s2idle_wakeup		(false)
863 static inline void acpi_sleep_suspend_setup(void) {}
864 #endif /* !CONFIG_SUSPEND */
865 
/* True while the system is in (or waking from) suspend-to-idle. */
bool acpi_s2idle_wakeup(void)
{
	return s2idle_wakeup;
}
870 
871 #ifdef CONFIG_PM_SLEEP
872 static u32 saved_bm_rld;
873 
/* syscore suspend: remember the BM_RLD bit so resume can restore it. */
static int  acpi_save_bm_rld(void)
{
	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
	return 0;
}
879 
880 static void  acpi_restore_bm_rld(void)
881 {
882 	u32 resumed_bm_rld = 0;
883 
884 	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
885 	if (resumed_bm_rld == saved_bm_rld)
886 		return;
887 
888 	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
889 }
890 
/* Save/restore BM_RLD around system sleep via the syscore mechanism. */
static struct syscore_ops acpi_sleep_syscore_ops = {
	.suspend = acpi_save_bm_rld,
	.resume = acpi_restore_bm_rld,
};
895 
/* Hook the BM_RLD save/restore callbacks into the syscore framework. */
static void acpi_sleep_syscore_init(void)
{
	register_syscore_ops(&acpi_sleep_syscore_ops);
}
900 #else
901 static inline void acpi_sleep_syscore_init(void) {}
902 #endif /* CONFIG_PM_SLEEP */
903 
904 #ifdef CONFIG_HIBERNATION
905 static unsigned long s4_hardware_signature;
906 static struct acpi_table_facs *facs;
907 int acpi_check_s4_hw_signature = -1; /* Default behaviour is just to warn */
908 
909 static int acpi_hibernation_begin(pm_message_t stage)
910 {
911 	if (!nvs_nosave) {
912 		int error = suspend_nvs_alloc();
913 		if (error)
914 			return error;
915 	}
916 
917 	if (stage.event == PM_EVENT_HIBERNATE)
918 		pm_set_suspend_via_firmware();
919 
920 	acpi_pm_start(ACPI_STATE_S4);
921 	return 0;
922 }
923 
/*
 * Enter S4.  Only returns on failure (or after the image has been written
 * and the platform did not actually power off).
 */
static int acpi_hibernation_enter(void)
{
	acpi_status status = AE_OK;

	/* This shouldn't return.  If it returns, we have a problem */
	status = acpi_enter_sleep_state(ACPI_STATE_S4);
	/* Reprogram control registers */
	acpi_leave_sleep_state_prep(ACPI_STATE_S4);

	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
935 
/*
 * Called in the restored (target) kernel right after resume from the S4
 * image: re-enable ACPI, reprogram control registers, compare the FACS
 * hardware signature against the pre-hibernation value, and restore NVS.
 */
static void acpi_hibernation_leave(void)
{
	pm_set_resume_via_firmware();
	/*
	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
	 * enable it here.
	 */
	acpi_enable();
	/* Reprogram control registers */
	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
	/* Check the hardware signature */
	if (facs && s4_hardware_signature != facs->hardware_signature)
		pr_crit("Hardware changed while hibernated, success doubtful!\n");
	/* Restore the NVS memory area */
	suspend_nvs_restore();
	/* Allow EC transactions to happen. */
	acpi_ec_unblock_transactions();
}
954 
/* Undo acpi_pm_freeze(): unblock the EC and re-enable runtime GPEs. */
static void acpi_pm_thaw(void)
{
	acpi_ec_unblock_transactions();
	acpi_enable_all_runtime_gpes();
}
960 
/* Default (ACPI 2.0+ ordering) hibernation callbacks. */
static const struct platform_hibernation_ops acpi_hibernation_ops = {
	.begin = acpi_hibernation_begin,
	.end = acpi_pm_end,
	.pre_snapshot = acpi_pm_prepare,
	.finish = acpi_pm_finish,
	.prepare = acpi_pm_prepare,
	.enter = acpi_hibernation_enter,
	.leave = acpi_hibernation_leave,
	.pre_restore = acpi_pm_freeze,
	.restore_cleanup = acpi_pm_thaw,
};
972 
973 /**
974  *	acpi_hibernation_begin_old - Set the target system sleep state to
975  *		ACPI_STATE_S4 and execute the _PTS control method.  This
976  *		function is used if the pre-ACPI 2.0 suspend ordering has been
977  *		requested.
978  */
979 static int acpi_hibernation_begin_old(pm_message_t stage)
980 {
981 	int error;
982 	/*
983 	 * The _TTS object should always be evaluated before the _PTS object.
984 	 * When the old_suspended_ordering is true, the _PTS object is
985 	 * evaluated in the acpi_sleep_prepare.
986 	 */
987 	acpi_sleep_tts_switch(ACPI_STATE_S4);
988 
989 	error = acpi_sleep_prepare(ACPI_STATE_S4);
990 	if (error)
991 		return error;
992 
993 	if (!nvs_nosave) {
994 		error = suspend_nvs_alloc();
995 		if (error)
996 			return error;
997 	}
998 
999 	if (stage.event == PM_EVENT_HIBERNATE)
1000 		pm_set_suspend_via_firmware();
1001 
1002 	acpi_target_sleep_state = ACPI_STATE_S4;
1003 	acpi_scan_lock_acquire();
1004 	return 0;
1005 }
1006 
1007 /*
1008  * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
1009  * been requested.
1010  */
/* Hibernation callbacks for the pre-ACPI 2.0 suspend ordering. */
static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
	.begin = acpi_hibernation_begin_old,
	.end = acpi_pm_end,
	.pre_snapshot = acpi_pm_pre_suspend,
	.prepare = acpi_pm_freeze,
	.finish = acpi_pm_finish,
	.enter = acpi_hibernation_enter,
	.leave = acpi_hibernation_leave,
	.pre_restore = acpi_pm_freeze,
	.restore_cleanup = acpi_pm_thaw,
	.recover = acpi_pm_finish,
};
1023 
/*
 * If S4 is supported, register the hibernation callbacks and, depending on
 * acpi_check_s4_hw_signature, record the FACS hardware signature so a
 * hardware change across hibernation can be detected (warn) or rejected
 * (signature embedded in the swsusp image header).
 */
static void acpi_sleep_hibernate_setup(void)
{
	if (!acpi_sleep_state_supported(ACPI_STATE_S4))
		return;

	hibernation_set_ops(old_suspend_ordering ?
			&acpi_hibernation_ops_old : &acpi_hibernation_ops);
	sleep_states[ACPI_STATE_S4] = 1;
	/* acpi_check_s4_hw_signature == 0: signature checking disabled. */
	if (!acpi_check_s4_hw_signature)
		return;

	acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
	if (facs) {
		/*
		 * s4_hardware_signature is the local variable which is just
		 * used to warn about mismatch after we're attempting to
		 * resume (in violation of the ACPI specification.)
		 */
		s4_hardware_signature = facs->hardware_signature;

		if (acpi_check_s4_hw_signature > 0) {
			/*
			 * If we're actually obeying the ACPI specification
			 * then the signature is written out as part of the
			 * swsusp header, in order to allow the boot kernel
			 * to gracefully decline to resume.
			 */
			swsusp_hardware_signature = facs->hardware_signature;
		}
	}
}
1055 #else /* !CONFIG_HIBERNATION */
1056 static inline void acpi_sleep_hibernate_setup(void) {}
1057 #endif /* !CONFIG_HIBERNATION */
1058 
/*
 * sys-off handler (POWER_OFF_PREPARE / RESTART_PREPARE): run the S5
 * preparation methods and quiesce GPEs before the final power-off.
 */
static int acpi_power_off_prepare(struct sys_off_data *data)
{
	/* Prepare to power off the system */
	acpi_sleep_prepare(ACPI_STATE_S5);
	acpi_disable_all_gpes();
	acpi_os_wait_events_complete();
	return NOTIFY_DONE;
}
1067 
/*
 * sys-off handler (POWER_OFF): actually enter S5 with interrupts disabled.
 * Does not normally return.
 */
static int acpi_power_off(struct sys_off_data *data)
{
	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
	pr_debug("%s called\n", __func__);
	local_irq_disable();
	acpi_enter_sleep_state(ACPI_STATE_S5);
	return NOTIFY_DONE;
}
1076 
/*
 * acpi_sleep_init - Detect supported sleep states and register callbacks.
 *
 * Probes S1-S5 support, wires up suspend/hibernate/power-off handlers
 * accordingly, prints the supported states, and registers the reboot
 * notifier that evaluates \_TTS on shutdown.  Always returns 0.
 */
int __init acpi_sleep_init(void)
{
	/* " S%d" is at most 3 chars per state, plus the terminating NUL. */
	char supported[ACPI_S_STATE_COUNT * 3 + 1];
	char *pos = supported;
	int i;

	acpi_sleep_dmi_check();

	/* S0 (working state) is always available. */
	sleep_states[ACPI_STATE_S0] = 1;

	acpi_sleep_syscore_init();
	acpi_sleep_suspend_setup();
	acpi_sleep_hibernate_setup();

	if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
		sleep_states[ACPI_STATE_S5] = 1;

		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF_PREPARE,
					 SYS_OFF_PRIO_FIRMWARE,
					 acpi_power_off_prepare, NULL);

		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
					 SYS_OFF_PRIO_FIRMWARE,
					 acpi_power_off, NULL);

		/*
		 * Windows uses S5 for reboot, so some BIOSes depend on it to
		 * perform proper reboot.
		 */
		register_sys_off_handler(SYS_OFF_MODE_RESTART_PREPARE,
					 SYS_OFF_PRIO_FIRMWARE,
					 acpi_power_off_prepare, NULL);
	} else {
		/* No _S5: platforms may fall back to e.g. EFI power off. */
		acpi_no_s5 = true;
	}

	supported[0] = 0;
	for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
		if (sleep_states[i])
			pos += sprintf(pos, " S%d", i);
	}
	pr_info("(supports%s)\n", supported);

	/*
	 * Register the tts_notifier to reboot notifier list so that the _TTS
	 * object can also be evaluated when the system enters S5.
	 */
	register_reboot_notifier(&tts_notifier);
	return 0;
}
1127